diff --git a/docs/bigquery-client.rst b/docs/bigquery-client.rst deleted file mode 100644 index 94c3c4139f6b..000000000000 --- a/docs/bigquery-client.rst +++ /dev/null @@ -1,15 +0,0 @@ -BigQuery Client -=============== - -.. automodule:: gcloud.bigquery.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.bigquery.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigquery-dataset.rst b/docs/bigquery-dataset.rst deleted file mode 100644 index 9c00c733866b..000000000000 --- a/docs/bigquery-dataset.rst +++ /dev/null @@ -1,7 +0,0 @@ -Datasets -~~~~~~~~ - -.. automodule:: gcloud.bigquery.dataset - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigquery-job.rst b/docs/bigquery-job.rst deleted file mode 100644 index ce24a0adb01c..000000000000 --- a/docs/bigquery-job.rst +++ /dev/null @@ -1,7 +0,0 @@ -Jobs -~~~~ - -.. automodule:: gcloud.bigquery.job - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigquery-query.rst b/docs/bigquery-query.rst deleted file mode 100644 index b161c8da0c87..000000000000 --- a/docs/bigquery-query.rst +++ /dev/null @@ -1,7 +0,0 @@ -Query -~~~~~ - -.. automodule:: gcloud.bigquery.query - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigquery-table.rst b/docs/bigquery-table.rst deleted file mode 100644 index c47a554a6967..000000000000 --- a/docs/bigquery-table.rst +++ /dev/null @@ -1,7 +0,0 @@ -Tables -~~~~~~ - -.. automodule:: gcloud.bigquery.table - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigquery-usage.rst b/docs/bigquery-usage.rst deleted file mode 100644 index edab2e1510d4..000000000000 --- a/docs/bigquery-usage.rst +++ /dev/null @@ -1,626 +0,0 @@ -Using the API -============= - -Authentication / Configuration ------------------------------- - -- Use :class:`Client ` objects to configure - your applications. - -- :class:`Client ` objects hold both a ``project`` - and an authenticated connection to the BigQuery service. - -- The authentication credentials can be implicitly determined from the - environment or directly via - :meth:`from_service_account_json ` - and - :meth:`from_service_account_p12 `. - -- After setting ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GCLOUD_PROJECT`` - environment variables, create an instance of - :class:`Client `. - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - -- Override the credentials inferred from the environment by passing explicit - ``credentials`` to one of the alternative ``classmethod`` factories, - :meth:`gcloud.bigquery.client.Client.from_service_account_json`: - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client.from_service_account_json('/path/to/creds.json') - - or :meth:`gcloud.bigquery.client.Client.from_service_account_p12`: - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client.from_service_account_p12( - ... '/path/to/creds.p12', 'jrandom@example.com') - - -Projects --------- - -A project is the top-level container in the ``BigQuery`` API: it is tied -closely to billing, and can provide default access control across all its -datasets. If no ``project`` is passed to the client container, the library -attempts to infer a project using the environment (including explicit -environment variables, GAE, and GCE). 
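For example, a client constructed with no arguments picks up the inferred project (a sketch: it assumes the environment described above is configured, and the project ID shown is illustrative):

.. doctest::

   >>> from gcloud import bigquery
   >>> client = bigquery.Client()  # project inferred from the environment
   >>> client.project
   'inferred-project-id'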
- -To override the project inferred from the environment, pass an explicit -``project`` to the constructor, or to either of the alternative -``classmethod`` factories: - - .. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client(project='PROJECT_ID') - -Project ACLs -~~~~~~~~~~~~ - -Each project has an access control list granting reader / writer / owner -permission to one or more entities. This list cannot be queried or set -via the API: it must be managed using the Google Developer Console. - -Datasets --------- - -A dataset represents a collection of tables, and applies several default -policies to tables as they are created: - -- An access control list (ACL). When created, a dataset has an ACL - which maps to the ACL inherited from its project. - -- A default table expiration period. If set, tables created within the - dataset will have the value as their expiration period. - -Dataset operations -~~~~~~~~~~~~~~~~~~ - -Create a new dataset for the client's project: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.create() # API request - -Check for the existence of a dataset: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.exists() # API request - True - -List datasets for the client's project: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> datasets, next_page_token = client.list_datasets() # API request - >>> [dataset.name for dataset in datasets] - ['dataset_name'] - -Refresh metadata for a dataset (to pick up changes made by another client): - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.reload() # API request - -Patch metadata for a dataset: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> one_day_ms = 24 * 60 * 60 * 1000 - >>> dataset.patch(description='Description goes here', - ... default_table_expiration_ms=one_day_ms) # API request - -Replace the ACL for a dataset, and update all writeable fields: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.get() # API request - >>> acl = list(dataset.acl) - >>> acl.append(bigquery.Access(role='READER', entity_type='domain', entity='example.com')) - >>> dataset.acl = acl - >>> dataset.update() # API request - -Delete a dataset: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> dataset.delete() # API request - - -Tables ------- - -Tables exist within datasets. List tables for the dataset: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> tables, next_page_token = dataset.list_tables() # API request - >>> [table.name for table in tables] - ['table_name'] - -Create a table: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.create() # API request - -Check for the existence of a table: - -.. 
doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.exists() # API request - True - -Refresh metadata for a table (to pick up changes made by another client): - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.reload() # API request - -Patch specific properties for a table: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.patch(friendly_name='Person Ages', - ... description='Ages of persons') # API request - -Update all writable metadata for a table: - -.. doctest:: - - >>> from gcloud import bigquery - >>> from gcloud.bigquery import SchemaField - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required')] - >>> table.update() # API request - -Upload table data from a file: - -.. doctest:: - - >>> from gcloud import bigquery - >>> from gcloud.bigquery import SchemaField - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required')] - >>> with open('person_ages.csv', 'rb') as csv_file: - ... table.upload_from_file(csv_file, source_format='CSV', - ... create_disposition='CREATE_IF_NEEDED') - -Get rows from a table's data: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> rows, next_page_token = table.fetch_data(max_results=100) # API request - >>> for row in rows: - ... for field, value in zip(table.schema, row): - ... do_something(field, value) - -Delete a table: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.delete() # API request - -Jobs ---- - -Jobs describe actions performed on data in BigQuery tables: - -- Load data into a table -- Run a query against data in one or more tables -- Extract data from a table -- Copy a table - -List jobs for a project: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> jobs, token = client.list_jobs() # API request - >>> [(job.name, job.job_type, job.created, job.state) for job in jobs] - [('load-table-job', 'load', datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=), 'done')] - -Querying data (synchronous) -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Run a query which can be expected to complete within bounded time: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> QUERY = """\ - ... SELECT count(*) AS age_count FROM dataset_name.person_ages - ... """ - >>> query = client.run_sync_query(QUERY) - >>> query.timeout_ms = 1000 - >>> query.run() # API request - >>> query.complete - True - >>> len(query.schema) - 1 - >>> field = query.schema[0] - >>> field.name - u'age_count' - >>> field.field_type - u'INTEGER' - >>> field.mode - u'NULLABLE' - >>> query.rows - [(15,)] - >>> query.total_rows - 1 - -If the rows returned by the query do not fit into the initial response, -then we need to fetch the remaining rows via ``fetch_data``: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> QUERY = """\ - ... SELECT * FROM dataset_name.person_ages - ... """ - >>> query = client.run_sync_query(QUERY) - >>> query.timeout_ms = 1000 - >>> query.run() # API request - >>> query.complete - True - >>> query.total_rows - 1234 - >>> query.page_token - '8d6e452459238eb0fe87d8eb191dd526ee70a35e' - >>> do_something_with(query.schema, query.rows) - >>> token = query.page_token # for initial request - >>> while token is not None: - ... rows, _, token = query.fetch_data(page_token=token) - ... do_something_with(query.schema, rows) - - -If the query takes longer than the timeout allowed, ``query.complete`` -will be ``False``. In that case, we need to poll the associated job until -it is done, and then fetch the results: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> QUERY = """\ - ... SELECT * FROM dataset_name.person_ages - ... """ - >>> query = client.run_sync_query(QUERY) - >>> query.timeout_ms = 1000 - >>> query.run() # API request - >>> query.complete - False - >>> job = query.job - >>> import time - >>> retry_count = 100 - >>> while retry_count > 0 and job.state == 'running': - ... retry_count -= 1 - ... time.sleep(10) - ... job.reload() # API call - >>> job.state - 'done' - >>> token = None # for initial request - >>> while True: - ... rows, _, token = query.fetch_data(page_token=token) - ... do_something_with(query.schema, rows) - ... if token is None: - ... break - - - -Querying data (asynchronous) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Background a query, loading the results into a table: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> query = """\ - ... SELECT first_name + ' ' + last_name AS full_name, - ... FLOOR(DATEDIFF(CURRENT_DATE(), birth_date) / 365) AS age - ... FROM dataset_name.persons - ... """ - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> job = client.run_async_query('fullname-age-query-job', query) - >>> job.destination_table = table - >>> job.write_disposition = 'truncate' - >>> job.name - 'fullname-age-query-job' - >>> job.job_type - 'query' - >>> job.created - None - >>> job.state - None - -.. note:: - - - ``gcloud.bigquery`` generates a UUID for each job. - - The ``created`` and ``state`` fields are not set until the job - is submitted to the BigQuery back-end. - -Then, begin executing the job on the server: - -.. doctest:: - - >>> job.begin() # API call - >>> job.created - datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=) - >>> job.state - 'running' - -Poll until the job is complete: - -.. doctest:: - - >>> import time - >>> retry_count = 100 - >>> while retry_count > 0 and job.state == 'running': - ... retry_count -= 1 - ... time.sleep(10) - ... 
job.reload() # API call - >>> job.state - 'done' - >>> job.ended - datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=) - -Inserting data (synchronous) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Load data synchronously from a local CSV file into a new table: - -.. doctest:: - - >>> import csv - >>> from gcloud import bigquery - >>> from gcloud.bigquery import SchemaField - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required')] - >>> with open('/path/to/person_ages.csv', 'rb') as file_obj: - ... reader = csv.reader(file_obj) - ... rows = list(reader) - >>> table.insert_data(rows) # API request - -Inserting data (asynchronous) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Start a job loading data asynchronously from a set of CSV files, located on -Google Cloud Storage, appending rows into an existing table. First, create -the job locally: - -.. doctest:: - - >>> from gcloud import bigquery - >>> from gcloud.bigquery import SchemaField - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> table.schema = [ - ... SchemaField('full_name', 'STRING', mode='required'), - ... SchemaField('age', 'INTEGER', mode='required')] - >>> job = client.load_table_from_storage( - ... 'load-from-storage-job', table, 'gs://bucket-name/object-prefix*') - >>> job.source_format = 'CSV' - >>> job.skip_leading_rows = 1 # count of skipped header rows - >>> job.write_disposition = 'truncate' - >>> job.name - 'load-from-storage-job' - >>> job.job_type - 'load' - >>> job.created - None - >>> job.state - None - -.. note:: - - - ``gcloud.bigquery`` generates a UUID for each job. - - The ``created`` and ``state`` fields are not set until the job - is submitted to the BigQuery back-end. - -Then, begin executing the job on the server: - -.. doctest:: - - >>> job.begin() # API call - >>> job.created - datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=) - >>> job.state - 'running' - -Poll until the job is complete: - -.. doctest:: - - >>> import time - >>> retry_count = 100 - >>> while retry_count > 0 and job.state == 'running': - ... retry_count -= 1 - ... time.sleep(10) - ... job.reload() # API call - >>> job.state - 'done' - >>> job.ended - datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=) - -Exporting data (async) -~~~~~~~~~~~~~~~~~~~~~~ - -Start a job exporting a table's data asynchronously to a set of CSV files, -located on Google Cloud Storage. First, create the job locally: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> dataset = client.dataset('dataset_name') - >>> table = dataset.table(name='person_ages') - >>> job = client.extract_table_to_storage( - ... 'extract-person-ages-job', table, - ... 'gs://bucket-name/export-prefix*.csv') - >>> job.destination_format = 'CSV' - >>> job.print_header = True - >>> job.write_disposition = 'truncate' - >>> job.name - 'extract-person-ages-job' - >>> job.job_type - 'extract' - >>> job.created - None - >>> job.state - None - -.. note:: - - - ``gcloud.bigquery`` generates a UUID for each job. - - The ``created`` and ``state`` fields are not set until the job - is submitted to the BigQuery back-end. - -Then, begin executing the job on the server: - -.. doctest:: - - >>> job.begin() # API call - >>> job.created - datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=) - >>> job.state - 'running' - -Poll until the job is complete: - -.. 
doctest:: - - >>> import time - >>> retry_count = 100 - >>> while retry_count > 0 and job.state == 'running': - ... retry_count -= 1 - ... time.sleep(10) - ... job.reload() # API call - >>> job.state - 'done' - >>> job.ended - datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=) - - -Copy tables (async) -~~~~~~~~~~~~~~~~~~~ - -First, create the job locally: - -.. doctest:: - - >>> from gcloud import bigquery - >>> client = bigquery.Client() - >>> source_table = dataset.table(name='person_ages') - >>> destination_table = dataset.table(name='person_ages_copy') - >>> job = client.copy_table( - ... 'copy-table-job', destination_table, source_table) - >>> job.name - 'copy-table-job' - >>> job.job_type - 'copy' - >>> job.created - None - >>> job.state - None - -.. note:: - - - ``gcloud.bigquery`` generates a UUID for each job. - - The ``created`` and ``state`` fields are not set until the job - is submitted to the BigQuery back-end. - -Then, begin executing the job on the server: - -.. doctest:: - - >>> job.begin() # API call - >>> job.created - datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=) - >>> job.state - 'running' - -Poll until the job is complete: - -.. doctest:: - - >>> import time - >>> retry_count = 100 - >>> while retry_count > 0 and job.state == 'running': - ... retry_count -= 1 - ... time.sleep(10) - ... job.reload() # API call - >>> job.state - 'done' - >>> job.ended - datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=) diff --git a/docs/bigtable-client-intro.rst b/docs/bigtable-client-intro.rst deleted file mode 100644 index 3f43371ec1db..000000000000 --- a/docs/bigtable-client-intro.rst +++ /dev/null @@ -1,92 +0,0 @@ -Base for Everything -=================== - -To use the API, the :class:`Client ` -class defines a high-level interface which handles authorization -and creating other objects: - -.. code:: python - - from gcloud.bigtable.client import Client - client = Client() - -Long-lived Defaults -------------------- - -When creating a :class:`Client `, the -``user_agent`` and ``timeout_seconds`` arguments have sensible -defaults -(:data:`DEFAULT_USER_AGENT ` and -:data:`DEFAULT_TIMEOUT_SECONDS `). -However, you may over-ride them and these will be used throughout all API -requests made with the ``client`` you create. - -Configuration -------------- - -- For an overview of authentication in ``gcloud-python``, - see :doc:`gcloud-auth`. - -- In addition to any authentication configuration, you can also set the - :envvar:`GCLOUD_PROJECT` environment variable for the Google Cloud Console - project you'd like to interact with. If your code is running in Google App - Engine or Google Compute Engine the project will be detected automatically. - (Setting this environment variable is not required, you may instead pass the - ``project`` explicitly when constructing a - :class:`Client `). - -- After configuring your environment, create a - :class:`Client ` - - .. code:: - - >>> from gcloud import bigtable - >>> client = bigtable.Client() - - or pass in ``credentials`` and ``project`` explicitly - - .. code:: - - >>> from gcloud import bigtable - >>> client = bigtable.Client(project='my-project', credentials=creds) - -.. tip:: - - Be sure to use the **Project ID**, not the **Project Number**. - -Admin API Access ----------------- - -If you'll be using your client to make `Cluster Admin`_ and `Table Admin`_ -API requests, you'll need to pass the ``admin`` argument: - -.. 
code:: python - - client = bigtable.Client(admin=True) - -Read-Only Mode --------------- - -If on the other hand, you only have (or want) read access to the data, -you can pass the ``read_only`` argument: - -.. code:: python - - client = bigtable.Client(read_only=True) - -This will ensure that the -:data:`READ_ONLY_SCOPE ` is used -for API requests (so any accidental requests that would modify data will -fail). - -Next Step ---------- - -After a :class:`Client `, the next highest-level -object is a :class:`Cluster `. You'll need -one before you can interact with tables or data. - -Head next to learn about the :doc:`bigtable-cluster-api`. - -.. _Cluster Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1 -.. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/docs/bigtable-client.rst b/docs/bigtable-client.rst deleted file mode 100644 index b765144a160d..000000000000 --- a/docs/bigtable-client.rst +++ /dev/null @@ -1,7 +0,0 @@ -Client -~~~~~~ - -.. automodule:: gcloud.bigtable.client - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigtable-cluster-api.rst b/docs/bigtable-cluster-api.rst deleted file mode 100644 index 17fba96840db..000000000000 --- a/docs/bigtable-cluster-api.rst +++ /dev/null @@ -1,181 +0,0 @@ -Cluster Admin API -================= - -After creating a :class:`Client `, you can -interact with individual clusters, groups of clusters or available -zones for a project. - -List Clusters -------------- - -If you want a comprehensive list of all existing clusters, make a -`ListClusters`_ API request with -:meth:`Client.list_clusters() `: - -.. code:: python - - clusters = client.list_clusters() - -List Zones ----------- - -If you aren't sure which ``zone`` to create a cluster in, find out -which zones your project has access to with a `ListZones`_ API request -with :meth:`Client.list_zones() `: - -.. code:: python - - zones = client.list_zones() - -You can choose a :class:`string ` from among the result to pass to -the :class:`Cluster ` constructor. - -The available zones (as of February 2016) are - -.. code:: python - - >>> zones - [u'asia-east1-b', u'europe-west1-c', u'us-central1-c', u'us-central1-b'] - -Cluster Factory ---------------- - -To create a :class:`Cluster ` object: - -.. code:: python - - cluster = client.cluster(zone, cluster_id, - display_name=display_name, - serve_nodes=3) - -Both ``display_name`` and ``serve_nodes`` are optional. When not provided, -``display_name`` defaults to the ``cluster_id`` value and ``serve_nodes`` -defaults to the minimum allowed: -:data:`DEFAULT_SERVE_NODES `. - -Even if this :class:`Cluster ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`Table ` just as the -:class:`Client ` is used as the parent of -a :class:`Cluster `. - -Create a new Cluster --------------------- - -After creating the cluster object, make a `CreateCluster`_ API request -with :meth:`create() `: - -.. code:: python - - cluster.display_name = 'My very own cluster' - cluster.create() - -If you would like more than the minimum number of nodes -(:data:`DEFAULT_SERVE_NODES `) -in your cluster: - -.. code:: python - - cluster.serve_nodes = 10 - cluster.create() - -Check on Current Operation --------------------------- - -.. 
note:: - - When modifying a cluster (via a `CreateCluster`_, `UpdateCluster`_ or - `UndeleteCluster`_ request), the Bigtable API will return a - `long-running operation`_, and a corresponding - :class:`Operation ` object - will be returned by each of - :meth:`create() `, - :meth:`update() ` and - :meth:`undelete() `. - -You can check if a long-running operation (for a -:meth:`create() `, -:meth:`update() ` or -:meth:`undelete() `) has finished -by making a `GetOperation`_ request with -:meth:`Operation.finished() `: - -.. code:: python - - >>> operation = cluster.create() - >>> operation.finished() - True - -.. note:: - - Once an :class:`Operation ` object - has returned :data:`True` from - :meth:`finished() `, the - object should not be re-used. Subsequent calls to - :meth:`finished() ` - will result in a :class:`ValueError `. - -Get metadata for an existing Cluster ------------------------------------- - -After creating the cluster object, make a `GetCluster`_ API request -with :meth:`reload() `: - -.. code:: python - - cluster.reload() - -This will load ``serve_nodes`` and ``display_name`` for the existing -``cluster`` in addition to the ``cluster_id``, ``zone`` and ``project`` -already set on the :class:`Cluster ` object. - -Update an existing Cluster --------------------------- - -After creating the cluster object, make an `UpdateCluster`_ API request -with :meth:`update() `: - -.. code:: python - - cluster.display_name = 'New display_name' - cluster.update() - -Delete an existing Cluster --------------------------- - -Make a `DeleteCluster`_ API request with -:meth:`delete() `: - -.. code:: python - - cluster.delete() - -Undelete a deleted Cluster --------------------------- - -Make an `UndeleteCluster`_ API request with -:meth:`undelete() `: - -.. code:: python - - cluster.undelete() - -Next Step ---------- - -Now we go down the hierarchy from -:class:`Cluster ` to a -:class:`Table `. - -Head next to learn about the :doc:`bigtable-table-api`. - -.. _Cluster Admin API: https://cloud.google.com/bigtable/docs/creating-cluster -.. _CreateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L66-L68 -.. _GetCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L38-L40 -.. _UpdateCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L93-L95 -.. _DeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L109-L111 -.. _ListZones: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L33-L35 -.. _ListClusters: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L44-L46 -.. 
_GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. _UndeleteCluster: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto#L126-L128 -.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/docs/bigtable-cluster.rst b/docs/bigtable-cluster.rst deleted file mode 100644 index 9b88f2059d14..000000000000 --- a/docs/bigtable-cluster.rst +++ /dev/null @@ -1,7 +0,0 @@ -Cluster -~~~~~~~ - -.. automodule:: gcloud.bigtable.cluster - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigtable-column-family.rst b/docs/bigtable-column-family.rst deleted file mode 100644 index b51d7a18e4ee..000000000000 --- a/docs/bigtable-column-family.rst +++ /dev/null @@ -1,50 +0,0 @@ -Column Families -=============== - -When creating a -:class:`ColumnFamily `, it is -possible to set garbage collection rules for expired data. - -By setting a rule, cells in the table matching the rule will be deleted -during periodic garbage collection (which executes opportunistically in the -background). - -The types -:class:`MaxAgeGCRule `, -:class:`MaxVersionsGCRule `, -:class:`GarbageCollectionRuleUnion ` and -:class:`GarbageCollectionRuleIntersection ` -can all be used as the optional ``gc_rule`` argument in the -:class:`ColumnFamily ` -constructor. This value is then used in the -:meth:`create() ` and -:meth:`update() ` methods. - -These rules can be nested arbitrarily, with a -:class:`MaxAgeGCRule ` or -:class:`MaxVersionsGCRule ` -at the lowest level of the nesting: - -.. code:: python - - import datetime - - max_age = datetime.timedelta(days=3) - rule1 = MaxAgeGCRule(max_age) - rule2 = MaxVersionsGCRule(1) - - # Make a composite that matches anything older than 3 days **AND** - # with more than 1 version. - rule3 = GarbageCollectionIntersection(rules=[rule1, rule2]) - - # Make another composite that matches our previous intersection - # **OR** anything that has more than 3 versions. - rule4 = GarbageCollectionRule(max_num_versions=3) - rule5 = GarbageCollectionUnion(rules=[rule3, rule4]) - ----- - -.. automodule:: gcloud.bigtable.column_family - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigtable-data-api.rst b/docs/bigtable-data-api.rst deleted file mode 100644 index 779efa991886..000000000000 --- a/docs/bigtable-data-api.rst +++ /dev/null @@ -1,344 +0,0 @@ -Data API -======== - -After creating a :class:`Table ` and some -column families, you are ready to store and retrieve data. - -Cells vs. Columns vs. Column Families -+++++++++++++++++++++++++++++++++++++ - -* As explained in the :doc:`table overview `, tables can - have many column families. -* As described below, a table can also have many rows which are - specified by row keys. -* Within a row, data is stored in a cell. A cell simply has a value (as - bytes) and a timestamp. The number of cells in each row can be - different, depending on what was stored in each row. -* Each cell lies in a column (**not** a column family). A column is really - just a more **specific** modifier within a column family. 
A column - can be present in every column family, in only one or anywhere in between. -* Within a column family there can be many columns. For example within - the column family ``foo`` we could have columns ``bar`` and ``baz``. - These would typically be represented as ``foo:bar`` and ``foo:baz``. - -Modifying Data -++++++++++++++ - -Since data is stored in cells, which are stored in rows, we -use the metaphor of a **row** in classes that are used to modify -(write, update, delete) data in a -:class:`Table `. - -Direct vs. Conditional vs. Append ---------------------------------- - -There are three ways to modify data in a table, described by the -`MutateRow`_, `CheckAndMutateRow`_ and `ReadModifyWriteRow`_ API -methods. - -* The **direct** way is via `MutateRow`_ which involves simply - adding, overwriting or deleting cells. The - :class:`DirectRow ` class - handles direct mutations. -* The **conditional** way is via `CheckAndMutateRow`_. This method - first checks if some filter is matched in a a given row, then - applies one of two sets of mutations, depending on if a match - occurred or not. (These mutation sets are called the "true - mutations" and "false mutations".) The - :class:`ConditionalRow ` class - handles conditional mutations. -* The **append** way is via `ReadModifyWriteRow`_. This simply - appends (as bytes) or increments (as an integer) data in a presumed - existing cell in a row. The - :class:`AppendRow ` class - handles append mutations. - -Row Factory ------------ - -A single factory can be used to create any of the three row types. -To create a :class:`DirectRow `: - -.. code:: python - - row = table.row(row_key) - -Unlike the previous string values we've used before, the row key must -be ``bytes``. - -To create a :class:`ConditionalRow `, -first create a :class:`RowFilter ` and -then - -.. code:: python - - cond_row = table.row(row_key, filter_=filter_) - -To create an :class:`AppendRow ` - -.. code:: python - - append_row = table.row(row_key, append=True) - -Building Up Mutations ---------------------- - -In all three cases, a set of mutations (or two sets) are built up -on a row before they are sent of in a batch via - -.. code:: python - - row.commit() - -Direct Mutations ----------------- - -Direct mutations can be added via one of four methods - -* :meth:`set_cell() ` allows a - single value to be written to a column - - .. code:: python - - row.set_cell(column_family_id, column, value, - timestamp=timestamp) - - If the ``timestamp`` is omitted, the current time on the Google Cloud - Bigtable server will be used when the cell is stored. - - The value can either by bytes or an integer (which will be converted to - bytes as a signed 64-bit integer). - -* :meth:`delete_cell() ` deletes - all cells (i.e. for all timestamps) in a given column - - .. code:: python - - row.delete_cell(column_family_id, column) - - Remember, this only happens in the ``row`` we are using. - - If we only want to delete cells from a limited range of time, a - :class:`TimestampRange ` can - be used - - .. code:: python - - row.delete_cell(column_family_id, column, - time_range=time_range) - -* :meth:`delete_cells() ` does - the same thing as - :meth:`delete_cell() ` - but accepts a list of columns in a column family rather than a single one. - - .. code:: python - - row.delete_cells(column_family_id, [column1, column2], - time_range=time_range) - - In addition, if we want to delete cells from every column in a column family, - the special :attr:`ALL_COLUMNS ` - value can be used - - .. 
code:: python - - row.delete_cells(column_family_id, row.ALL_COLUMNS, - time_range=time_range) - -* :meth:`delete() ` will delete the - entire row - - .. code:: python - - row.delete() - -Conditional Mutations ---------------------- - -Making **conditional** modifications is essentially identical -to **direct** modifications: it uses the exact same methods -to accumulate mutations. - -However, each mutation added must specify a ``state``: will the mutation be -applied if the filter matches or if it fails to match. - -For example: - -.. code:: python - - cond_row.set_cell(column_family_id, column, value, - timestamp=timestamp, state=True) - -will add to the set of true mutations. - -Append Mutations ----------------- - -Append mutations can be added via one of two methods - -* :meth:`append_cell_value() ` - appends a bytes value to an existing cell: - - .. code:: python - - append_row.append_cell_value(column_family_id, column, bytes_value) - -* :meth:`increment_cell_value() ` - increments an integer value in an existing cell: - - .. code:: python - - append_row.increment_cell_value(column_family_id, column, int_value) - - Since only bytes are stored in a cell, the cell value is decoded as - a signed 64-bit integer before being incremented. (This happens on - the Google Cloud Bigtable server, not in the library.) - -Notice that no timestamp was specified. This is because **append** mutations -operate on the latest value of the specified column. - -If there are no cells in the specified column, then the empty string (bytes -case) or zero (integer case) are the assumed values. - -Starting Fresh --------------- - -If accumulated mutations need to be dropped, use - -.. code:: python - - row.clear() - -Reading Data -++++++++++++ - -Read Single Row from a Table ----------------------------- - -To make a `ReadRows`_ API request for a single row key, use -:meth:`Table.read_row() `: - -.. code:: python - - >>> row_data = table.read_row(row_key) - >>> row_data.cells - { - u'fam1': { - b'col1': [ - , - , - ], - b'col2': [ - , - ], - }, - u'fam2': { - b'col3': [ - , - , - , - ], - }, - } - >>> cell = row_data.cells[u'fam1'][b'col1'][0] - >>> cell - - >>> cell.value - b'val1' - >>> cell.timestamp - datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=) - -Rather than returning a :class:`DirectRow ` -or similar class, this method returns a -:class:`PartialRowData ` -instance. This class is used for reading and parsing data rather than for -modifying data (as :class:`DirectRow ` is). - -A filter can also be applied to the results: - -.. code:: python - - row_data = table.read_row(row_key, filter_=filter_val) - -The allowable ``filter_`` values are the same as those used for a -:class:`ConditionalRow `. For -more information, see the -:meth:`Table.read_row() ` documentation. - -Stream Many Rows from a Table ------------------------------ - -To make a `ReadRows`_ API request for a stream of rows, use -:meth:`Table.read_rows() `: - -.. code:: python - - row_data = table.read_rows() - -Using gRPC over HTTP/2, a continual stream of responses will be delivered. -In particular - -* :meth:`consume_next() ` - pulls the next result from the stream, parses it and stores it on the - :class:`PartialRowsData ` instance -* :meth:`consume_all() ` - pulls results from the stream until there are no more -* :meth:`cancel() ` closes - the stream - -See the :class:`PartialRowsData ` -documentation for more information. - -As with -:meth:`Table.read_row() `, an optional -``filter_`` can be applied. 
In addition a ``start_key`` and / or ``end_key`` -can be supplied for the stream, a ``limit`` can be set and a boolean -``allow_row_interleaving`` can be specified to allow faster streamed results -at the potential cost of non-sequential reads. - -See the :meth:`Table.read_rows() ` -documentation for more information on the optional arguments. - -Sample Keys in a Table ----------------------- - -Make a `SampleRowKeys`_ API request with -:meth:`Table.sample_row_keys() `: - -.. code:: python - - keys_iterator = table.sample_row_keys() - -The returned row keys will delimit contiguous sections of the table of -approximately equal size, which can be used to break up the data for -distributed tasks like mapreduces. - -As with -:meth:`Table.read_rows() `, the -returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream. - -The next key in the result can be accessed via - -.. code:: python - - next_key = keys_iterator.next() - -or all keys can be iterated over via - -.. code:: python - - for curr_key in keys_iterator: - do_something(curr_key) - -Just as with reading, the stream can be canceled: - -.. code:: python - - keys_iterator.cancel() - -.. _ReadRows: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L36-L38 -.. _SampleRowKeys: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L44-L46 -.. _MutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L50-L52 -.. _CheckAndMutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L62-L64 -.. _ReadModifyWriteRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L70-L72 diff --git a/docs/bigtable-row-data.rst b/docs/bigtable-row-data.rst deleted file mode 100644 index 5ec98f932d1d..000000000000 --- a/docs/bigtable-row-data.rst +++ /dev/null @@ -1,7 +0,0 @@ -Row Data -~~~~~~~~ - -.. automodule:: gcloud.bigtable.row_data - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigtable-row-filters.rst b/docs/bigtable-row-filters.rst deleted file mode 100644 index b5e99eab6575..000000000000 --- a/docs/bigtable-row-filters.rst +++ /dev/null @@ -1,68 +0,0 @@ -Bigtable Row Filters -==================== - -It is possible to use a -:class:`RowFilter ` -when adding mutations to a -:class:`ConditionalRow ` and when -reading row data with :meth:`read_row() ` -:meth:`read_rows() `. 
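For example, a minimal sketch (assuming an existing ``table`` and a bytes row key; the choice of filter is illustrative) that keeps only the most recent cell in each column while reading a single row:

.. code:: python

    from gcloud.bigtable.row_filters import CellsColumnLimitFilter

    # Keep at most one cell (the most recent) per column in the result.
    filter_ = CellsColumnLimitFilter(1)
    row_data = table.read_row(b'row-key-1', filter_=filter_)

The same ``filter_`` value can be passed to ``read_rows()`` when streaming rows.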
- -As laid out in the `RowFilter definition`_, the following basic filters -are provided: - -* :class:`SinkFilter <.row_filters.SinkFilter>` -* :class:`PassAllFilter <.row_filters.PassAllFilter>` -* :class:`BlockAllFilter <.row_filters.BlockAllFilter>` -* :class:`RowKeyRegexFilter <.row_filters.RowKeyRegexFilter>` -* :class:`RowSampleFilter <.row_filters.RowSampleFilter>` -* :class:`FamilyNameRegexFilter <.row_filters.FamilyNameRegexFilter>` -* :class:`ColumnQualifierRegexFilter <.row_filters.ColumnQualifierRegexFilter>` -* :class:`TimestampRangeFilter <.row_filters.TimestampRangeFilter>` -* :class:`ColumnRangeFilter <.row_filters.ColumnRangeFilter>` -* :class:`ValueRegexFilter <.row_filters.ValueRegexFilter>` -* :class:`ValueRangeFilter <.row_filters.ValueRangeFilter>` -* :class:`CellsRowOffsetFilter <.row_filters.CellsRowOffsetFilter>` -* :class:`CellsRowLimitFilter <.row_filters.CellsRowLimitFilter>` -* :class:`CellsColumnLimitFilter <.row_filters.CellsColumnLimitFilter>` -* :class:`StripValueTransformerFilter <.row_filters.StripValueTransformerFilter>` -* :class:`ApplyLabelFilter <.row_filters.ApplyLabelFilter>` - -In addition, these filters can be combined into composite filters with - -* :class:`RowFilterChain <.row_filters.RowFilterChain>` -* :class:`RowFilterUnion <.row_filters.RowFilterUnion>` -* :class:`ConditionalRowFilter <.row_filters.ConditionalRowFilter>` - -These rules can be nested arbitrarily, with a basic filter at the lowest -level. For example: - -.. code:: python - - # Filter in a specified column (matching any column family). - col1_filter = ColumnQualifierRegexFilter(b'columnbia') - - # Create a filter to label results. - label1 = u'label-red' - label1_filter = ApplyLabelFilter(label1) - - # Combine the filters to label all the cells in columnbia. - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Create a similar filter to label cells blue. - col2_filter = ColumnQualifierRegexFilter(b'columnseeya') - label2 = u'label-blue' - label2_filter = ApplyLabelFilter(label2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - ----- - -.. automodule:: gcloud.bigtable.row_filters - :members: - :undoc-members: - :show-inheritance: - -.. _RowFilter definition: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/1ff247c2e3b7cd0a2dd49071b2d95beaf6563092/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_data.proto#L195 diff --git a/docs/bigtable-row.rst b/docs/bigtable-row.rst deleted file mode 100644 index ae9835995bda..000000000000 --- a/docs/bigtable-row.rst +++ /dev/null @@ -1,8 +0,0 @@ -Bigtable Row -============ - -.. automodule:: gcloud.bigtable.row - :members: - :undoc-members: - :show-inheritance: - :inherited-members: diff --git a/docs/bigtable-table-api.rst b/docs/bigtable-table-api.rst deleted file mode 100644 index b3108da75a1b..000000000000 --- a/docs/bigtable-table-api.rst +++ /dev/null @@ -1,165 +0,0 @@ -Table Admin API -=============== - -After creating a :class:`Cluster `, you can -interact with individual tables, groups of tables or column families within -a table. - -List Tables ------------ - -If you want a comprehensive list of all existing tables in a cluster, make a -`ListTables`_ API request with -:meth:`Cluster.list_tables() `: - -.. code:: python - - >>> cluster.list_tables() - [, - ] - -Table Factory -------------- - -To create a :class:`Table ` object: - -.. 
code:: python - - table = cluster.table(table_id) - -Even if this :class:`Table ` already -has been created with the API, you'll want this object to use as a -parent of a :class:`ColumnFamily ` -or :class:`Row `. - -Create a new Table ------------------- - -After creating the table object, make a `CreateTable`_ API request -with :meth:`create() `: - -.. code:: python - - table.create() - -If you would like to initially split the table into several tablets (tablets are -similar to HBase regions): - -.. code:: python - - table.create(initial_split_keys=['s1', 's2']) - -Delete an existing Table ------------------------- - -Make a `DeleteTable`_ API request with -:meth:`delete() `: - -.. code:: python - - table.delete() - -Rename an existing Table ------------------------- - -Though the `RenameTable`_ API request is listed in the service -definition, requests to that method return:: - - BigtableTableService.RenameTable is not yet implemented - -We have implemented :meth:`rename() ` -but it will not work unless the backend supports the method. - -List Column Families in a Table ------------------------------- - -Though there is no **official** method for retrieving `column families`_ -associated with a table, the `GetTable`_ API method returns a -table object with the names of the column families. - -To retrieve the list of column families, use -:meth:`list_column_families() `: - -.. code:: python - - column_families = table.list_column_families() - -Column Family Factory ---------------------- - -To create a -:class:`ColumnFamily ` object: - -.. code:: python - - column_family = table.column_family(column_family_id) - -There is no real reason to use this factory unless you intend to -create or delete a column family. - -In addition, you can specify an optional ``gc_rule`` (a -:class:`GarbageCollectionRule ` -or similar): - -.. code:: python - - column_family = table.column_family(column_family_id, - gc_rule=gc_rule) - -This rule helps the backend determine when and how to clean up old cells -in the column family. - -See :doc:`bigtable-column-family` for more information about -:class:`GarbageCollectionRule ` -and related classes. - -Create a new Column Family --------------------------- - -After creating the column family object, make a `CreateColumnFamily`_ API -request with -:meth:`ColumnFamily.create() ` - -.. code:: python - - column_family.create() - -Delete an existing Column Family --------------------------------- - -Make a `DeleteColumnFamily`_ API request with -:meth:`ColumnFamily.delete() ` - -.. code:: python - - column_family.delete() - -Update an existing Column Family --------------------------------- - -Make an `UpdateColumnFamily`_ API request with -:meth:`ColumnFamily.update() ` - -.. code:: python - - column_family.update() - -Next Step ---------- - -Now we go down the final step of the hierarchy from -:class:`Table ` to -:class:`Row ` as well as streaming -data directly via a :class:`Table `. - -Head next to learn about the :doc:`bigtable-data-api`. - -.. _ListTables: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L40-L42 -.. _CreateTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L35-L37 -.. 
_DeleteTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L50-L52 -.. _RenameTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L56-L58 -.. _GetTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L45-L47 -.. _CreateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L61-L63 -.. _UpdateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L66-L68 -.. _DeleteColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L71-L73 -.. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers diff --git a/docs/bigtable-table.rst b/docs/bigtable-table.rst deleted file mode 100644 index 03ca332f9c9a..000000000000 --- a/docs/bigtable-table.rst +++ /dev/null @@ -1,7 +0,0 @@ -Table -~~~~~ - -.. automodule:: gcloud.bigtable.table - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/bigtable-usage.rst b/docs/bigtable-usage.rst deleted file mode 100644 index 95faba854e69..000000000000 --- a/docs/bigtable-usage.rst +++ /dev/null @@ -1,25 +0,0 @@ -Using the API -============= - -API requests are sent to the `Google Cloud Bigtable`_ API via RPC over HTTP/2. -In order to support this, we'll rely on `gRPC`_. We are working with the gRPC -team to rapidly make the install story more user-friendly. - -Get started by learning about the -:class:`Client ` on the -:doc:`bigtable-client-intro` page. - -In the hierarchy of API concepts - -* a :class:`Client ` owns a - :class:`Cluster ` -* a :class:`Cluster ` owns a - :class:`Table ` -* a :class:`Table ` owns a - :class:`ColumnFamily ` -* a :class:`Table ` owns a - :class:`Row ` - (and all the cells in the row) - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ -.. _gRPC: http://www.grpc.io/ diff --git a/docs/datastore-batches.rst b/docs/datastore-batches.rst deleted file mode 100644 index 49527a9fe495..000000000000 --- a/docs/datastore-batches.rst +++ /dev/null @@ -1,7 +0,0 @@ -Batches -~~~~~~~ - -.. automodule:: gcloud.datastore.batch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-client.rst b/docs/datastore-client.rst deleted file mode 100644 index bce60ef5052b..000000000000 --- a/docs/datastore-client.rst +++ /dev/null @@ -1,15 +0,0 @@ -Datastore Client -================ - -.. automodule:: gcloud.datastore.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. 
automodule:: gcloud.datastore.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-entities.rst b/docs/datastore-entities.rst deleted file mode 100644 index 119699fcb89e..000000000000 --- a/docs/datastore-entities.rst +++ /dev/null @@ -1,7 +0,0 @@ -Entities -~~~~~~~~ - -.. automodule:: gcloud.datastore.entity - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-helpers.rst b/docs/datastore-helpers.rst deleted file mode 100644 index 45b7637dd78c..000000000000 --- a/docs/datastore-helpers.rst +++ /dev/null @@ -1,7 +0,0 @@ -Helpers -~~~~~~~ - -.. automodule:: gcloud.datastore.helpers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-keys.rst b/docs/datastore-keys.rst deleted file mode 100644 index 3567201a7101..000000000000 --- a/docs/datastore-keys.rst +++ /dev/null @@ -1,7 +0,0 @@ -Keys -~~~~ - -.. automodule:: gcloud.datastore.key - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-queries.rst b/docs/datastore-queries.rst deleted file mode 100644 index 1babc7c9661a..000000000000 --- a/docs/datastore-queries.rst +++ /dev/null @@ -1,7 +0,0 @@ -Queries -~~~~~~~ - -.. automodule:: gcloud.datastore.query - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/datastore-transactions.rst b/docs/datastore-transactions.rst deleted file mode 100644 index 305279fe621a..000000000000 --- a/docs/datastore-transactions.rst +++ /dev/null @@ -1,8 +0,0 @@ -Transactions -~~~~~~~~~~~~ - -.. automodule:: gcloud.datastore.transaction - :members: - :undoc-members: - :show-inheritance: - :inherited-members: diff --git a/docs/dns-changes.rst b/docs/dns-changes.rst deleted file mode 100644 index ba3e0f44a414..000000000000 --- a/docs/dns-changes.rst +++ /dev/null @@ -1,7 +0,0 @@ -Change Sets -~~~~~~~~~~~ - -.. automodule:: gcloud.dns.changes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/dns-client.rst b/docs/dns-client.rst deleted file mode 100644 index d6c8ac49ad89..000000000000 --- a/docs/dns-client.rst +++ /dev/null @@ -1,15 +0,0 @@ -DNS Client -========== - -.. automodule:: gcloud.dns.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.dns.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/dns-resource-record-set.rst b/docs/dns-resource-record-set.rst deleted file mode 100644 index e68df9f58f90..000000000000 --- a/docs/dns-resource-record-set.rst +++ /dev/null @@ -1,7 +0,0 @@ -Resource Record Sets -~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.dns.resource_record_set - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/dns-usage.rst b/docs/dns-usage.rst deleted file mode 100644 index 8c6f9ae187a5..000000000000 --- a/docs/dns-usage.rst +++ /dev/null @@ -1,177 +0,0 @@ -Using the API -============= - -Client ------- - -:class:`Client ` objects provide a means to -configure your DNS applications. Eash instance holds both a ``project`` -and an authenticated connection to the DNS service. - -For an overview of authentication in ``gcloud-python``, see :doc:`gcloud-auth`. - -Assuming your environment is set up as described in that document, -create an instance of :class:`Client `. - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client() - -Projects --------- - -A project is the top-level container in the ``DNS`` API: it is tied -closely to billing, and can provide default access control across all its -datasets. 
If no ``project`` is passed to the client container, the library -attempts to infer a project using the environment (including explicit -environment variables, GAE, or GCE). - -To override the project inferred from the environment, pass an explicit -``project`` to the constructor, or to either of the alternative -``classmethod`` factories: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - -Project Quotas -------------- - -Query the quotas for a given project: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> quotas = client.quotas() # API request - >>> for key, value in sorted(quotas.items()): - ... print('%s: %s' % (key, value)) - managedZones: 10000 - resourceRecordsPerRrset: 100 - rrsetsPerManagedZone: 10000 - rrsetAdditionsPerChange: 100 - rrsetDeletionsPerChange: 100 - totalRrdataSizePerChange: 10000 - - -Project ACLs -~~~~~~~~~~~~ - -Each project has an access control list granting reader / writer / owner -permission to one or more entities. This list cannot be queried or set -via the API: it must be managed using the Google Developer Console. - - -Managed Zones -------------- - -A "managed zone" is the container for DNS records for the same DNS name -suffix and has a set of name servers that accept and respond to queries: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> zone = client.zone('acme-co', 'example.com', - ... description='Acme Company zone') - - >>> zone.exists() # API request - False - >>> zone.create() # API request - >>> zone.exists() # API request - True - -List the zones for a given project: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> zones = client.list_zones() # API request - >>> [zone.name for zone in zones] - ['acme-co'] - - -Resource Record Sets --------------------- - -Each managed zone exposes a read-only set of resource records: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> zone = client.zone('acme-co', 'example.com') - >>> records, page_token = zone.list_resource_record_sets() # API request - >>> [(record.name, record.record_type, record.ttl, record.rrdatas) - ... for record in records] - [('example.com.', 'SOA', 21600, ['ns-cloud1.googledomains.com dns-admin.google.com 1 21600 3600 1209600 300'])] - -.. note:: - - The ``page_token`` returned from ``zone.list_resource_record_sets()`` will - be an opaque string if there are more resources than can be returned in a - single request. To enumerate them all, repeat calling - ``zone.list_resource_record_sets()``, passing the ``page_token``, until - the token is ``None``. E.g.: - - .. doctest:: - - >>> records, page_token = zone.list_resource_record_sets() # API request - >>> while page_token is not None: - ... next_batch, page_token = zone.list_resource_record_sets( - ... page_token=page_token) # API request - ... records.extend(next_batch) - - -Change requests ---------------- - -Update the resource record set for a zone by creating a change request -bundling additions to or deletions from the set. - - .. doctest:: - - >>> import time - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> zone = client.zone('acme-co', 'example.com') - >>> TWO_HOURS = 2 * 60 * 60 # seconds - >>> record_set = zone.resource_record_set( - ... 
'www.example.com', 'CNAME', TWO_HOURS, 'www1.example.com') - >>> changes = zone.changes() - >>> changes.add_record_set(record_set) - >>> changes.begin() # API request - >>> while changes.status != 'done': - ... print('Waiting for changes to complete') - ... time.sleep(60) # or whatever interval is appropriate - ... changes.reload() # API request - - -List changes made to the resource record set for a given zone: - - .. doctest:: - - >>> from gcloud import dns - >>> client = dns.Client(project='PROJECT_ID') - >>> zone = client.zone('acme-co', 'example.com') - >>> changes = [] - >>> changes, page_token = zone.list_changes() # API request - -.. note:: - - The ``page_token`` returned from ``zone.list_changes()`` will be - an opaque string if there are more changes than can be returned in a - single request. To enumerate them all, repeat calling - ``zone.list_changes()``, passing the ``page_token``, until the token - is ``None``. E.g.: - - .. doctest:: - - >>> changes, page_token = zone.list_changes() # API request - >>> while page_token is not None: - ... next_batch, page_token = zone.list_changes( - ... page_token=page_token) # API request - ... changes.extend(next_batch) diff --git a/docs/dns-zone.rst b/docs/dns-zone.rst deleted file mode 100644 index 059f535d6356..000000000000 --- a/docs/dns-zone.rst +++ /dev/null @@ -1,7 +0,0 @@ -Managed Zones -~~~~~~~~~~~~~ - -.. automodule:: gcloud.dns.zone - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/happybase-batch.rst b/docs/happybase-batch.rst deleted file mode 100644 index c1fc86b9d6e0..000000000000 --- a/docs/happybase-batch.rst +++ /dev/null @@ -1,7 +0,0 @@ -HappyBase Batch -~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.bigtable.happybase.batch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/happybase-connection.rst b/docs/happybase-connection.rst deleted file mode 100644 index 01485bbdbde0..000000000000 --- a/docs/happybase-connection.rst +++ /dev/null @@ -1,7 +0,0 @@ -HappyBase Connection -~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.bigtable.happybase.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/happybase-package.rst b/docs/happybase-package.rst deleted file mode 100644 index 22e6134f0fa5..000000000000 --- a/docs/happybase-package.rst +++ /dev/null @@ -1,7 +0,0 @@ -HappyBase Package -~~~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.bigtable.happybase.__init__ - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/happybase-pool.rst b/docs/happybase-pool.rst deleted file mode 100644 index 9390fd41c01d..000000000000 --- a/docs/happybase-pool.rst +++ /dev/null @@ -1,7 +0,0 @@ -HappyBase Connection Pool -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.bigtable.happybase.pool - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/happybase-table.rst b/docs/happybase-table.rst deleted file mode 100644 index b5f477d8058d..000000000000 --- a/docs/happybase-table.rst +++ /dev/null @@ -1,7 +0,0 @@ -HappyBase Table -~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.bigtable.happybase.table - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst index a9c1094a6789..51460d7b1ea1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,30 +6,6 @@ gcloud-api gcloud-auth -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: Datastore - - Client - datastore-entities - datastore-keys - datastore-queries - datastore-transactions - datastore-batches - datastore-helpers - -.. 
toctree:: - :maxdepth: 0 - :hidden: - :caption: Storage - - Client - storage-blobs - storage-buckets - storage-acl - storage-batch - .. toctree:: :maxdepth: 0 :hidden: @@ -42,83 +18,6 @@ pubsub-message pubsub-iam -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: BigQuery - - bigquery-usage - Client - bigquery-dataset - bigquery-job - bigquery-table - bigquery-query - -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: Cloud Bigtable - - bigtable-usage - HappyBase - bigtable-client-intro - bigtable-cluster-api - bigtable-table-api - bigtable-data-api - Client - bigtable-cluster - bigtable-table - bigtable-column-family - bigtable-row - bigtable-row-filters - bigtable-row-data - happybase-connection - happybase-pool - happybase-table - happybase-batch - -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: Resource Manager - - Overview - resource-manager-client - resource-manager-project - -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: DNS - - dns-usage - Client - dns-zone - dns-resource-record-set - dns-changes - -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: Cloud Search - - search-usage - Client - search-index - search-document - -.. toctree:: - :maxdepth: 0 - :hidden: - :caption: Cloud Logging - - logging-usage - Client - logging-logger - logging-entries - logging-metric - logging-sink - .. toctree:: :maxdepth: 0 :hidden: @@ -163,39 +62,4 @@ you can clone the repository from GitHub: $ cd gcloud-python $ python setup.py install ----- - -Cloud Datastore -~~~~~~~~~~~~~~~ - -`Google Cloud Datastore`_ is a fully managed, schemaless database for storing non-relational data. - -.. _Google Cloud Datastore: https://developers.google.com/datastore/ - -.. code-block:: python - - from gcloud import datastore - - client = datastore.Client() - key = client.key('Person') - - entity = datastore.Entity(key=key) - entity['name'] = 'Your name' - entity['age'] = 25 - client.put(entity) - -Cloud Storage -~~~~~~~~~~~~~ - -`Google Cloud Storage`_ allows you to store data on Google infrastructure. - -.. _Google Cloud Storage: https://developers.google.com/storage/ - -.. code-block:: python - - from gcloud import storage - - client = storage.Client() - bucket = client.get_bucket('') - blob = bucket.blob('my-test-file.txt') - blob.upload_from_string('this is test content!') +---- \ No newline at end of file diff --git a/docs/logging-client.rst b/docs/logging-client.rst deleted file mode 100644 index 528414e1a2e3..000000000000 --- a/docs/logging-client.rst +++ /dev/null @@ -1,16 +0,0 @@ -Logging Client -============== - -.. automodule:: gcloud.logging.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.logging.connection - :members: - :undoc-members: - :show-inheritance: - diff --git a/docs/logging-entries.rst b/docs/logging-entries.rst deleted file mode 100644 index a7b96721d30b..000000000000 --- a/docs/logging-entries.rst +++ /dev/null @@ -1,8 +0,0 @@ -Entries -======= - -.. automodule:: gcloud.logging.entries - :members: - :undoc-members: - :show-inheritance: - diff --git a/docs/logging-logger.rst b/docs/logging-logger.rst deleted file mode 100644 index 8deb9b434534..000000000000 --- a/docs/logging-logger.rst +++ /dev/null @@ -1,8 +0,0 @@ -Logger -====== - -.. 
automodule:: gcloud.logging.logger - :members: - :undoc-members: - :show-inheritance: - diff --git a/docs/logging-metric.rst b/docs/logging-metric.rst deleted file mode 100644 index 343634e8307d..000000000000 --- a/docs/logging-metric.rst +++ /dev/null @@ -1,7 +0,0 @@ -Metrics -======= - -.. automodule:: gcloud.logging.metric - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/logging-sink.rst b/docs/logging-sink.rst deleted file mode 100644 index bbfb62130f27..000000000000 --- a/docs/logging-sink.rst +++ /dev/null @@ -1,7 +0,0 @@ -Sinks -===== - -.. automodule:: gcloud.logging.sink - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/logging-usage.rst b/docs/logging-usage.rst deleted file mode 100644 index 6711bf207a0a..000000000000 --- a/docs/logging-usage.rst +++ /dev/null @@ -1,315 +0,0 @@ -Using the API -============= - - -Authentication and Configuration --------------------------------- - -- For an overview of authentication in ``gcloud-python``, - see :doc:`gcloud-auth`. - -- In addition to any authentication configuration, you should also set the - :envvar:`GCLOUD_PROJECT` environment variable for the project you'd like - to interact with. If you are Google App Engine or Google Compute Engine - this will be detected automatically. - -- After configuring your environment, create a - :class:`Client ` - - .. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - - or pass in ``credentials`` and ``project`` explicitly - - .. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client(project='my-project', credentials=creds) - - -Writing log entries -------------------- - -Write a simple text entry to a logger. - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> logger = client.logger('log_name') - >>> logger.log_text("A simple entry") # API call - -Write a dictionary entry to a logger. - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> logger = client.logger('log_name') - >>> logger.log_struct( - ... message="My second entry", - ... weather="partly cloudy") # API call - - -Retrieving log entries ----------------------- - -Fetch entries for the default project. - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> entries, token = client.list_entries() # API call - >>> for entry in entries: - ... timestamp = entry.timestamp.isoformat() - ... print('%sZ: %s | %s' % - ... (timestamp, entry.text_payload, entry.struct_payload)) - 2016-02-17T20:35:49.031864072Z: A simple entry | None - 2016-02-17T20:38:15.944418531Z: None | {'message': 'My second entry', 'weather': 'partly cloudy'} - -Fetch entries across multiple projects. - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> entries, token = client.list_entries( - ... project_ids=['one-project', 'another-project']) # API call - -Filter entries retrieved using the `Advanced Logs Filters`_ syntax - -.. _Advanced Logs Filters: https://cloud.google.com/logging/docs/view/advanced_filters - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> FILTER = "log:log_name AND textPayload:simple" - >>> entries, token = client.list_entries(filter=FILTER) # API call - -Sort entries in descending timestamp order. - -.. 
doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> entries, token = client.list_entries(order_by=logging.DESCENDING) # API call - -Retrieve entries in batches of 10, iterating until done. - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> retrieved = [] - >>> token = None - >>> while True: - ... entries, token = client.list_entries(page_size=10, page_token=token) # API call - ... retrieved.extend(entries) - ... if token is None: - ... break - -Retrieve entries for a single logger, sorting in descending timestamp order: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> logger = client.logger('log_name') - >>> entries, token = logger.list_entries(order_by=logging.DESCENDING) # API call - -Delete all entries for a logger -------------------------------- - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> logger = client.logger('log_name') - >>> logger.delete_entries() # API call - - -Manage log metrics ------------------- - -Metrics are counters of entries which match a given filter. They can be -used within Cloud Monitoring to create charts and alerts. - -Create a metric: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> metric = client.metric( - ... "robots", "Robots all up in your server", - ... filter='log:apache-access AND textPayload:robot') - >>> metric.exists() # API call - False - >>> metric.create() # API call - >>> metric.exists() # API call - True - -List all metrics for a project: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> metrics, token = client.list_metrics() - >>> len(metrics) - 1 - >>> metric = metrics[0] - >>> metric.name - "robots" - -Refresh local information about a metric: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> metric = client.metric("robots") - >>> metric.reload() # API call - >>> metric.description - "Robots all up in your server" - >>> metric.filter - "log:apache-access AND textPayload:robot" - -Update a metric: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> metric = client.metric("robots") - >>> metric.exists() # API call - True - >>> metric.reload() # API call - >>> metric.description = "Danger, Will Robinson!" - >>> metric.update() # API call - -Delete a metric: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> metric = client.metric("robots") - >>> metric.exists() # API call - True - >>> metric.delete() # API call - >>> metric.exists() # API call - False - - -Export log entries using sinks ------------------------------- - -Sinks allow exporting entries which match a given filter to Cloud Storage -buckets, BigQuery datasets, or Cloud Pub/Sub topics. - -Create a Cloud Storage sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink( - ... "robots-storage", - ... filter='log:apache-access AND textPayload:robot') - >>> sink.storage_bucket = "my-bucket-name" - >>> sink.exists() # API call - False - >>> sink.create() # API call - >>> sink.exists() # API call - True - -Create a BigQuery sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink( - ... "robots-bq", - ... 
filter='log:apache-access AND textPayload:robot') - >>> sink.bigquery_dataset = "projects/my-project/datasets/my-dataset" - >>> sink.exists() # API call - False - >>> sink.create() # API call - >>> sink.exists() # API call - True - -Create a Cloud Pub/Sub sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink( - ... "robots-pubsub", - ... filter='log:apache-access AND textPayload:robot') - >>> sink.pubsub_topic = 'projects/my-project/topics/my-topic' - >>> sink.exists() # API call - False - >>> sink.create() # API call - >>> sink.exists() # API call - True - -List all sinks for a project: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sinks, token = client.list_sinks() - >>> for sink in sinks: - ... print('%s: %s' % (sink.name, sink.destination)) - robots-storage: storage.googleapis.com/my-bucket-name - robots-bq: bigquery.googleapis.com/projects/my-project/datasets/my-dataset - robots-pubsub: pubsub.googleapis.com/projects/my-project/topics/my-topic - -Refresh local information about a sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink('robots-storage') - >>> sink.filter is None - True - >>> sink.reload() # API call - >>> sink.filter - 'log:apache-access AND textPayload:robot' - >>> sink.destination - 'storage.googleapis.com/my-bucket-name' - -Update a sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink("robots") - >>> sink.reload() # API call - >>> sink.filter = "log:apache-access" - >>> sink.update() # API call - -Delete a sink: - -.. doctest:: - - >>> from gcloud import logging - >>> client = logging.Client() - >>> sink = client.sink( - ... "robots", - ... filter='log:apache-access AND textPayload:robot') - >>> sink.exists() # API call - True - >>> sink.delete() # API call - >>> sink.exists() # API call - False diff --git a/docs/resource-manager-api.rst b/docs/resource-manager-api.rst deleted file mode 100644 index fca738c62513..000000000000 --- a/docs/resource-manager-api.rst +++ /dev/null @@ -1,86 +0,0 @@ -Resource Manager Overview -------------------------- - -The Cloud Resource Manager API provides methods that you can use -to programmatically manage your projects in the Google Cloud Platform. -With this API, you can do the following: - -- Get a list of all projects associated with an account -- Create new projects -- Update existing projects -- Delete projects -- Undelete, or recover, projects that you don't want to delete - -.. note:: - - Don't forget to look at the :ref:`Authentication` section below. - It's slightly different from the rest of this library. - -Here's a quick example of the full life-cycle: - -.. code-block:: python - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() - - >>> # List all projects you have access to - >>> for project in client.list_projects(): - ... print(project) - - >>> # Create a new project - >>> new_project = client.new_project('your-project-id-here', - ... 
name='My new project') - >>> new_project.create() - - >>> # Update an existing project - >>> project = client.fetch_project('my-existing-project') - >>> print(project) - - >>> project.name = 'Modified name' - >>> project.update() - >>> print(project) - - - >>> # Delete a project - >>> project = client.new_project('my-existing-project') - >>> project.delete() - - >>> # Undelete a project - >>> project = client.new_project('my-existing-project') - >>> project.undelete() - -.. _Authentication: - -Authentication -~~~~~~~~~~~~~~ - -Unlike the other APIs, the Resource Manager API is focused on managing your -various projects inside Google Cloud Platform. What this means (currently, as -of August 2015) is that you can't use a Service Account to work with some -parts of this API (for example, creating projects). - -The reason is actually pretty simple: if your API call is trying to do -something like create a project, what project's Service Account can you use? -Currently none. - -This means that for this API you should always use the credentials -provided by the `Google Cloud SDK`_, which you can get by running -``gcloud auth login``. - -.. _Google Cloud SDK: http://cloud.google.com/sdk - -Once you run that command, ``gcloud-python`` will automatically pick up the -credentials, and you can use the "automatic discovery" feature of the library. - -Start by authenticating: - -.. code-block:: bash - - $ gcloud auth login - -And then simply create a client: - -.. code-block:: python - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() diff --git a/docs/resource-manager-client.rst b/docs/resource-manager-client.rst deleted file mode 100644 index eda8e7ac1fb8..000000000000 --- a/docs/resource-manager-client.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. toctree:: - :maxdepth: 0 - :hidden: - -Client ------- - -.. automodule:: gcloud.resource_manager.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.resource_manager.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/resource-manager-project.rst b/docs/resource-manager-project.rst deleted file mode 100644 index 8b6b93bf133e..000000000000 --- a/docs/resource-manager-project.rst +++ /dev/null @@ -1,7 +0,0 @@ -Projects -~~~~~~~~ - -.. automodule:: gcloud.resource_manager.project - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/search-client.rst b/docs/search-client.rst deleted file mode 100644 index 6d83696000d2..000000000000 --- a/docs/search-client.rst +++ /dev/null @@ -1,15 +0,0 @@ -Search Client -============= - -.. automodule:: gcloud.search.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.search.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/search-document.rst b/docs/search-document.rst deleted file mode 100644 index 25b32d0cf2b6..000000000000 --- a/docs/search-document.rst +++ /dev/null @@ -1,7 +0,0 @@ -Documents -~~~~~~~~~ - -.. automodule:: gcloud.search.document - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/search-index.rst b/docs/search-index.rst deleted file mode 100644 index 5cb17dc9f0db..000000000000 --- a/docs/search-index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Indexes -~~~~~~~ - -.. 
automodule:: gcloud.search.index - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/search-usage.rst b/docs/search-usage.rst deleted file mode 100644 index 9d2ee0c05152..000000000000 --- a/docs/search-usage.rst +++ /dev/null @@ -1,199 +0,0 @@ -Using the API -============= - -Overview -~~~~~~~~ - -Cloud Search allows an application to quickly perform full-text and -geospatial searches without having to spin up instances -and without the hassle of managing and maintaining a search service. - -Cloud Search provides a model for indexing documents containing structured data, -with documents and indexes saved to a separate persistent store optimized -for search operations. - -The API supports full text matching on string fields and allows indexing -any number of documents in any number of indexes. - -Client ------- - -:class:`Client ` objects provide a means to -configure your Cloud Search applications. Each instance holds both a -``project`` and an authenticated connection to the Cloud Search service. - -For an overview of authentication in ``gcloud-python``, see :doc:`gcloud-auth`. - -Assuming your environment is set up as described in that document, -create an instance of :class:`Client `. - -.. doctest:: - - >>> from gcloud import search - >>> client = search.Client() - -Indexes -~~~~~~~ - -Indexes are searchable collections of documents. - -List all indexes in the client's project: - -.. doctest:: - - >>> indexes = client.list_indexes() # API call - >>> for index in indexes: - ... print(index.name) - ... field_names = ', '.join([field.name for field in index.fields]) - ... print('- %s' % field_names) - index-name - - field-1, field-2 - another-index-name - - field-3 - -Create a new index: - -.. doctest:: - - >>> new_index = client.index('new-index-name') - -.. note:: - - Indexes cannot be created, updated, or deleted directly on the server: - they are derived from the documents which are created "within" them. - -Documents -~~~~~~~~~ - -Create a document instance, which is not yet added to its index on -the server: - -.. doctest:: - - >>> index = client.index('index-id') - >>> document = index.document('document-1') - >>> document.exists() # API call - False - >>> document.rank - None - -Add one or more fields to the document: - -.. doctest:: - - >>> field = document.field('fieldname') - >>> field.add_value('string') - -Save the document into the index: - -.. doctest:: - - >>> document.create() # API call - >>> document.exists() # API call - True - >>> document.rank # set by the server - 1443648166 - -List all documents in an index: - -.. doctest:: - - >>> documents = index.list_documents() # API call - >>> [document.id for document in documents] - ['document-1'] - -Delete a document from its index: - -.. doctest:: - - >>> document = index.document('to-be-deleted') - >>> document.exists() # API call - True - >>> document.delete() # API call - >>> document.exists() # API call - False - -.. note:: - - To update a document in place after manipulating its fields or rank, just - recreate it. E.g.: - - .. doctest:: - - >>> document = index.document('document-id') - >>> document.exists() # API call - True - >>> document.rank = 12345 - >>> field = document.field('field-name') - >>> field.add_value('christina aguilera') - >>> document.create() # API call - -Fields -~~~~~~ - -Fields belong to documents and are the data that actually gets searched. 
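For instance, a minimal sketch of giving a document a couple of fields using the ``document.field()`` / ``add_value()`` pattern shown above (the document and field names here are illustrative only); the value types that ``add_value`` accepts are listed next:

.. doctest::

   >>> from gcloud import search
   >>> client = search.Client()
   >>> index = client.index('index-id')
   >>> document = index.document('person-1')
   >>> name = document.field('name')
   >>> name.add_value('Alice Example')
   >>> age = document.field('age')
   >>> age.add_value(25)
   >>> document.create()  # API call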
- -Each field can have multiple values, which can be of the following types: - -- String (Python2 :class:`unicode`, Python3 :class:`str`) -- Number (Python :class:`int` or :class:`float`) -- Timestamp (Python :class:`datetime.datetime`) -- Geovalue (Python tuple, (:class:`float`, :class:`float`)) - -String values can be tokenized using one of three different types of -tokenization, which can be passed when the value is added: - -- **Atom** (``atom``) means "don't tokenize this string", treat it as one - thing to compare against. - -- **Text** (``text``) means "treat this string as normal text" and split words - apart to be compared against. - -- **HTML** (``html``) means "treat this string as HTML", understanding the - tags, and treating the rest of the content like Text. - -.. doctest:: - - >>> from gcloud import search - >>> client = search.Client() - >>> index = client.index('index-id') - >>> document = index.document('document-id') - >>> field = document.field('field-name') - >>> field.add_value('britney spears', tokenization='atom') - >>> field.add_value('<h1>Britney Spears</h1>
', tokenization='html') - -Searching -~~~~~~~~~ - -After populating an index with documents, search through them by -issuing a search query: - -.. doctest:: - - >>> from gcloud import search - >>> client = search.Client() - >>> index = client.index('index-id') - >>> query = client.query('britney spears') - >>> matching_documents = index.search(query) # API call - >>> for document in matching_documents: - ... print(document.id) - ['document-id'] - -By default, all queries are sorted by the ``rank`` value set when the -document was created. See: -https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents#resource_representation.google.cloudsearch.v1.Document.rank - -To sort differently, use the ``order_by`` parameter: - -.. doctest:: - - >>> ordered = client.query('britney spears', order_by=['field1', '-field2']) - -Note that the ``-`` character before ``field2`` means that this query will -be sorted ascending by ``field1`` and then descending by ``field2``. - -To limit the fields to be returned in the match, use the ``fields`` paramater: - -.. doctest:: - - >>> projected = client.query('britney spears', fields=['field1', 'field2']) diff --git a/docs/storage-acl.rst b/docs/storage-acl.rst deleted file mode 100644 index 79fc6e4d2aea..000000000000 --- a/docs/storage-acl.rst +++ /dev/null @@ -1,7 +0,0 @@ -ACL -~~~ - -.. automodule:: gcloud.storage.acl - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/storage-batch.rst b/docs/storage-batch.rst deleted file mode 100644 index ceb92da278d9..000000000000 --- a/docs/storage-batch.rst +++ /dev/null @@ -1,7 +0,0 @@ -Batches -~~~~~~~ - -.. automodule:: gcloud.storage.batch - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/storage-blobs.rst b/docs/storage-blobs.rst deleted file mode 100644 index 384806d6e3d8..000000000000 --- a/docs/storage-blobs.rst +++ /dev/null @@ -1,7 +0,0 @@ -Blobs / Objects -~~~~~~~~~~~~~~~ - -.. automodule:: gcloud.storage.blob - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/storage-buckets.rst b/docs/storage-buckets.rst deleted file mode 100644 index 55c19a461b93..000000000000 --- a/docs/storage-buckets.rst +++ /dev/null @@ -1,7 +0,0 @@ -Buckets -~~~~~~~ - -.. automodule:: gcloud.storage.bucket - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/storage-client.rst b/docs/storage-client.rst deleted file mode 100644 index 36eb4a21525d..000000000000 --- a/docs/storage-client.rst +++ /dev/null @@ -1,15 +0,0 @@ -Storage Client -============== - -.. automodule:: gcloud.storage.client - :members: - :undoc-members: - :show-inheritance: - -Connection -~~~~~~~~~~ - -.. automodule:: gcloud.storage.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/gcloud/_helpers.py b/gcloud/_helpers.py index 7b91c00271da..d1bea396065d 100644 --- a/gcloud/_helpers.py +++ b/gcloud/_helpers.py @@ -24,7 +24,6 @@ import sys from threading import local as Local -from google.protobuf import timestamp_pb2 import six from six.moves.http_client import HTTPConnection @@ -146,19 +145,6 @@ def _ensure_tuple_or_list(arg_name, tuple_or_list): return list(tuple_or_list) -def _app_engine_id(): - """Gets the App Engine application ID if it can be inferred. - - :rtype: string or ``NoneType`` - :returns: App Engine application ID if running in App Engine, - else ``None``. - """ - if app_identity is None: - return None - - return app_identity.get_application_id() - - def _compute_engine_id(): """Gets the Compute Engine project ID if it can be inferred. 
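Together with the hunk that follows, removing ``_app_engine_id`` means project inference no longer falls back to App Engine or Compute Engine metadata. A rough sketch of the resulting behaviour of ``_determine_default_project`` (assuming ``_get_production_project`` simply reads the ``GCLOUD_PROJECT`` environment variable and no other fallbacks remain; this is an illustration, not the module's actual code):

.. code-block:: python

   import os

   def _determine_default_project(project=None):
       # Sketch only: an explicit argument wins; otherwise fall back to the
       # environment. The App Engine / Compute Engine lookups removed in
       # this diff no longer participate.
       if project is None:
           project = os.getenv('GCLOUD_PROJECT')
       return project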
@@ -216,12 +202,6 @@ def _determine_default_project(project=None): if project is None: project = _get_production_project() - if project is None: - project = _app_engine_id() - - if project is None: - project = _compute_engine_id() - return project @@ -392,39 +372,6 @@ def _to_bytes(value, encoding='ascii'): raise TypeError('%r could not be converted to bytes' % (value,)) -def _pb_timestamp_to_datetime(timestamp): - """Convert a Timestamp protobuf to a datetime object. - - :type timestamp: :class:`google.protobuf.timestamp_pb2.Timestamp` - :param timestamp: A Google returned timestamp protobuf. - - :rtype: :class:`datetime.datetime` - :returns: A UTC datetime object converted from a protobuf timestamp. - """ - return ( - _EPOCH + - datetime.timedelta( - seconds=timestamp.seconds, - microseconds=(timestamp.nanos / 1000.0), - ) - ) - - -def _datetime_to_pb_timestamp(when): - """Convert a datetime object to a Timestamp protobuf. - - :type when: :class:`datetime.datetime` - :param when: the datetime to convert - - :rtype: :class:`google.protobuf.timestamp_pb2.Timestamp` - :returns: A timestamp protobuf corresponding to the object. - """ - ms_value = _microseconds_from_datetime(when) - seconds, micros = divmod(ms_value, 10**6) - nanos = micros * 10**3 - return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) - - def _name_from_project_path(path, project, template): """Validate a URI path and get the leaf object's name. diff --git a/gcloud/bigquery/__init__.py b/gcloud/bigquery/__init__.py deleted file mode 100644 index cca30b80e91f..000000000000 --- a/gcloud/bigquery/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud BigQuery API wrapper. - -The main concepts with this API are: - -- :class:`gcloud.bigquery.dataset.Dataset` represents an collection of tables. - -- :class:`gcloud.bigquery.table.Table` represents a single "relation". -""" - -from gcloud.bigquery.client import Client -from gcloud.bigquery.connection import Connection -from gcloud.bigquery.dataset import Dataset -from gcloud.bigquery.table import SchemaField -from gcloud.bigquery.table import Table - - -SCOPE = Connection.SCOPE diff --git a/gcloud/bigquery/_helpers.py b/gcloud/bigquery/_helpers.py deleted file mode 100644 index 701aedb4c055..000000000000 --- a/gcloud/bigquery/_helpers.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Shared helper functions for BigQuery API classes.""" - -from gcloud._helpers import _datetime_from_microseconds - - -def _not_null(value, field): - return value is not None or field.mode != 'NULLABLE' - - -def _int_from_json(value, field): - if _not_null(value, field): - return int(value) - - -def _float_from_json(value, field): - if _not_null(value, field): - return float(value) - - -def _bool_from_json(value, field): - if _not_null(value, field): - return value.lower() in ['t', 'true', '1'] - - -def _datetime_from_json(value, field): - if _not_null(value, field): - # value will be a float in seconds, to microsecond precision, in UTC. - return _datetime_from_microseconds(1e6 * float(value)) - - -def _record_from_json(value, field): - if _not_null(value, field): - record = {} - for subfield, cell in zip(field.fields, value['f']): - converter = _CELLDATA_FROM_JSON[subfield.field_type] - if field.mode == 'REPEATED': - value = [converter(item, field) for item in cell['v']] - else: - value = converter(cell['v'], field) - record[subfield.name] = value - return record - - -def _string_from_json(value, _): - return value - - -_CELLDATA_FROM_JSON = { - 'INTEGER': _int_from_json, - 'FLOAT': _float_from_json, - 'BOOLEAN': _bool_from_json, - 'TIMESTAMP': _datetime_from_json, - 'RECORD': _record_from_json, - 'STRING': _string_from_json, -} - - -def _rows_from_json(rows, schema): - rows_data = [] - for row in rows: - row_data = [] - for field, cell in zip(schema, row['f']): - converter = _CELLDATA_FROM_JSON[field.field_type] - if field.mode == 'REPEATED': - row_data.append([converter(item, field) - for item in cell['v']]) - else: - row_data.append(converter(cell['v'], field)) - rows_data.append(tuple(row_data)) - return rows_data - - -class _ConfigurationProperty(object): - """Base property implementation. - - Values will be stored on a `_configuration` helper attribute of the - property's job instance. - - :type name: string - :param name: name of the property - """ - - def __init__(self, name): - self.name = name - self._backing_name = '_%s' % (self.name,) - - def __get__(self, instance, owner): - """Descriptor protocol: accessor""" - if instance is None: - return self - return getattr(instance._configuration, self._backing_name) - - def _validate(self, value): - """Subclasses override to impose validation policy.""" - pass - - def __set__(self, instance, value): - """Descriptor protocol: mutator""" - self._validate(value) - setattr(instance._configuration, self._backing_name, value) - - def __delete__(self, instance): - """Descriptor protocol: deleter""" - delattr(instance._configuration, self._backing_name) - - -class _TypedProperty(_ConfigurationProperty): - """Property implementation: validates based on value type. - - :type name: string - :param name: name of the property - - :type property_type: type or sequence of types - :param property_type: type to be validated - """ - def __init__(self, name, property_type): - super(_TypedProperty, self).__init__(name) - self.property_type = property_type - - def _validate(self, value): - if not isinstance(value, self.property_type): - raise ValueError('Required type: %s' % (self.property_type,)) - - -class _EnumProperty(_ConfigurationProperty): - """Pseudo-enumeration class. - - Subclasses must define ``ALLOWED`` as a class-level constant: it must - be a sequence of strings. - - :type name: string - :param name: name of the property - """ - def _validate(self, value): - """Check that ``value`` is one of the allowed values. 
- - :raises: ValueError if value is not allowed. - """ - if value not in self.ALLOWED: - raise ValueError('Pass one of: %s' % ', '.join(self.ALLOWED)) diff --git a/gcloud/bigquery/client.py b/gcloud/bigquery/client.py deleted file mode 100644 index 9b9f04d9267a..000000000000 --- a/gcloud/bigquery/client.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google BigQuery API.""" - - -from gcloud.client import JSONClient -from gcloud.bigquery.connection import Connection -from gcloud.bigquery.dataset import Dataset -from gcloud.bigquery.job import CopyJob -from gcloud.bigquery.job import ExtractTableToStorageJob -from gcloud.bigquery.job import LoadTableFromStorageJob -from gcloud.bigquery.job import QueryJob -from gcloud.bigquery.query import QueryResults - - -class Client(JSONClient): - """Client to bundle configuration needed for API requests. - - :type project: string - :param project: the project which the client acts on behalf of. Will be - passed when creating a dataset / job. If not passed, - falls back to the default inferred from the environment. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. - """ - - _connection_class = Connection - - def list_datasets(self, include_all=False, max_results=None, - page_token=None): - """List datasets for the project associated with this client. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/datasets/list - - :type include_all: boolean - :param include_all: True if results include hidden datasets. - - :type max_results: int - :param max_results: maximum number of datasets to return. If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of datasets. If - not passed, the API will return the first page of - datasets. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.bigquery.dataset.Dataset`, plus a - "next page token" string: if the token is not None, - indicates that more datasets can be retrieved with another - call (pass that value as ``page_token``). 
- """ - params = {} - - if include_all: - params['all'] = True - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/datasets' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - datasets = [Dataset.from_api_repr(resource, self) - for resource in resp.get('datasets', ())] - return datasets, resp.get('nextPageToken') - - def dataset(self, dataset_name): - """Construct a dataset bound to this client. - - :type dataset_name: string - :param dataset_name: Name of the dataset. - - :rtype: :class:`gcloud.bigquery.dataset.Dataset` - :returns: a new ``Dataset`` instance - """ - return Dataset(dataset_name, client=self) - - def job_from_resource(self, resource): - """Detect correct job type from resource and instantiate. - - :type resource: dict - :param resource: one job resource from API response - - :rtype; One of: - :class:`gcloud.bigquery.job.LoadTableFromStorageJob`, - :class:`gcloud.bigquery.job.CopyJob`, - :class:`gcloud.bigquery.job.ExtractTableToStorageJob`, - :class:`gcloud.bigquery.job.QueryJob`, - :class:`gcloud.bigquery.job.RunSyncQueryJob` - :returns: the job instance, constructed via the resource - """ - config = resource['configuration'] - if 'load' in config: - return LoadTableFromStorageJob.from_api_repr(resource, self) - elif 'copy' in config: - return CopyJob.from_api_repr(resource, self) - elif 'extract' in config: - return ExtractTableToStorageJob.from_api_repr(resource, self) - elif 'query' in config: - return QueryJob.from_api_repr(resource, self) - raise ValueError('Cannot parse job resource') - - def list_jobs(self, max_results=None, page_token=None, all_users=None, - state_filter=None): - """List jobs for the project associated with this client. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/list - - :type max_results: int - :param max_results: maximum number of jobs to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of jobs. If - not passed, the API will return the first page of - jobs. - - :type all_users: boolean - :param all_users: if true, include jobs owned by all users in the - project. - - :type state_filter: string - :param state_filter: if passed, include only jobs matching the given - state. One of - - * ``"done"`` - * ``"pending"`` - * ``"running"`` - - :rtype: tuple, (list, str) - :returns: list of job instances, plus a "next page token" string: - if the token is not ``None``, indicates that more jobs can be - retrieved with another call, passing that value as - ``page_token``). - """ - params = {'projection': 'full'} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - if all_users is not None: - params['allUsers'] = all_users - - if state_filter is not None: - params['stateFilter'] = state_filter - - path = '/projects/%s/jobs' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - jobs = [self.job_from_resource(resource) for resource in resp['jobs']] - return jobs, resp.get('nextPageToken') - - def load_table_from_storage(self, job_name, destination, *source_uris): - """Construct a job for loading data into a table from CloudStorage. 
- - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load - - :type job_name: string - :param job_name: Name of the job. - - :type destination: :class:`gcloud.bigquery.table.Table` - :param destination: Table into which data is to be loaded. - - :type source_uris: sequence of string - :param source_uris: URIs of data files to be loaded; in format - ``gs:///``. - - :rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob` - :returns: a new ``LoadTableFromStorageJob`` instance - """ - return LoadTableFromStorageJob(job_name, destination, source_uris, - client=self) - - def copy_table(self, job_name, destination, *sources): - """Construct a job for copying one or more tables into another table. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy - - :type job_name: string - :param job_name: Name of the job. - - :type destination: :class:`gcloud.bigquery.table.Table` - :param destination: Table into which data is to be copied. - - :type sources: sequence of :class:`gcloud.bigquery.table.Table` - :param sources: tables to be copied. - - :rtype: :class:`gcloud.bigquery.job.CopyJob` - :returns: a new ``CopyJob`` instance - """ - return CopyJob(job_name, destination, sources, client=self) - - def extract_table_to_storage(self, job_name, source, *destination_uris): - """Construct a job for extracting a table into Cloud Storage files. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract - - :type job_name: string - :param job_name: Name of the job. - - :type source: :class:`gcloud.bigquery.table.Table` - :param source: table to be extracted. - - :type destination_uris: sequence of string - :param destination_uris: URIs of CloudStorage file(s) into which - table data is to be extracted; in format - ``gs:///``. - - :rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob` - :returns: a new ``ExtractTableToStorageJob`` instance - """ - return ExtractTableToStorageJob(job_name, source, destination_uris, - client=self) - - def run_async_query(self, job_name, query): - """Construct a job for running a SQL query asynchronously. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query - - :type job_name: string - :param job_name: Name of the job. - - :type query: string - :param query: SQL query to be executed - - :rtype: :class:`gcloud.bigquery.job.QueryJob` - :returns: a new ``QueryJob`` instance - """ - return QueryJob(job_name, query, client=self) - - def run_sync_query(self, query): - """Run a SQL query synchronously. - - :type query: string - :param query: SQL query to be executed - - :rtype: :class:`gcloud.bigquery.query.QueryResults` - :returns: a new ``QueryResults`` instance - """ - return QueryResults(query, client=self) diff --git a/gcloud/bigquery/connection.py b/gcloud/bigquery/connection.py deleted file mode 100644 index 6195c1a4b0e8..000000000000 --- a/gcloud/bigquery/connection.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud bigquery connections.""" - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud BigQuery via the JSON REST API.""" - - API_BASE_URL = 'https://www.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v2' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/bigquery/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/bigquery', - 'https://www.googleapis.com/auth/cloud-platform') - """The scopes required for authenticating as a Cloud BigQuery consumer.""" diff --git a/gcloud/bigquery/dataset.py b/gcloud/bigquery/dataset.py deleted file mode 100644 index 64944be03fd3..000000000000 --- a/gcloud/bigquery/dataset.py +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API Datasets.""" -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud.exceptions import NotFound -from gcloud.bigquery.table import Table - - -class AccessGrant(object): - """Represent grant of an access role to an entity. - - Every entry in the access list will have exactly one of - ``userByEmail``, ``groupByEmail``, ``domain``, ``specialGroup`` or - ``view`` set. And if anything but ``view`` is set, it'll also have a - ``role`` specified. ``role`` is omitted for a ``view``, since - ``view`` s are always read-only. - - See https://cloud.google.com/bigquery/docs/reference/v2/datasets. - - :type role: string - :param role: Role granted to the entity. One of - - * ``'OWNER'`` - * ``'WRITER'`` - * ``'READER'`` - - May also be ``None`` if the ``entity_type`` is ``view``. - - :type entity_type: string - :param entity_type: Type of entity being granted the role. One of - :attr:`ENTITY_TYPES`. - - :type entity_id: string - :param entity_id: ID of entity being granted the role. - - :raises: :class:`ValueError` if the ``entity_type`` is not among - :attr:`ENTITY_TYPES`, or if a ``view`` has ``role`` set or - a non ``view`` **does not** have a ``role`` set. - """ - - ENTITY_TYPES = frozenset(['userByEmail', 'groupByEmail', 'domain', - 'specialGroup', 'view']) - """Allowed entity types.""" - - def __init__(self, role, entity_type, entity_id): - if entity_type not in self.ENTITY_TYPES: - message = 'Entity type %r not among: %s' % ( - entity_type, ', '.join(self.ENTITY_TYPES)) - raise ValueError(message) - if entity_type == 'view': - if role is not None: - raise ValueError('Role must be None for a view. 
Received ' - 'role: %r' % (role,)) - else: - if role is None: - raise ValueError('Role must be set for entity ' - 'type %r' % (entity_type,)) - - self.role = role - self.entity_type = entity_type - self.entity_id = entity_id - - def __repr__(self): - return '<AccessGrant: role=%s, entity_type=%s, entity_id=%s>' % ( - self.role, self.entity_type, self.entity_id) - - -class Dataset(object): - """Datasets are containers for tables. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/datasets - - :type name: string - :param name: the name of the dataset - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - - :type access_grants: list of :class:`AccessGrant` - :param access_grants: roles granted to entities for this dataset - """ - - _access_grants = None - - def __init__(self, name, client, access_grants=()): - self.name = name - self._client = client - self._properties = {} - # Let the @property do validation. - self.access_grants = access_grants - - @property - def project(self): - """Project bound to the dataset. - - :rtype: string - :returns: the project (derived from the client). - """ - return self._client.project - - @property - def path(self): - """URL path for the dataset's APIs. - - :rtype: string - :returns: the path based on project and dataset name. - """ - return '/projects/%s/datasets/%s' % (self.project, self.name) - - @property - def access_grants(self): - """Dataset's access grants. - - :rtype: list of :class:`AccessGrant` - :returns: roles granted to entities for this dataset - """ - return list(self._access_grants) - - @access_grants.setter - def access_grants(self, value): - """Update dataset's access grants - - :type value: list of :class:`AccessGrant` - :param value: roles granted to entities for this dataset - - :raises: TypeError if 'value' is not a sequence, or ValueError if - any item in the sequence is not an AccessGrant - """ - if not all(isinstance(field, AccessGrant) for field in value): - raise ValueError('Values must be AccessGrant instances') - self._access_grants = tuple(value) - - @property - def created(self): - """Datetime at which the dataset was created. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the creation time (None until set from the server). - """ - creation_time = self._properties.get('creationTime') - if creation_time is not None: - # creation_time will be in milliseconds. - return _datetime_from_microseconds(1000.0 * creation_time) - - @property - def dataset_id(self): - """ID for the dataset resource. - - :rtype: string, or ``NoneType`` - :returns: the ID (None until set from the server). - """ - return self._properties.get('id') - - @property - def etag(self): - """ETag for the dataset resource. - - :rtype: string, or ``NoneType`` - :returns: the ETag (None until set from the server). - """ - return self._properties.get('etag') - - @property - def modified(self): - """Datetime at which the dataset was last modified. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the modification time (None until set from the server). - """ - modified_time = self._properties.get('lastModifiedTime') - if modified_time is not None: - # modified_time will be in milliseconds. - return _datetime_from_microseconds(1000.0 * modified_time) - - @property - def self_link(self): - """URL for the dataset resource. - - :rtype: string, or ``NoneType`` - :returns: the URL (None until set from the server). 
- """ - return self._properties.get('selfLink') - - @property - def default_table_expiration_ms(self): - """Default expiration time for tables in the dataset. - - :rtype: integer, or ``NoneType`` - :returns: The time in milliseconds, or None (the default). - """ - return self._properties.get('defaultTableExpirationMs') - - @default_table_expiration_ms.setter - def default_table_expiration_ms(self, value): - """Update default expiration time for tables in the dataset. - - :type value: integer, or ``NoneType`` - :param value: new default time, in milliseconds - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.integer_types) and value is not None: - raise ValueError("Pass an integer, or None") - self._properties['defaultTableExpirationMs'] = value - - @property - def description(self): - """Description of the dataset. - - :rtype: string, or ``NoneType`` - :returns: The description as set by the user, or None (the default). - """ - return self._properties.get('description') - - @description.setter - def description(self, value): - """Update description of the dataset. - - :type value: string, or ``NoneType`` - :param value: new description - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['description'] = value - - @property - def friendly_name(self): - """Title of the dataset. - - :rtype: string, or ``NoneType`` - :returns: The name as set by the user, or None (the default). - """ - return self._properties.get('friendlyName') - - @friendly_name.setter - def friendly_name(self, value): - """Update title of the dataset. - - :type value: string, or ``NoneType`` - :param value: new title - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['friendlyName'] = value - - @property - def location(self): - """Location in which the dataset is hosted. - - :rtype: string, or ``NoneType`` - :returns: The location as set by the user, or None (the default). - """ - return self._properties.get('location') - - @location.setter - def location(self, value): - """Update location in which the dataset is hosted. - - :type value: string, or ``NoneType`` - :param value: new location - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['location'] = value - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a dataset given its API representation - - :type resource: dict - :param resource: dataset resource representation returned from the API - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. - - :rtype: :class:`gcloud.bigquery.dataset.Dataset` - :returns: Dataset parsed from ``resource``. - """ - if ('datasetReference' not in resource or - 'datasetId' not in resource['datasetReference']): - raise KeyError('Resource lacks required identity information:' - '["datasetReference"]["datasetId"]') - name = resource['datasetReference']['datasetId'] - dataset = cls(name, client=client) - dataset._set_properties(resource) - return dataset - - def _require_client(self, client): - """Check client or verify over-ride. 
- - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :rtype: :class:`gcloud.bigquery.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - @staticmethod - def _parse_access_grants(access): - """Parse a resource fragment into a set of access grants. - - ``role`` augments the entity type and present **unless** the entity - type is ``view``. - - :type access: list of mappings - :param access: each mapping represents a single access grant - - :rtype: list of :class:`AccessGrant` - :returns: a list of parsed grants - :raises: :class:`ValueError` if a grant in ``access`` has more keys - than ``role`` and one additional key. - """ - result = [] - for grant in access: - grant = grant.copy() - role = grant.pop('role', None) - entity_type, entity_id = grant.popitem() - if len(grant) != 0: - raise ValueError('Grant has unexpected keys remaining.', grant) - result.append( - AccessGrant(role, entity_type, entity_id)) - return result - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - self._properties.clear() - cleaned = api_response.copy() - access = cleaned.pop('access', ()) - self.access_grants = self._parse_access_grants(access) - if 'creationTime' in cleaned: - cleaned['creationTime'] = float(cleaned['creationTime']) - if 'lastModifiedTime' in cleaned: - cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime']) - self._properties.update(cleaned) - - def _build_access_resource(self): - """Generate a resource fragment for dataset's access grants.""" - result = [] - for grant in self.access_grants: - info = {grant.entity_type: grant.entity_id} - if grant.role is not None: - info['role'] = grant.role - result.append(info) - return result - - def _build_resource(self): - """Generate a resource for ``create`` or ``update``.""" - resource = { - 'datasetReference': { - 'projectId': self.project, 'datasetId': self.name}, - } - if self.default_table_expiration_ms is not None: - value = self.default_table_expiration_ms - resource['defaultTableExpirationMs'] = value - - if self.description is not None: - resource['description'] = self.description - - if self.friendly_name is not None: - resource['friendlyName'] = self.friendly_name - - if self.location is not None: - resource['location'] = self.location - - if len(self.access_grants) > 0: - resource['access'] = self._build_access_resource() - - return resource - - def create(self, client=None): - """API call: create the dataset via a PUT request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables/insert - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
- """ - client = self._require_client(client) - path = '/projects/%s/datasets' % (self.project,) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test for the existence of the dataset via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/datasets/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path, - query_params={'fields': 'id'}) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: refresh dataset properties via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/datasets/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) - - def patch(self, client=None, **kw): - """API call: update individual dataset properties via a PATCH request - - See - https://cloud.google.com/bigquery/docs/reference/v2/datasets/patch - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :type kw: ``dict`` - :param kw: properties to be patched. - - :raises: ValueError for invalid value types. - """ - client = self._require_client(client) - - partial = {} - - if 'default_table_expiration_ms' in kw: - value = kw['default_table_expiration_ms'] - if not isinstance(value, six.integer_types) and value is not None: - raise ValueError("Pass an integer, or None") - partial['defaultTableExpirationMs'] = value - - if 'description' in kw: - partial['description'] = kw['description'] - - if 'friendly_name' in kw: - partial['friendlyName'] = kw['friendly_name'] - - if 'location' in kw: - partial['location'] = kw['location'] - - api_response = client.connection.api_request( - method='PATCH', path=self.path, data=partial) - self._set_properties(api_response) - - def update(self, client=None): - """API call: update dataset properties via a PUT request - - See - https://cloud.google.com/bigquery/docs/reference/v2/datasets/update - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - api_response = client.connection.api_request( - method='PUT', path=self.path, data=self._build_resource()) - self._set_properties(api_response) - - def delete(self, client=None): - """API call: delete the dataset via a DELETE request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables/delete - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
- """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) - - def list_tables(self, max_results=None, page_token=None): - """List tables for the project associated with this client. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables/list - - :type max_results: int - :param max_results: maximum number of tables to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of datasets. If - not passed, the API will return the first page of - datasets. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.bigquery.table.Table`, plus a - "next page token" string: if not ``None``, indicates that - more tables can be retrieved with another call (pass that - value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/datasets/%s/tables' % (self.project, self.name) - connection = self._client.connection - resp = connection.api_request(method='GET', path=path, - query_params=params) - tables = [Table.from_api_repr(resource, self) - for resource in resp.get('tables', ())] - return tables, resp.get('nextPageToken') - - def table(self, name, schema=()): - """Construct a table bound to this dataset. - - :type name: string - :param name: Name of the table. - - :type schema: list of :class:`gcloud.bigquery.table.SchemaField` - :param schema: The table's schema - - :rtype: :class:`gcloud.bigquery.table.Table` - :returns: a new ``Table`` instance - """ - return Table(name, dataset=self, schema=schema) diff --git a/gcloud/bigquery/job.py b/gcloud/bigquery/job.py deleted file mode 100644 index dfcac73edd33..000000000000 --- a/gcloud/bigquery/job.py +++ /dev/null @@ -1,1023 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Define API Jobs.""" - -import six - -from gcloud.exceptions import NotFound -from gcloud._helpers import _datetime_from_microseconds -from gcloud.bigquery.dataset import Dataset -from gcloud.bigquery.table import SchemaField -from gcloud.bigquery.table import Table -from gcloud.bigquery.table import _build_schema_resource -from gcloud.bigquery.table import _parse_schema_resource -from gcloud.bigquery._helpers import _EnumProperty -from gcloud.bigquery._helpers import _TypedProperty - - -class Compression(_EnumProperty): - """Pseudo-enum for ``compression`` properties.""" - GZIP = 'GZIP' - NONE = 'NONE' - ALLOWED = (GZIP, NONE) - - -class CreateDisposition(_EnumProperty): - """Pseudo-enum for ``create_disposition`` properties.""" - CREATE_IF_NEEDED = 'CREATE_IF_NEEDED' - CREATE_NEVER = 'CREATE_NEVER' - ALLOWED = (CREATE_IF_NEEDED, CREATE_NEVER) - - -class DestinationFormat(_EnumProperty): - """Pseudo-enum for ``destination_format`` properties.""" - CSV = 'CSV' - NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' - AVRO = 'AVRO' - ALLOWED = (CSV, NEWLINE_DELIMITED_JSON, AVRO) - - -class Encoding(_EnumProperty): - """Pseudo-enum for ``encoding`` properties.""" - UTF_8 = 'UTF-8' - ISO_8559_1 = 'ISO-8559-1' - ALLOWED = (UTF_8, ISO_8559_1) - - -class QueryPriority(_EnumProperty): - """Pseudo-enum for ``QueryJob.priority`` property.""" - INTERACTIVE = 'INTERACTIVE' - BATCH = 'BATCH' - ALLOWED = (INTERACTIVE, BATCH) - - -class SourceFormat(_EnumProperty): - """Pseudo-enum for ``source_format`` properties.""" - CSV = 'CSV' - DATASTORE_BACKUP = 'DATASTORE_BACKUP' - NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' - ALLOWED = (CSV, DATASTORE_BACKUP, NEWLINE_DELIMITED_JSON) - - -class WriteDisposition(_EnumProperty): - """Pseudo-enum for ``write_disposition`` properties.""" - WRITE_APPEND = 'WRITE_APPEND' - WRITE_TRUNCATE = 'WRITE_TRUNCATE' - WRITE_EMPTY = 'WRITE_EMPTY' - ALLOWED = (WRITE_APPEND, WRITE_TRUNCATE, WRITE_EMPTY) - - -class _BaseJob(object): - """Base class for jobs. - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - """ - def __init__(self, client): - self._client = client - self._properties = {} - - @property - def project(self): - """Project bound to the job. - - :rtype: string - :returns: the project (derived from the client). - """ - return self._client.project - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :rtype: :class:`gcloud.bigquery.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - -class _AsyncJob(_BaseJob): - """Base class for asynchronous jobs. - - :type name: string - :param name: the name of the job - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - """ - def __init__(self, name, client): - super(_AsyncJob, self).__init__(client) - self.name = name - - @property - def job_type(self): - """Type of job - - :rtype: string - :returns: one of 'load', 'copy', 'extract', 'query' - """ - return self._JOB_TYPE - - @property - def path(self): - """URL path for the job's APIs. 
- - :rtype: string - :returns: the path based on project and job name. - """ - return '/projects/%s/jobs/%s' % (self.project, self.name) - - @property - def etag(self): - """ETag for the job resource. - - :rtype: string, or ``NoneType`` - :returns: the ETag (None until set from the server). - """ - return self._properties.get('etag') - - @property - def self_link(self): - """URL for the job resource. - - :rtype: string, or ``NoneType`` - :returns: the URL (None until set from the server). - """ - return self._properties.get('selfLink') - - @property - def user_email(self): - """E-mail address of user who submitted the job. - - :rtype: string, or ``NoneType`` - :returns: the URL (None until set from the server). - """ - return self._properties.get('user_email') - - @property - def created(self): - """Datetime at which the job was created. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the creation time (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - millis = statistics.get('creationTime') - if millis is not None: - return _datetime_from_microseconds(millis * 1000.0) - - @property - def started(self): - """Datetime at which the job was started. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the start time (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - millis = statistics.get('startTime') - if millis is not None: - return _datetime_from_microseconds(millis * 1000.0) - - @property - def ended(self): - """Datetime at which the job finished. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the end time (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - millis = statistics.get('endTime') - if millis is not None: - return _datetime_from_microseconds(millis * 1000.0) - - @property - def error_result(self): - """Error information about the job as a whole. - - :rtype: mapping, or ``NoneType`` - :returns: the error information (None until set from the server). - """ - status = self._properties.get('status') - if status is not None: - return status.get('errorResult') - - @property - def errors(self): - """Information about individual errors generated by the job. - - :rtype: list of mappings, or ``NoneType`` - :returns: the error information (None until set from the server). - """ - status = self._properties.get('status') - if status is not None: - return status.get('errors') - - @property - def state(self): - """Status of the job. - - :rtype: string, or ``NoneType`` - :returns: the state (None until set from the server). 
- """ - status = self._properties.get('status') - if status is not None: - return status.get('state') - - def _scrub_local_properties(self, cleaned): - """Helper: handle subclass properties in cleaned.""" - pass - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - cleaned = api_response.copy() - self._scrub_local_properties(cleaned) - - statistics = cleaned.get('statistics', {}) - if 'creationTime' in statistics: - statistics['creationTime'] = float(statistics['creationTime']) - if 'startTime' in statistics: - statistics['startTime'] = float(statistics['startTime']) - if 'endTime' in statistics: - statistics['endTime'] = float(statistics['endTime']) - - self._properties.clear() - self._properties.update(cleaned) - - @classmethod - def _get_resource_config(cls, resource): - """Helper for :meth:`from_api_repr` - - :type resource: dict - :param resource: resource for the job - - :rtype: dict - :returns: tuple (string, dict), where the first element is the - job name and the second contains job-specific configuration. - :raises: :class:`KeyError` if the resource has no identifier, or - is missing the appropriate configuration. - """ - if ('jobReference' not in resource or - 'jobId' not in resource['jobReference']): - raise KeyError('Resource lacks required identity information: ' - '["jobReference"]["jobId"]') - name = resource['jobReference']['jobId'] - if ('configuration' not in resource or - cls._JOB_TYPE not in resource['configuration']): - raise KeyError('Resource lacks required configuration: ' - '["configuration"]["%s"]' % cls._JOB_TYPE) - config = resource['configuration'][cls._JOB_TYPE] - return name, config - - def begin(self, client=None): - """API call: begin the job via a POST request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - path = '/projects/%s/jobs' % (self.project,) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test for the existence of the job via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/jobs/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path, - query_params={'fields': 'id'}) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: refresh job properties via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/jobs/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
- """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) - - def cancel(self, client=None): - """API call: cancel job via a POST request - - See - https://cloud.google.com/bigquery/docs/reference/v2/jobs/cancel - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='POST', path='%s/cancel' % (self.path,)) - self._set_properties(api_response) - - -class _LoadConfiguration(object): - """User-settable configuration options for load jobs. - - Values which are ``None`` -> server defaults. - """ - _allow_jagged_rows = None - _allow_quoted_newlines = None - _create_disposition = None - _encoding = None - _field_delimiter = None - _ignore_unknown_values = None - _max_bad_records = None - _quote_character = None - _skip_leading_rows = None - _source_format = None - _write_disposition = None - - -class LoadTableFromStorageJob(_AsyncJob): - """Asynchronous job for loading data into a table from CloudStorage. - - :type name: string - :param name: the name of the job - - :type destination: :class:`gcloud.bigquery.table.Table` - :param destination: Table into which data is to be loaded. - - :type source_uris: sequence of string - :param source_uris: URIs of one or more data files to be loaded, in - format ``gs:///``. - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - - :type schema: list of :class:`gcloud.bigquery.table.SchemaField` - :param schema: The job's schema - """ - - _schema = None - _JOB_TYPE = 'load' - - def __init__(self, name, destination, source_uris, client, schema=()): - super(LoadTableFromStorageJob, self).__init__(name, client) - self.destination = destination - self.source_uris = source_uris - # Let the @property do validation. - self.schema = schema - self._configuration = _LoadConfiguration() - - @property - def schema(self): - """Table's schema. - - :rtype: list of :class:`SchemaField` - :returns: fields describing the schema - """ - return list(self._schema) - - @schema.setter - def schema(self, value): - """Update table's schema - - :type value: list of :class:`SchemaField` - :param value: fields describing the schema - - :raises: TypeError if 'value' is not a sequence, or ValueError if - any item in the sequence is not a SchemaField - """ - if not all(isinstance(field, SchemaField) for field in value): - raise ValueError('Schema items must be fields') - self._schema = tuple(value) - - @property - def input_file_bytes(self): - """Count of bytes loaded from source files. - - :rtype: integer, or ``NoneType`` - :returns: the count (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - return int(statistics['load']['inputFileBytes']) - - @property - def input_files(self): - """Count of source files. - - :rtype: integer, or ``NoneType`` - :returns: the count (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - return int(statistics['load']['inputFiles']) - - @property - def output_bytes(self): - """Count of bytes saved to destination table. 
- - :rtype: integer, or ``NoneType`` - :returns: the count (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - return int(statistics['load']['outputBytes']) - - @property - def output_rows(self): - """Count of rows saved to destination table. - - :rtype: integer, or ``NoneType`` - :returns: the count (None until set from the server). - """ - statistics = self._properties.get('statistics') - if statistics is not None: - return int(statistics['load']['outputRows']) - - allow_jagged_rows = _TypedProperty('allow_jagged_rows', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowJaggedRows - """ - - allow_quoted_newlines = _TypedProperty('allow_quoted_newlines', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowQuotedNewlines - """ - - create_disposition = CreateDisposition('create_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition - """ - - encoding = Encoding('encoding') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding - """ - - field_delimiter = _TypedProperty('field_delimiter', six.string_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.fieldDelimiter - """ - - ignore_unknown_values = _TypedProperty('ignore_unknown_values', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.ignoreUnknownValues - """ - - max_bad_records = _TypedProperty('max_bad_records', six.integer_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.maxBadRecords - """ - - quote_character = _TypedProperty('quote_character', six.string_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.quote - """ - - skip_leading_rows = _TypedProperty('skip_leading_rows', six.integer_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.skipLeadingRows - """ - - source_format = SourceFormat('source_format') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.sourceFormat - """ - - write_disposition = WriteDisposition('write_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition - """ - - def _populate_config_resource(self, configuration): - """Helper for _build_resource: copy config properties to resource""" - if self.allow_jagged_rows is not None: - configuration['allowJaggedRows'] = self.allow_jagged_rows - if self.allow_quoted_newlines is not None: - configuration['allowQuotedNewlines'] = self.allow_quoted_newlines - if self.create_disposition is not None: - configuration['createDisposition'] = self.create_disposition - if self.encoding is not None: - configuration['encoding'] = self.encoding - if self.field_delimiter is not None: - configuration['fieldDelimiter'] = self.field_delimiter - if self.ignore_unknown_values is not None: - configuration['ignoreUnknownValues'] = self.ignore_unknown_values - if self.max_bad_records is not None: - configuration['maxBadRecords'] = self.max_bad_records - if self.quote_character is not None: - configuration['quote'] = self.quote_character - if self.skip_leading_rows is not None: - configuration['skipLeadingRows'] = self.skip_leading_rows - if self.source_format is not None: - configuration['sourceFormat'] = 
self.source_format - if self.write_disposition is not None: - configuration['writeDisposition'] = self.write_disposition - - def _build_resource(self): - """Generate a resource for :meth:`begin`.""" - resource = { - 'jobReference': { - 'projectId': self.project, - 'jobId': self.name, - }, - 'configuration': { - self._JOB_TYPE: { - 'sourceUris': self.source_uris, - 'destinationTable': { - 'projectId': self.destination.project, - 'datasetId': self.destination.dataset_name, - 'tableId': self.destination.name, - }, - }, - }, - } - configuration = resource['configuration'][self._JOB_TYPE] - self._populate_config_resource(configuration) - - if len(self.schema) > 0: - configuration['schema'] = { - 'fields': _build_schema_resource(self.schema)} - - return resource - - def _scrub_local_properties(self, cleaned): - """Helper: handle subclass properties in cleaned.""" - schema = cleaned.pop('schema', {'fields': ()}) - self.schema = _parse_schema_resource(schema) - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a job given its API representation - - .. note: - - This method assumes that the project found in the resource matches - the client's project. - - :type resource: dict - :param resource: dataset job representation returned from the API - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. - - :rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob` - :returns: Job parsed from ``resource``. - """ - name, config = cls._get_resource_config(resource) - dest_config = config['destinationTable'] - dataset = Dataset(dest_config['datasetId'], client) - destination = Table(dest_config['tableId'], dataset) - source_urls = config.get('sourceUris', ()) - job = cls(name, destination, source_urls, client=client) - job._set_properties(resource) - return job - - -class _CopyConfiguration(object): - """User-settable configuration options for copy jobs. - - Values which are ``None`` -> server defaults. - """ - _create_disposition = None - _write_disposition = None - - -class CopyJob(_AsyncJob): - """Asynchronous job: copy data into a table from other tables. - - :type name: string - :param name: the name of the job - - :type destination: :class:`gcloud.bigquery.table.Table` - :param destination: Table into which data is to be loaded. - - :type sources: list of :class:`gcloud.bigquery.table.Table` - :param sources: Table into which data is to be loaded. - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). 
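Continuing that load-job sketch (values are illustrative), the typed configuration properties above map one-to-one onto the ``configuration.load`` keys emitted by ``_build_resource``::

    >>> job.source_format = 'CSV'
    >>> job.skip_leading_rows = 1
    >>> job.field_delimiter = ','
    >>> job.max_bad_records = 0
    >>> job.create_disposition = 'CREATE_IF_NEEDED'
    >>> job.write_disposition = 'WRITE_TRUNCATE'
    >>> job.begin()    # resource now includes the schema and these keys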
- """ - - _JOB_TYPE = 'copy' - - def __init__(self, name, destination, sources, client): - super(CopyJob, self).__init__(name, client) - self.destination = destination - self.sources = sources - self._configuration = _CopyConfiguration() - - create_disposition = CreateDisposition('create_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.createDisposition - """ - - write_disposition = WriteDisposition('write_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.writeDisposition - """ - - def _populate_config_resource(self, configuration): - """Helper for _build_resource: copy config properties to resource""" - if self.create_disposition is not None: - configuration['createDisposition'] = self.create_disposition - if self.write_disposition is not None: - configuration['writeDisposition'] = self.write_disposition - - def _build_resource(self): - """Generate a resource for :meth:`begin`.""" - - source_refs = [{ - 'projectId': table.project, - 'datasetId': table.dataset_name, - 'tableId': table.name, - } for table in self.sources] - - resource = { - 'jobReference': { - 'projectId': self.project, - 'jobId': self.name, - }, - 'configuration': { - self._JOB_TYPE: { - 'sourceTables': source_refs, - 'destinationTable': { - 'projectId': self.destination.project, - 'datasetId': self.destination.dataset_name, - 'tableId': self.destination.name, - }, - }, - }, - } - configuration = resource['configuration'][self._JOB_TYPE] - self._populate_config_resource(configuration) - - return resource - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a job given its API representation - - .. note: - - This method assumes that the project found in the resource matches - the client's project. - - :type resource: dict - :param resource: dataset job representation returned from the API - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. - - :rtype: :class:`gcloud.bigquery.job.CopyJob` - :returns: Job parsed from ``resource``. - """ - name, config = cls._get_resource_config(resource) - dest_config = config['destinationTable'] - dataset = Dataset(dest_config['datasetId'], client) - destination = Table(dest_config['tableId'], dataset) - sources = [] - for source_config in config['sourceTables']: - dataset = Dataset(source_config['datasetId'], client) - sources.append(Table(source_config['tableId'], dataset)) - job = cls(name, destination, sources, client=client) - job._set_properties(resource) - return job - - -class _ExtractConfiguration(object): - """User-settable configuration options for extract jobs. - - Values which are ``None`` -> server defaults. - """ - _compression = None - _destination_format = None - _field_delimiter = None - _print_header = None - - -class ExtractTableToStorageJob(_AsyncJob): - """Asynchronous job: extract data from a table into Cloud Storage. - - :type name: string - :param name: the name of the job - - :type source: :class:`gcloud.bigquery.table.Table` - :param source: Table into which data is to be loaded. - - :type destination_uris: list of string - :param destination_uris: URIs describing Cloud Storage blobs into which - extracted data will be written, in format - ``gs:///``. - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). 
- """ - _JOB_TYPE = 'extract' - - def __init__(self, name, source, destination_uris, client): - super(ExtractTableToStorageJob, self).__init__(name, client) - self.source = source - self.destination_uris = destination_uris - self._configuration = _ExtractConfiguration() - - compression = Compression('compression') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.compression - """ - - destination_format = DestinationFormat('destination_format') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.destinationFormat - """ - - field_delimiter = _TypedProperty('field_delimiter', six.string_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.fieldDelimiter - """ - - print_header = _TypedProperty('print_header', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extracted.printHeader - """ - - def _populate_config_resource(self, configuration): - """Helper for _build_resource: copy config properties to resource""" - if self.compression is not None: - configuration['compression'] = self.compression - if self.destination_format is not None: - configuration['destinationFormat'] = self.destination_format - if self.field_delimiter is not None: - configuration['fieldDelimiter'] = self.field_delimiter - if self.print_header is not None: - configuration['printHeader'] = self.print_header - - def _build_resource(self): - """Generate a resource for :meth:`begin`.""" - - source_ref = { - 'projectId': self.source.project, - 'datasetId': self.source.dataset_name, - 'tableId': self.source.name, - } - - resource = { - 'jobReference': { - 'projectId': self.project, - 'jobId': self.name, - }, - 'configuration': { - self._JOB_TYPE: { - 'sourceTable': source_ref, - 'destinationUris': self.destination_uris, - }, - }, - } - configuration = resource['configuration'][self._JOB_TYPE] - self._populate_config_resource(configuration) - - return resource - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a job given its API representation - - .. note: - - This method assumes that the project found in the resource matches - the client's project. - - :type resource: dict - :param resource: dataset job representation returned from the API - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. - - :rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob` - :returns: Job parsed from ``resource``. - """ - name, config = cls._get_resource_config(resource) - source_config = config['sourceTable'] - dataset = Dataset(source_config['datasetId'], client) - source = Table(source_config['tableId'], dataset) - destination_uris = config['destinationUris'] - job = cls(name, source, destination_uris, client=client) - job._set_properties(resource) - return job - - -class _AsyncQueryConfiguration(object): - """User-settable configuration options for asynchronous query jobs. - - Values which are ``None`` -> server defaults. - """ - _allow_large_results = None - _create_disposition = None - _default_dataset = None - _destination = None - _flatten_results = None - _priority = None - _use_query_cache = None - _write_disposition = None - - -class QueryJob(_AsyncJob): - """Asynchronous job: query tables. 
- - :type name: string - :param name: the name of the job - - :type query: string - :param query: SQL query string - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - """ - _JOB_TYPE = 'query' - - def __init__(self, name, query, client): - super(QueryJob, self).__init__(name, client) - self.query = query - self._configuration = _AsyncQueryConfiguration() - - allow_large_results = _TypedProperty('allow_large_results', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.allowLargeResults - """ - - create_disposition = CreateDisposition('create_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.createDisposition - """ - - default_dataset = _TypedProperty('default_dataset', Dataset) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset - """ - - destination = _TypedProperty('destination', Table) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.destinationTable - """ - - flatten_results = _TypedProperty('flatten_results', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.flattenResults - """ - - priority = QueryPriority('priority') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.priority - """ - - use_query_cache = _TypedProperty('use_query_cache', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.useQueryCache - """ - - write_disposition = WriteDisposition('write_disposition') - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.writeDisposition - """ - - def _destination_table_resource(self): - if self.destination is not None: - return { - 'projectId': self.destination.project, - 'datasetId': self.destination.dataset_name, - 'tableId': self.destination.name, - } - - def _populate_config_resource(self, configuration): - """Helper for _build_resource: copy config properties to resource""" - if self.allow_large_results is not None: - configuration['allowLargeResults'] = self.allow_large_results - if self.create_disposition is not None: - configuration['createDisposition'] = self.create_disposition - if self.default_dataset is not None: - configuration['defaultDataset'] = { - 'projectId': self.default_dataset.project, - 'datasetId': self.default_dataset.name, - } - if self.destination is not None: - table_res = self._destination_table_resource() - configuration['destinationTable'] = table_res - if self.flatten_results is not None: - configuration['flattenResults'] = self.flatten_results - if self.priority is not None: - configuration['priority'] = self.priority - if self.use_query_cache is not None: - configuration['useQueryCache'] = self.use_query_cache - if self.write_disposition is not None: - configuration['writeDisposition'] = self.write_disposition - - def _build_resource(self): - """Generate a resource for :meth:`begin`.""" - - resource = { - 'jobReference': { - 'projectId': self.project, - 'jobId': self.name, - }, - 'configuration': { - self._JOB_TYPE: { - 'query': self.query, - }, - }, - } - configuration = resource['configuration'][self._JOB_TYPE] - self._populate_config_resource(configuration) - - return resource - - def _scrub_local_properties(self, cleaned): - """Helper: handle subclass properties in cleaned. 
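A sketch for ``QueryJob`` and the configuration properties above (query text and names are placeholders); large result sets require a destination table plus ``allow_large_results``::

    >>> from gcloud import bigquery
    >>> from gcloud.bigquery.job import QueryJob
    >>> client = bigquery.Client()
    >>> dataset = client.dataset('dataset_name')
    >>> job = QueryJob(
    ...     'query-job-1',
    ...     'SELECT full_name, age FROM dataset_name.person_ages', client)
    >>> job.destination = dataset.table('query_results')
    >>> job.allow_large_results = True
    >>> job.write_disposition = 'WRITE_TRUNCATE'
    >>> job.use_query_cache = False
    >>> job.begin()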
- - .. note: - - This method assumes that the project found in the resource matches - the client's project. - """ - configuration = cleaned['configuration']['query'] - dest_remote = configuration.get('destinationTable') - - if dest_remote is None: - if self.destination is not None: - del self.destination - else: - dest_local = self._destination_table_resource() - if dest_remote != dest_local: - dataset = self._client.dataset(dest_remote['datasetId']) - self.destination = dataset.table(dest_remote['tableId']) - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a job given its API representation - - :type resource: dict - :param resource: dataset job representation returned from the API - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. - - :rtype: :class:`gcloud.bigquery.job.RunAsyncQueryJob` - :returns: Job parsed from ``resource``. - """ - name, config = cls._get_resource_config(resource) - query = config['query'] - job = cls(name, query, client=client) - job._set_properties(resource) - return job diff --git a/gcloud/bigquery/query.py b/gcloud/bigquery/query.py deleted file mode 100644 index f9158703ce68..000000000000 --- a/gcloud/bigquery/query.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API Queries.""" - -import six - -from gcloud.bigquery._helpers import _TypedProperty -from gcloud.bigquery._helpers import _rows_from_json -from gcloud.bigquery.dataset import Dataset -from gcloud.bigquery.job import QueryJob -from gcloud.bigquery.table import _parse_schema_resource - - -class _SyncQueryConfiguration(object): - """User-settable configuration options for synchronous query jobs. - - Values which are ``None`` -> server defaults. - """ - _default_dataset = None - _max_results = None - _timeout_ms = None - _preserve_nulls = None - _use_query_cache = None - - -class QueryResults(object): - """Synchronous job: query tables. - - :type query: string - :param query: SQL query string - - :type client: :class:`gcloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). - """ - def __init__(self, query, client): - self._client = client - self._properties = {} - self.query = query - self._configuration = _SyncQueryConfiguration() - self._job = None - - @property - def project(self): - """Project bound to the job. - - :rtype: string - :returns: the project (derived from the client). - """ - return self._client.project - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
- - :rtype: :class:`gcloud.bigquery.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - @property - def cache_hit(self): - """Query results served from cache. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#cacheHit - - :rtype: boolean or ``NoneType`` - :returns: True if the query results were served from cache (None - until set by the server). - """ - return self._properties.get('cacheHit') - - @property - def complete(self): - """Server completed query. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobComplete - - :rtype: boolean or ``NoneType`` - :returns: True if the query completed on the server (None - until set by the server). - """ - return self._properties.get('jobComplete') - - @property - def errors(self): - """Errors generated by the query. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#errors - - :rtype: list of mapping, or ``NoneType`` - :returns: Mappings describing errors generated on the server (None - until set by the server). - """ - return self._properties.get('errors') - - @property - def name(self): - """Job name, generated by the back-end. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobReference - - :rtype: list of mapping, or ``NoneType`` - :returns: Mappings describing errors generated on the server (None - until set by the server). - """ - return self._properties.get('jobReference', {}).get('jobId') - - @property - def job(self): - """Job instance used to run the query. - - :rtype: :class:`gcloud.bigquery.job.QueryJob`, or ``NoneType`` - :returns: Job instance used to run the query (None until - ``jobReference`` property is set by the server). - """ - if self._job is None: - job_ref = self._properties.get('jobReference') - if job_ref is not None: - self._job = QueryJob(job_ref['jobId'], self.query, - self._client) - return self._job - - @property - def page_token(self): - """Token for fetching next bach of results. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#pageToken - - :rtype: string, or ``NoneType`` - :returns: Token generated on the server (None until set by the server). - """ - return self._properties.get('pageToken') - - @property - def total_rows(self): - """Total number of rows returned by the query - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalRows - - :rtype: integer, or ``NoneType`` - :returns: Count generated on the server (None until set by the server). - """ - return self._properties.get('totalRows') - - @property - def total_bytes_processed(self): - """Total number of bytes processed by the query - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalBytesProcessed - - :rtype: integer, or ``NoneType`` - :returns: Count generated on the server (None until set by the server). - """ - return self._properties.get('totalBytesProcessed') - - @property - def rows(self): - """Query results. - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#rows - - :rtype: list of tuples of row values, or ``NoneType`` - :returns: fields describing the schema (None until set by the server). - """ - return _rows_from_json(self._properties.get('rows', ()), self.schema) - - @property - def schema(self): - """Schema for query results. 
- - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#schema - - :rtype: list of :class:`SchemaField`, or ``NoneType`` - :returns: fields describing the schema (None until set by the server). - """ - return _parse_schema_resource(self._properties.get('schema', {})) - - default_dataset = _TypedProperty('default_dataset', Dataset) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#defaultDataset - """ - - max_results = _TypedProperty('max_results', six.integer_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#maxResults - """ - - preserve_nulls = _TypedProperty('preserve_nulls', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#preserveNulls - """ - - timeout_ms = _TypedProperty('timeout_ms', six.integer_types) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#timeoutMs - """ - - use_query_cache = _TypedProperty('use_query_cache', bool) - """See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#useQueryCache - """ - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - self._properties.clear() - self._properties.update(api_response) - - def _build_resource(self): - """Generate a resource for :meth:`begin`.""" - resource = {'query': self.query} - - if self.default_dataset is not None: - resource['defaultDataset'] = { - 'projectId': self.project, - 'datasetId': self.default_dataset.name, - } - - if self.max_results is not None: - resource['maxResults'] = self.max_results - - if self.preserve_nulls is not None: - resource['preserveNulls'] = self.preserve_nulls - - if self.timeout_ms is not None: - resource['timeoutMs'] = self.timeout_ms - - if self.use_query_cache is not None: - resource['useQueryCache'] = self.use_query_cache - - return resource - - def run(self, client=None): - """API call: run the query via a POST request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/query - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - path = '/projects/%s/queries' % (self.project,) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def fetch_data(self, max_results=None, page_token=None, start_index=None, - timeout_ms=None, client=None): - """API call: fetch a page of query result data via a GET request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults - - :type max_results: integer or ``NoneType`` - :param max_results: maximum number of rows to return. - - :type page_token: string or ``NoneType`` - :param page_token: token representing a cursor into the table's rows. - - :type start_index: integer or ``NoneType`` - :param start_index: zero-based index of starting row - - :type timeout_ms: integer or ``NoneType`` - :param timeout_ms: timeout, in milliseconds, to wait for query to - complete - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
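A sketch of a synchronous query with ``QueryResults`` above: ``run`` posts the query, and once the server has assigned a job name, ``fetch_data`` pages through the rows (query text and sizes are placeholders)::

    >>> from gcloud import bigquery
    >>> from gcloud.bigquery.query import QueryResults
    >>> client = bigquery.Client()
    >>> query = QueryResults(
    ...     'SELECT full_name, age FROM dataset_name.person_ages', client)
    >>> query.timeout_ms = 10000
    >>> query.run()        # POST /projects/<project>/queries
    >>> query.complete     # True once the server finished within the timeout
    True
    >>> rows, total, token = query.fetch_data(max_results=100)
    >>> while token is not None:
    ...     more, total, token = query.fetch_data(max_results=100,
    ...                                           page_token=token)
    ...     rows.extend(more)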
- - :rtype: tuple - :returns: ``(row_data, total_rows, page_token)``, where ``row_data`` - is a list of tuples, one per result row, containing only - the values; ``total_rows`` is a count of the total number - of rows in the table; and ``page_token`` is an opaque - string which can be used to fetch the next batch of rows - (``None`` if no further batches can be fetched). - :raises: ValueError if the query has not yet been executed. - """ - if self.name is None: - raise ValueError("Query not yet executed: call 'run()'") - - client = self._require_client(client) - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - if start_index is not None: - params['startIndex'] = start_index - - if timeout_ms is not None: - params['timeoutMs'] = timeout_ms - - path = '/projects/%s/queries/%s' % (self.project, self.name) - response = client.connection.api_request(method='GET', - path=path, - query_params=params) - self._set_properties(response) - - total_rows = response.get('totalRows') - page_token = response.get('pageToken') - rows_data = _rows_from_json(response.get('rows', ()), self.schema) - - return rows_data, total_rows, page_token diff --git a/gcloud/bigquery/table.py b/gcloud/bigquery/table.py deleted file mode 100644 index 56e4f7eebe7b..000000000000 --- a/gcloud/bigquery/table.py +++ /dev/null @@ -1,1001 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API Datasets.""" - -import datetime -import json -import os - -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud._helpers import _microseconds_from_datetime -from gcloud._helpers import _millis_from_datetime -from gcloud.exceptions import NotFound -from gcloud.streaming.http_wrapper import Request -from gcloud.streaming.http_wrapper import make_api_request -from gcloud.streaming.transfer import RESUMABLE_UPLOAD -from gcloud.streaming.transfer import Upload -from gcloud.bigquery._helpers import _rows_from_json - - -_MARKER = object() - - -class SchemaField(object): - """Describe a single field within a table schema. - - :type name: string - :param name: the name of the field - - :type field_type: string - :param field_type: the type of the field (one of 'STRING', 'INTEGER', - 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD') - - :type mode: string - :param mode: the type of the field (one of 'NULLABLE', 'REQUIRED', - or 'REPEATED') - - :type description: string - :param description: optional description for the field - - :type fields: list of :class:`SchemaField`, or None - :param fields: subfields (requires ``field_type`` of 'RECORD'). - """ - def __init__(self, name, field_type, mode='NULLABLE', description=None, - fields=None): - self.name = name - self.field_type = field_type - self.mode = mode - self.description = description - self.fields = fields - - -class Table(object): - """Tables represent a set of rows whose values correspond to a schema. 
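A sketch of building a ``Table`` from the ``SchemaField`` records defined above (all names are placeholders); nested ``RECORD`` fields receive their subfields via ``fields``::

    >>> from gcloud import bigquery
    >>> from gcloud.bigquery.table import SchemaField
    >>> client = bigquery.Client()
    >>> dataset = client.dataset('dataset_name')
    >>> phone = SchemaField('phone_number', 'STRING')
    >>> contact = SchemaField('contact', 'RECORD', mode='REPEATED',
    ...                       fields=[phone])
    >>> table = dataset.table('person_ages', schema=[
    ...     SchemaField('full_name', 'STRING', mode='REQUIRED'),
    ...     SchemaField('age', 'INTEGER'),
    ...     contact])
    >>> table.create()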
- - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables - - :type name: string - :param name: the name of the table - - :type dataset: :class:`gcloud.bigquery.dataset.Dataset` - :param dataset: The dataset which contains the table. - - :type schema: list of :class:`SchemaField` - :param schema: The table's schema - """ - - _schema = None - - def __init__(self, name, dataset, schema=()): - self.name = name - self._dataset = dataset - self._properties = {} - # Let the @property do validation. - self.schema = schema - - @property - def project(self): - """Project bound to the table. - - :rtype: string - :returns: the project (derived from the dataset). - """ - return self._dataset.project - - @property - def dataset_name(self): - """Name of dataset containing the table. - - :rtype: string - :returns: the ID (derived from the dataset). - """ - return self._dataset.name - - @property - def path(self): - """URL path for the table's APIs. - - :rtype: string - :returns: the path based on project and dataste name. - """ - return '%s/tables/%s' % (self._dataset.path, self.name) - - @property - def schema(self): - """Table's schema. - - :rtype: list of :class:`SchemaField` - :returns: fields describing the schema - """ - return list(self._schema) - - @schema.setter - def schema(self, value): - """Update table's schema - - :type value: list of :class:`SchemaField` - :param value: fields describing the schema - - :raises: TypeError if 'value' is not a sequence, or ValueError if - any item in the sequence is not a SchemaField - """ - if not all(isinstance(field, SchemaField) for field in value): - raise ValueError('Schema items must be fields') - self._schema = tuple(value) - - @property - def created(self): - """Datetime at which the table was created. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the creation time (None until set from the server). - """ - creation_time = self._properties.get('creationTime') - if creation_time is not None: - # creation_time will be in milliseconds. - return _datetime_from_microseconds(1000.0 * creation_time) - - @property - def etag(self): - """ETag for the table resource. - - :rtype: string, or ``NoneType`` - :returns: the ETag (None until set from the server). - """ - return self._properties.get('etag') - - @property - def modified(self): - """Datetime at which the table was last modified. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the modification time (None until set from the server). - """ - modified_time = self._properties.get('lastModifiedTime') - if modified_time is not None: - # modified_time will be in milliseconds. - return _datetime_from_microseconds(1000.0 * modified_time) - - @property - def num_bytes(self): - """The size of the table in bytes. - - :rtype: integer, or ``NoneType`` - :returns: the byte count (None until set from the server). - """ - num_bytes_as_str = self._properties.get('numBytes') - if num_bytes_as_str is not None: - return int(num_bytes_as_str) - - @property - def num_rows(self): - """The number of rows in the table. - - :rtype: integer, or ``NoneType`` - :returns: the row count (None until set from the server). - """ - num_rows_as_str = self._properties.get('numRows') - if num_rows_as_str is not None: - return int(num_rows_as_str) - - @property - def self_link(self): - """URL for the table resource. - - :rtype: string, or ``NoneType`` - :returns: the URL (None until set from the server). 
- """ - return self._properties.get('selfLink') - - @property - def table_id(self): - """ID for the table resource. - - :rtype: string, or ``NoneType`` - :returns: the ID (None until set from the server). - """ - return self._properties.get('id') - - @property - def table_type(self): - """The type of the table. - - Possible values are "TABLE" or "VIEW". - - :rtype: string, or ``NoneType`` - :returns: the URL (None until set from the server). - """ - return self._properties.get('type') - - @property - def description(self): - """Description of the table. - - :rtype: string, or ``NoneType`` - :returns: The description as set by the user, or None (the default). - """ - return self._properties.get('description') - - @description.setter - def description(self, value): - """Update description of the table. - - :type value: string, or ``NoneType`` - :param value: new description - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['description'] = value - - @property - def expires(self): - """Datetime at which the table will be removed. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the expiration time, or None - """ - expiration_time = self._properties.get('expirationTime') - if expiration_time is not None: - # expiration_time will be in milliseconds. - return _datetime_from_microseconds(1000.0 * expiration_time) - - @expires.setter - def expires(self, value): - """Update datetime at which the table will be removed. - - :type value: ``datetime.datetime``, or ``NoneType`` - :param value: the new expiration time, or None - """ - if not isinstance(value, datetime.datetime) and value is not None: - raise ValueError("Pass a datetime, or None") - self._properties['expirationTime'] = _millis_from_datetime(value) - - @property - def friendly_name(self): - """Title of the table. - - :rtype: string, or ``NoneType`` - :returns: The name as set by the user, or None (the default). - """ - return self._properties.get('friendlyName') - - @friendly_name.setter - def friendly_name(self, value): - """Update title of the table. - - :type value: string, or ``NoneType`` - :param value: new title - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['friendlyName'] = value - - @property - def location(self): - """Location in which the table is hosted. - - :rtype: string, or ``NoneType`` - :returns: The location as set by the user, or None (the default). - """ - return self._properties.get('location') - - @location.setter - def location(self, value): - """Update location in which the table is hosted. - - :type value: string, or ``NoneType`` - :param value: new location - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['location'] = value - - @property - def view_query(self): - """SQL query defining the table as a view. - - :rtype: string, or ``NoneType`` - :returns: The query as set by the user, or None (the default). - """ - view = self._properties.get('view') - if view is not None: - return view.get('query') - - @view_query.setter - def view_query(self, value): - """Update SQL query defining the table as a view. 
- - :type value: string - :param value: new query - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types): - raise ValueError("Pass a string") - self._properties['view'] = {'query': value} - - @view_query.deleter - def view_query(self): - """Delete SQL query defining the table as a view.""" - self._properties.pop('view', None) - - @classmethod - def from_api_repr(cls, resource, dataset): - """Factory: construct a table given its API representation - - :type resource: dict - :param resource: table resource representation returned from the API - - :type dataset: :class:`gcloud.bigquery.dataset.Dataset` - :param dataset: The dataset containing the table. - - :rtype: :class:`gcloud.bigquery.table.Table` - :returns: Table parsed from ``resource``. - """ - if ('tableReference' not in resource or - 'tableId' not in resource['tableReference']): - raise KeyError('Resource lacks required identity information:' - '["tableReference"]["tableId"]') - table_name = resource['tableReference']['tableId'] - table = cls(table_name, dataset=dataset) - table._set_properties(resource) - return table - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :rtype: :class:`gcloud.bigquery.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._dataset._client - return client - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - self._properties.clear() - cleaned = api_response.copy() - schema = cleaned.pop('schema', {'fields': ()}) - self.schema = _parse_schema_resource(schema) - if 'creationTime' in cleaned: - cleaned['creationTime'] = float(cleaned['creationTime']) - if 'lastModifiedTime' in cleaned: - cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime']) - if 'expirationTime' in cleaned: - cleaned['expirationTime'] = float(cleaned['expirationTime']) - self._properties.update(cleaned) - - def _build_resource(self): - """Generate a resource for ``create`` or ``update``.""" - resource = { - 'tableReference': { - 'projectId': self._dataset.project, - 'datasetId': self._dataset.name, - 'tableId': self.name}, - } - if self.description is not None: - resource['description'] = self.description - - if self.expires is not None: - value = _millis_from_datetime(self.expires) - resource['expirationTime'] = value - - if self.friendly_name is not None: - resource['friendlyName'] = self.friendly_name - - if self.location is not None: - resource['location'] = self.location - - if self.view_query is not None: - view = resource['view'] = {} - view['query'] = self.view_query - elif self._schema: - resource['schema'] = { - 'fields': _build_schema_resource(self._schema) - } - else: - raise ValueError("Set either 'view_query' or 'schema'.") - - return resource - - def create(self, client=None): - """API call: create the dataset via a PUT request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables/insert - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. 
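Note that ``_build_resource`` above requires either a schema or a ``view_query``; a sketch of creating a view-backed table (query and names are placeholders)::

    >>> from gcloud import bigquery
    >>> client = bigquery.Client()
    >>> dataset = client.dataset('dataset_name')
    >>> view = dataset.table('person_ages_view')    # no schema supplied
    >>> view.view_query = (
    ...     'SELECT full_name FROM dataset_name.person_ages WHERE age >= 18')
    >>> view.create()   # with neither view_query nor schema, ValueError is raised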
- """ - client = self._require_client(client) - path = '/projects/%s/datasets/%s/tables' % ( - self._dataset.project, self._dataset.name) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test for the existence of the table via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/tables/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path, - query_params={'fields': 'id'}) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: refresh table properties via a GET request - - See - https://cloud.google.com/bigquery/docs/reference/v2/tables/get - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) - - def patch(self, - client=None, - friendly_name=_MARKER, - description=_MARKER, - location=_MARKER, - expires=_MARKER, - view_query=_MARKER, - schema=_MARKER): - """API call: update individual table properties via a PATCH request - - See - https://cloud.google.com/bigquery/docs/reference/v2/tables/patch - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :type friendly_name: string or ``NoneType`` - :param friendly_name: point in time at which the table expires. - - :type description: string or ``NoneType`` - :param description: point in time at which the table expires. - - :type location: string or ``NoneType`` - :param location: point in time at which the table expires. - - :type expires: :class:`datetime.datetime` or ``NoneType`` - :param expires: point in time at which the table expires. - - :type view_query: string - :param view_query: SQL query defining the table as a view - - :type schema: list of :class:`SchemaField` - :param schema: fields describing the schema - - :raises: ValueError for invalid value types. 
- """ - client = self._require_client(client) - - partial = {} - - if expires is not _MARKER: - if (not isinstance(expires, datetime.datetime) and - expires is not None): - raise ValueError("Pass a datetime, or None") - partial['expirationTime'] = _millis_from_datetime(expires) - - if description is not _MARKER: - partial['description'] = description - - if friendly_name is not _MARKER: - partial['friendlyName'] = friendly_name - - if location is not _MARKER: - partial['location'] = location - - if view_query is not _MARKER: - if view_query is None: - partial['view'] = None - else: - partial['view'] = {'query': view_query} - - if schema is not _MARKER: - if schema is None: - partial['schema'] = None - else: - partial['schema'] = { - 'fields': _build_schema_resource(schema)} - - api_response = client.connection.api_request( - method='PATCH', path=self.path, data=partial) - self._set_properties(api_response) - - def update(self, client=None): - """API call: update table properties via a PUT request - - See - https://cloud.google.com/bigquery/docs/reference/v2/tables/update - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - api_response = client.connection.api_request( - method='PUT', path=self.path, data=self._build_resource()) - self._set_properties(api_response) - - def delete(self, client=None): - """API call: delete the table via a DELETE request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tables/delete - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) - - def fetch_data(self, max_results=None, page_token=None, client=None): - """API call: fetch the table data via a GET request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list - - .. note:: - - This method assumes that its instance's ``schema`` attribute is - up-to-date with the schema as defined on the back-end: if the - two schemas are not identical, the values returned may be - incomplete. To ensure that the local copy of the schema is - up-to-date, call the table's ``reload`` method. - - :type max_results: integer or ``NoneType`` - :param max_results: maximum number of rows to return. - - :type page_token: string or ``NoneType`` - :param page_token: token representing a cursor into the table's rows. - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :rtype: tuple - :returns: ``(row_data, total_rows, page_token)``, where ``row_data`` - is a list of tuples, one per result row, containing only - the values; ``total_rows`` is a count of the total number - of rows in the table; and ``page_token`` is an opaque - string which can be used to fetch the next batch of rows - (``None`` if no further batches can be fetched). 
- """ - client = self._require_client(client) - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - response = client.connection.api_request(method='GET', - path='%s/data' % self.path, - query_params=params) - total_rows = response.get('totalRows') - page_token = response.get('pageToken') - rows_data = _rows_from_json(response.get('rows', ()), self._schema) - - return rows_data, total_rows, page_token - - def insert_data(self, - rows, - row_ids=None, - skip_invalid_rows=None, - ignore_unknown_values=None, - template_suffix=None, - client=None): - """API call: insert table data via a POST request - - See: - https://cloud.google.com/bigquery/docs/reference/v2/tabledata/insertAll - - :type rows: list of tuples - :param rows: Row data to be inserted. Each tuple should contain data - for each schema field on the current table and in the - same order as the schema fields. - - :type row_ids: list of string - :param row_ids: Unique ids, one per row being inserted. If not - passed, no de-duplication occurs. - - :type skip_invalid_rows: boolean or ``NoneType`` - :param skip_invalid_rows: skip rows w/ invalid data? - - :type ignore_unknown_values: boolean or ``NoneType`` - :param ignore_unknown_values: ignore columns beyond schema? - - :type template_suffix: string or ``NoneType`` - :param template_suffix: treat ``name`` as a template table and provide - a suffix. BigQuery will create the table - `` + `` based on the - schema of the template table. See: - https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables - - :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. - - :rtype: list of mappings - :returns: One mapping per row with insert errors: the "index" key - identifies the row, and the "errors" key contains a list - of the mappings describing one or more problems with the - row. - """ - client = self._require_client(client) - rows_info = [] - data = {'rows': rows_info} - - for index, row in enumerate(rows): - row_info = {} - - for field, value in zip(self._schema, row): - if field.field_type == 'TIMESTAMP' and value is not None: - # BigQuery stores TIMESTAMP data internally as a - # UNIX timestamp with microsecond precision. - # Specifies the number of seconds since the epoch. 
- value = _microseconds_from_datetime(value) * 1e-6 - row_info[field.name] = value - - info = {'json': row_info} - if row_ids is not None: - info['insertId'] = row_ids[index] - - rows_info.append(info) - - if skip_invalid_rows is not None: - data['skipInvalidRows'] = skip_invalid_rows - - if ignore_unknown_values is not None: - data['ignoreUnknownValues'] = ignore_unknown_values - - if template_suffix is not None: - data['templateSuffix'] = template_suffix - - response = client.connection.api_request( - method='POST', - path='%s/insertAll' % self.path, - data=data) - errors = [] - - for error in response.get('insertErrors', ()): - errors.append({'index': int(error['index']), - 'errors': error['errors']}) - - return errors - - def upload_from_file(self, # pylint: disable=R0913,R0914 - file_obj, - source_format, - rewind=False, - size=None, - num_retries=6, - allow_jagged_rows=None, - allow_quoted_newlines=None, - create_disposition=None, - encoding=None, - field_delimiter=None, - ignore_unknown_values=None, - max_bad_records=None, - quote_character=None, - skip_leading_rows=None, - write_disposition=None, - client=None): - """Upload the contents of this table from a file-like object. - - The content type of the upload will either be - - The value passed in to the function (if any) - - ``text/csv``. - - :type file_obj: file - :param file_obj: A file handle open for reading. - - :type source_format: string - :param source_format: one of 'CSV' or 'NEWLINE_DELIMITED_JSON'. - job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type rewind: boolean - :param rewind: If True, seek to the beginning of the file handle before - writing the file to Cloud Storage. - - :type size: int - :param size: The number of bytes to read from the file handle. - If not provided, we'll try to guess the size using - :func:`os.fstat`. (If the file handle is not from the - filesystem this won't be possible.) - - :type num_retries: integer - :param num_retries: Number of upload retries. Defaults to 6. - - :type allow_jagged_rows: boolean - :param allow_jagged_rows: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type allow_quoted_newlines: boolean - :param allow_quoted_newlines: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type create_disposition: string - :param create_disposition: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type encoding: string - :param encoding: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type field_delimiter: string - :param field_delimiter: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type ignore_unknown_values: boolean - :param ignore_unknown_values: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type max_bad_records: integer - :param max_bad_records: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type quote_character: string - :param quote_character: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type skip_leading_rows: integer - :param skip_leading_rows: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type write_disposition: string - :param write_disposition: job configuration option; see - :meth:`gcloud.bigquery.job.LoadJob` - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current dataset. 
- - :rtype: :class:`gcloud.bigquery.jobs.LoadTableFromStorageJob` - :returns: the job instance used to load the data (e.g., for - querying status) - :raises: :class:`ValueError` if size is not passed in and can not be - determined - """ - client = self._require_client(client) - connection = client.connection - content_type = 'application/octet-stream' - - # Rewind the file if desired. - if rewind: - file_obj.seek(0, os.SEEK_SET) - - # Get the basic stats about the file. - total_bytes = size - if total_bytes is None: - if hasattr(file_obj, 'fileno'): - total_bytes = os.fstat(file_obj.fileno()).st_size - else: - raise ValueError('total bytes could not be determined. Please ' - 'pass an explicit size.') - headers = { - 'Accept': 'application/json', - 'Accept-Encoding': 'gzip, deflate', - 'User-Agent': connection.USER_AGENT, - 'content-type': 'application/json', - } - - metadata = { - 'configuration': { - 'load': { - 'sourceFormat': source_format, - 'schema': { - 'fields': _build_schema_resource(self._schema), - }, - 'destinationTable': { - 'projectId': self._dataset.project, - 'datasetId': self._dataset.name, - 'tableId': self.name, - } - } - } - } - - _configure_job_metadata(metadata, allow_jagged_rows, - allow_quoted_newlines, create_disposition, - encoding, field_delimiter, - ignore_unknown_values, max_bad_records, - quote_character, skip_leading_rows, - write_disposition) - - upload = Upload(file_obj, content_type, total_bytes, - auto_transfer=False) - - url_builder = _UrlBuilder() - upload_config = _UploadConfig() - - # Base URL may change once we know simple vs. resumable. - base_url = connection.API_BASE_URL + '/upload' - path = '/projects/%s/jobs' % (self._dataset.project,) - upload_url = connection.build_api_url(api_base_url=base_url, path=path) - - # Use apitools 'Upload' facility. 
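-        # The request built below carries the job configuration as its JSON
-        # body; 'configure_request' then fills 'url_builder.query_params'
-        # with whatever parameters apitools selects for the chosen upload
-        # strategy, and the URL is rebuilt before the upload is initiated.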
- request = Request(upload_url, 'POST', headers, - body=json.dumps(metadata)) - - upload.configure_request(upload_config, request, url_builder) - query_params = url_builder.query_params - base_url = connection.API_BASE_URL + '/upload' - request.url = connection.build_api_url(api_base_url=base_url, - path=path, - query_params=query_params) - upload.initialize_upload(request, connection.http) - - if upload.strategy == RESUMABLE_UPLOAD: - http_response = upload.stream_file(use_chunks=True) - else: - http_response = make_api_request(connection.http, request, - retries=num_retries) - response_content = http_response.content - if not isinstance(response_content, - six.string_types): # pragma: NO COVER Python3 - response_content = response_content.decode('utf-8') - return client.job_from_resource(json.loads(response_content)) - - -def _configure_job_metadata(metadata, # pylint: disable=R0913 - allow_jagged_rows, - allow_quoted_newlines, - create_disposition, - encoding, - field_delimiter, - ignore_unknown_values, - max_bad_records, - quote_character, - skip_leading_rows, - write_disposition): - """Helper for :meth:`Table.upload_from_file`.""" - load_config = metadata['configuration']['load'] - - if allow_jagged_rows is not None: - load_config['allowJaggedRows'] = allow_jagged_rows - - if allow_quoted_newlines is not None: - load_config['allowQuotedNewlines'] = allow_quoted_newlines - - if create_disposition is not None: - load_config['createDisposition'] = create_disposition - - if encoding is not None: - load_config['encoding'] = encoding - - if field_delimiter is not None: - load_config['fieldDelimiter'] = field_delimiter - - if ignore_unknown_values is not None: - load_config['ignoreUnknownValues'] = ignore_unknown_values - - if max_bad_records is not None: - load_config['maxBadRecords'] = max_bad_records - - if quote_character is not None: - load_config['quote'] = quote_character - - if skip_leading_rows is not None: - load_config['skipLeadingRows'] = skip_leading_rows - - if write_disposition is not None: - load_config['writeDisposition'] = write_disposition - - -def _parse_schema_resource(info): - """Parse a resource fragment into a schema field. - - :type info: mapping - :param info: should contain a "fields" key to be parsed - - :rtype: list of :class:`SchemaField`, or ``NoneType`` - :returns: a list of parsed fields, or ``None`` if no "fields" key is - present in ``info``. - """ - if 'fields' not in info: - return None - - schema = [] - for r_field in info['fields']: - name = r_field['name'] - field_type = r_field['type'] - mode = r_field.get('mode', 'NULLABLE') - description = r_field.get('description') - sub_fields = _parse_schema_resource(r_field) - schema.append( - SchemaField(name, field_type, mode, description, sub_fields)) - return schema - - -def _build_schema_resource(fields): - """Generate a resource fragment for a schema. - - :type fields: sequence of :class:`SchemaField` - :param fields: schema to be dumped - - :rtype: mapping - :returns; a mapping describing the schema of the supplied fields. 
- """ - infos = [] - for field in fields: - info = {'name': field.name, - 'type': field.field_type, - 'mode': field.mode} - if field.description is not None: - info['description'] = field.description - if field.fields is not None: - info['fields'] = _build_schema_resource(field.fields) - infos.append(info) - return infos - - -class _UploadConfig(object): - """Faux message FBO apitools' 'configure_request'.""" - accept = ['*/*'] - max_size = None - resumable_multipart = True - resumable_path = u'/upload/bigquery/v2/projects/{project}/jobs' - simple_multipart = True - simple_path = u'/upload/bigquery/v2/projects/{project}/jobs' - - -class _UrlBuilder(object): - """Faux builder FBO apitools' 'configure_request'""" - def __init__(self): - self.query_params = {} - self._relative_path = '' diff --git a/gcloud/bigquery/test__helpers.py b/gcloud/bigquery/test__helpers.py deleted file mode 100644 index 45f036e04263..000000000000 --- a/gcloud/bigquery/test__helpers.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test_ConfigurationProperty(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery._helpers import _ConfigurationProperty - return _ConfigurationProperty - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_it(self): - - class Configuration(object): - _attr = None - - class Wrapper(object): - attr = self._makeOne('attr') - - def __init__(self): - self._configuration = Configuration() - - self.assertEqual(Wrapper.attr.name, 'attr') - - wrapper = Wrapper() - self.assertEqual(wrapper.attr, None) - - value = object() - wrapper.attr = value - self.assertTrue(wrapper.attr is value) - self.assertTrue(wrapper._configuration._attr is value) - - del wrapper.attr - self.assertEqual(wrapper.attr, None) - self.assertEqual(wrapper._configuration._attr, None) - - -class Test_TypedProperty(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery._helpers import _TypedProperty - return _TypedProperty - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_it(self): - - class Configuration(object): - _attr = None - - class Wrapper(object): - attr = self._makeOne('attr', int) - - def __init__(self): - self._configuration = Configuration() - - wrapper = Wrapper() - with self.assertRaises(ValueError): - wrapper.attr = 'BOGUS' - - wrapper.attr = 42 - self.assertEqual(wrapper.attr, 42) - self.assertEqual(wrapper._configuration._attr, 42) - - del wrapper.attr - self.assertEqual(wrapper.attr, None) - self.assertEqual(wrapper._configuration._attr, None) - - -class Test_EnumProperty(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery._helpers import _EnumProperty - return _EnumProperty - - def test_it(self): - - class Sub(self._getTargetClass()): - ALLOWED = ('FOO', 'BAR', 'BAZ') - - class Configuration(object): - _attr = None - - class Wrapper(object): - attr = 
Sub('attr') - - def __init__(self): - self._configuration = Configuration() - - wrapper = Wrapper() - with self.assertRaises(ValueError): - wrapper.attr = 'BOGUS' - - wrapper.attr = 'FOO' - self.assertEqual(wrapper.attr, 'FOO') - self.assertEqual(wrapper._configuration._attr, 'FOO') - - del wrapper.attr - self.assertEqual(wrapper.attr, None) - self.assertEqual(wrapper._configuration._attr, None) diff --git a/gcloud/bigquery/test_client.py b/gcloud/bigquery/test_client.py deleted file mode 100644 index aa72834f560f..000000000000 --- a/gcloud/bigquery/test_client.py +++ /dev/null @@ -1,431 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestClient(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - from gcloud.bigquery.connection import Connection - PROJECT = 'PROJECT' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - self.assertTrue(isinstance(client.connection, Connection)) - self.assertTrue(client.connection.credentials is creds) - self.assertTrue(client.connection.http is http) - - def test_list_datasets_defaults(self): - from gcloud.bigquery.dataset import Dataset - PROJECT = 'PROJECT' - DATASET_1 = 'dataset_one' - DATASET_2 = 'dataset_two' - PATH = 'projects/%s/datasets' % PROJECT - TOKEN = 'TOKEN' - DATA = { - 'nextPageToken': TOKEN, - 'datasets': [ - {'kind': 'bigquery#dataset', - 'id': '%s:%s' % (PROJECT, DATASET_1), - 'datasetReference': {'datasetId': DATASET_1, - 'projectId': PROJECT}, - 'friendlyName': None}, - {'kind': 'bigquery#dataset', - 'id': '%s:%s' % (PROJECT, DATASET_2), - 'datasetReference': {'datasetId': DATASET_2, - 'projectId': PROJECT}, - 'friendlyName': 'Two'}, - ] - } - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - datasets, token = client.list_datasets() - - self.assertEqual(len(datasets), len(DATA['datasets'])) - for found, expected in zip(datasets, DATA['datasets']): - self.assertTrue(isinstance(found, Dataset)) - self.assertEqual(found.dataset_id, expected['id']) - self.assertEqual(found.friendly_name, expected['friendlyName']) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_datasets_explicit_response_missing_datasets_key(self): - PROJECT = 'PROJECT' - PATH = 'projects/%s/datasets' % PROJECT - TOKEN = 'TOKEN' - DATA = {} - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - datasets, token = client.list_datasets( - include_all=True, max_results=3, page_token=TOKEN) - - self.assertEqual(len(datasets), 0) - self.assertEqual(token, None) - - 
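-        # Even with an empty stubbed response, the explicit arguments should
-        # be forwarded as query parameters ('all', 'maxResults', 'pageToken').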
self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'all': True, 'maxResults': 3, 'pageToken': TOKEN}) - - def test_dataset(self): - from gcloud.bigquery.dataset import Dataset - PROJECT = 'PROJECT' - DATASET = 'dataset_name' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - dataset = client.dataset(DATASET) - self.assertTrue(isinstance(dataset, Dataset)) - self.assertEqual(dataset.name, DATASET) - self.assertTrue(dataset._client is client) - - def test_job_from_resource_unknown_type(self): - PROJECT = 'PROJECT' - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - with self.assertRaises(ValueError): - client.job_from_resource({'configuration': {'nonesuch': {}}}) - - def test_list_jobs_defaults(self): - from gcloud.bigquery.job import LoadTableFromStorageJob - from gcloud.bigquery.job import CopyJob - from gcloud.bigquery.job import ExtractTableToStorageJob - from gcloud.bigquery.job import QueryJob - PROJECT = 'PROJECT' - DATASET = 'test_dataset' - SOURCE_TABLE = 'source_table' - DESTINATION_TABLE = 'destination_table' - QUERY_DESTINATION_TABLE = 'query_destination_table' - SOURCE_URI = 'gs://test_bucket/src_object*' - DESTINATION_URI = 'gs://test_bucket/dst_object*' - JOB_TYPES = { - 'load_job': LoadTableFromStorageJob, - 'copy_job': CopyJob, - 'extract_job': ExtractTableToStorageJob, - 'query_job': QueryJob, - } - PATH = 'projects/%s/jobs' % PROJECT - TOKEN = 'TOKEN' - QUERY = 'SELECT * from test_dataset:test_table' - ASYNC_QUERY_DATA = { - 'id': '%s:%s' % (PROJECT, 'query_job'), - 'jobReference': { - 'projectId': PROJECT, - 'jobId': 'query_job', - }, - 'state': 'DONE', - 'configuration': { - 'query': { - 'query': QUERY, - 'destinationTable': { - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': QUERY_DESTINATION_TABLE, - }, - 'createDisposition': 'CREATE_IF_NEEDED', - 'writeDisposition': 'WRITE_TRUNCATE', - } - }, - } - EXTRACT_DATA = { - 'id': '%s:%s' % (PROJECT, 'extract_job'), - 'jobReference': { - 'projectId': PROJECT, - 'jobId': 'extract_job', - }, - 'state': 'DONE', - 'configuration': { - 'extract': { - 'sourceTable': { - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': SOURCE_TABLE, - }, - 'destinationUris': [DESTINATION_URI], - } - }, - } - COPY_DATA = { - 'id': '%s:%s' % (PROJECT, 'copy_job'), - 'jobReference': { - 'projectId': PROJECT, - 'jobId': 'copy_job', - }, - 'state': 'DONE', - 'configuration': { - 'copy': { - 'sourceTables': [{ - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': SOURCE_TABLE, - }], - 'destinationTable': { - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': DESTINATION_TABLE, - }, - } - }, - } - LOAD_DATA = { - 'id': '%s:%s' % (PROJECT, 'load_job'), - 'jobReference': { - 'projectId': PROJECT, - 'jobId': 'load_job', - }, - 'state': 'DONE', - 'configuration': { - 'load': { - 'destinationTable': { - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': SOURCE_TABLE, - }, - 'sourceUris': [SOURCE_URI], - } - }, - } - DATA = { - 'nextPageToken': TOKEN, - 'jobs': [ - ASYNC_QUERY_DATA, - EXTRACT_DATA, - COPY_DATA, - LOAD_DATA, - ] - } - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - jobs, token = client.list_jobs() - - self.assertEqual(len(jobs), len(DATA['jobs'])) - for found, expected in zip(jobs, DATA['jobs']): - name = 
expected['jobReference']['jobId'] - self.assertTrue(isinstance(found, JOB_TYPES[name])) - self.assertEqual(found.name, name) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'projection': 'full'}) - - def test_list_jobs_load_job_wo_sourceUris(self): - from gcloud.bigquery.job import LoadTableFromStorageJob - PROJECT = 'PROJECT' - DATASET = 'test_dataset' - SOURCE_TABLE = 'source_table' - JOB_TYPES = { - 'load_job': LoadTableFromStorageJob, - } - PATH = 'projects/%s/jobs' % PROJECT - TOKEN = 'TOKEN' - LOAD_DATA = { - 'id': '%s:%s' % (PROJECT, 'load_job'), - 'jobReference': { - 'projectId': PROJECT, - 'jobId': 'load_job', - }, - 'state': 'DONE', - 'configuration': { - 'load': { - 'destinationTable': { - 'projectId': PROJECT, - 'datasetId': DATASET, - 'tableId': SOURCE_TABLE, - }, - } - }, - } - DATA = { - 'nextPageToken': TOKEN, - 'jobs': [ - LOAD_DATA, - ] - } - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - jobs, token = client.list_jobs() - - self.assertEqual(len(jobs), len(DATA['jobs'])) - for found, expected in zip(jobs, DATA['jobs']): - name = expected['jobReference']['jobId'] - self.assertTrue(isinstance(found, JOB_TYPES[name])) - self.assertEqual(found.name, name) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'projection': 'full'}) - - def test_list_jobs_explicit_empty(self): - PROJECT = 'PROJECT' - PATH = 'projects/%s/jobs' % PROJECT - DATA = {'jobs': []} - TOKEN = 'TOKEN' - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - jobs, token = client.list_jobs(max_results=1000, page_token=TOKEN, - all_users=True, state_filter='done') - - self.assertEqual(len(jobs), 0) - self.assertEqual(token, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'projection': 'full', - 'maxResults': 1000, - 'pageToken': TOKEN, - 'allUsers': True, - 'stateFilter': 'done'}) - - def test_load_table_from_storage(self): - from gcloud.bigquery.job import LoadTableFromStorageJob - PROJECT = 'PROJECT' - JOB = 'job_name' - DATASET = 'dataset_name' - DESTINATION = 'destination_table' - SOURCE_URI = 'http://example.com/source.csv' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - dataset = client.dataset(DATASET) - destination = dataset.table(DESTINATION) - job = client.load_table_from_storage(JOB, destination, SOURCE_URI) - self.assertTrue(isinstance(job, LoadTableFromStorageJob)) - self.assertTrue(job._client is client) - self.assertEqual(job.name, JOB) - self.assertEqual(list(job.source_uris), [SOURCE_URI]) - self.assertTrue(job.destination is destination) - - def test_copy_table(self): - from gcloud.bigquery.job import CopyJob - PROJECT = 'PROJECT' - JOB = 'job_name' - DATASET = 'dataset_name' - SOURCE = 'source_table' - DESTINATION = 'destination_table' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - dataset = 
client.dataset(DATASET) - source = dataset.table(SOURCE) - destination = dataset.table(DESTINATION) - job = client.copy_table(JOB, destination, source) - self.assertTrue(isinstance(job, CopyJob)) - self.assertTrue(job._client is client) - self.assertEqual(job.name, JOB) - self.assertEqual(list(job.sources), [source]) - self.assertTrue(job.destination is destination) - - def test_extract_table_to_storage(self): - from gcloud.bigquery.job import ExtractTableToStorageJob - PROJECT = 'PROJECT' - JOB = 'job_name' - DATASET = 'dataset_name' - SOURCE = 'source_table' - DESTINATION = 'gs://bucket_name/object_name' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - dataset = client.dataset(DATASET) - source = dataset.table(SOURCE) - job = client.extract_table_to_storage(JOB, source, DESTINATION) - self.assertTrue(isinstance(job, ExtractTableToStorageJob)) - self.assertTrue(job._client is client) - self.assertEqual(job.name, JOB) - self.assertEqual(job.source, source) - self.assertEqual(list(job.destination_uris), [DESTINATION]) - - def test_run_async_query(self): - from gcloud.bigquery.job import QueryJob - PROJECT = 'PROJECT' - JOB = 'job_name' - QUERY = 'select count(*) from persons' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - job = client.run_async_query(JOB, QUERY) - self.assertTrue(isinstance(job, QueryJob)) - self.assertTrue(job._client is client) - self.assertEqual(job.name, JOB) - self.assertEqual(job.query, QUERY) - - def test_run_sync_query(self): - from gcloud.bigquery.query import QueryResults - PROJECT = 'PROJECT' - QUERY = 'select count(*) from persons' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - job = client.run_sync_query(QUERY) - self.assertTrue(isinstance(job, QueryResults)) - self.assertTrue(job._client is client) - self.assertEqual(job.name, None) - self.assertEqual(job.query, QUERY) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/bigquery/test_connection.py b/gcloud/bigquery/test_connection.py deleted file mode 100644 index 7a3aa254b801..000000000000 --- a/gcloud/bigquery/test_connection.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
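-# Unit tests for gcloud.bigquery.connection: these exercise API URL
-# construction, with and without extra query string parameters.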
- -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_build_api_url_no_extra_query_params(self): - conn = self._makeOne() - URI = '/'.join([ - conn.API_BASE_URL, - 'bigquery', - conn.API_VERSION, - 'foo', - ]) - self.assertEqual(conn.build_api_url('/foo'), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - conn = self._makeOne() - uri = conn.build_api_url('/foo', {'bar': 'baz'}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, - '/'.join(['', 'bigquery', conn.API_VERSION, 'foo'])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms['bar'], 'baz') diff --git a/gcloud/bigquery/test_dataset.py b/gcloud/bigquery/test_dataset.py deleted file mode 100644 index 6e950208bff7..000000000000 --- a/gcloud/bigquery/test_dataset.py +++ /dev/null @@ -1,765 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestAccessGrant(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery.dataset import AccessGrant - return AccessGrant - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - grant = self._makeOne('OWNER', 'userByEmail', 'phred@example.com') - self.assertEqual(grant.role, 'OWNER') - self.assertEqual(grant.entity_type, 'userByEmail') - self.assertEqual(grant.entity_id, 'phred@example.com') - - def test_ctor_bad_entity_type(self): - with self.assertRaises(ValueError): - self._makeOne(None, 'unknown', None) - - def test_ctor_view_with_role(self): - role = 'READER' - entity_type = 'view' - with self.assertRaises(ValueError): - self._makeOne(role, entity_type, None) - - def test_ctor_view_success(self): - role = None - entity_type = 'view' - entity_id = object() - grant = self._makeOne(role, entity_type, entity_id) - self.assertEqual(grant.role, role) - self.assertEqual(grant.entity_type, entity_type) - self.assertEqual(grant.entity_id, entity_id) - - def test_ctor_nonview_without_role(self): - role = None - entity_type = 'userByEmail' - with self.assertRaises(ValueError): - self._makeOne(role, entity_type, None) - - -class TestDataset(unittest2.TestCase): - PROJECT = 'project' - DS_NAME = 'dataset-name' - - def _getTargetClass(self): - from gcloud.bigquery.dataset import Dataset - return Dataset - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - import datetime - from gcloud._helpers import UTC - - self.WHEN_TS = 1437767599.006 - self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace( - tzinfo=UTC) - self.ETAG = 'ETAG' - self.DS_ID = '%s:%s' % (self.PROJECT, 
self.DS_NAME) - self.RESOURCE_URL = 'http://example.com/path/to/resource' - - def _makeResource(self): - self._setUpConstants() - USER_EMAIL = 'phred@example.com' - GROUP_EMAIL = 'group-name@lists.example.com' - return { - 'creationTime': self.WHEN_TS * 1000, - 'datasetReference': - {'projectId': self.PROJECT, 'datasetId': self.DS_NAME}, - 'etag': self.ETAG, - 'id': self.DS_ID, - 'lastModifiedTime': self.WHEN_TS * 1000, - 'location': 'US', - 'selfLink': self.RESOURCE_URL, - 'access': [ - {'role': 'OWNER', 'userByEmail': USER_EMAIL}, - {'role': 'OWNER', 'groupByEmail': GROUP_EMAIL}, - {'role': 'WRITER', 'specialGroup': 'projectWriters'}, - {'role': 'READER', 'specialGroup': 'projectReaders'}], - } - - def _verifyAccessGrants(self, access_grants, resource): - r_grants = [] - for r_grant in resource['access']: - role = r_grant.pop('role') - for entity_type, entity_id in sorted(r_grant.items()): - r_grants.append({'role': role, - 'entity_type': entity_type, - 'entity_id': entity_id}) - - self.assertEqual(len(access_grants), len(r_grants)) - for a_grant, r_grant in zip(access_grants, r_grants): - self.assertEqual(a_grant.role, r_grant['role']) - self.assertEqual(a_grant.entity_type, r_grant['entity_type']) - self.assertEqual(a_grant.entity_id, r_grant['entity_id']) - - def _verifyReadonlyResourceProperties(self, dataset, resource): - - self.assertEqual(dataset.dataset_id, self.DS_ID) - - if 'creationTime' in resource: - self.assertEqual(dataset.created, self.WHEN) - else: - self.assertEqual(dataset.created, None) - if 'etag' in resource: - self.assertEqual(dataset.etag, self.ETAG) - else: - self.assertEqual(dataset.etag, None) - if 'lastModifiedTime' in resource: - self.assertEqual(dataset.modified, self.WHEN) - else: - self.assertEqual(dataset.modified, None) - if 'selfLink' in resource: - self.assertEqual(dataset.self_link, self.RESOURCE_URL) - else: - self.assertEqual(dataset.self_link, None) - - def _verifyResourceProperties(self, dataset, resource): - - self._verifyReadonlyResourceProperties(dataset, resource) - - self.assertEqual(dataset.default_table_expiration_ms, - resource.get('defaultTableExpirationMs')) - self.assertEqual(dataset.description, resource.get('description')) - self.assertEqual(dataset.friendly_name, resource.get('friendlyName')) - self.assertEqual(dataset.location, resource.get('location')) - - if 'access' in resource: - self._verifyAccessGrants(dataset.access_grants, resource) - else: - self.assertEqual(dataset.access_grants, []) - - def test_ctor(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - self.assertEqual(dataset.name, self.DS_NAME) - self.assertTrue(dataset._client is client) - self.assertEqual(dataset.project, client.project) - self.assertEqual( - dataset.path, - '/projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME)) - self.assertEqual(dataset.access_grants, []) - - self.assertEqual(dataset.created, None) - self.assertEqual(dataset.dataset_id, None) - self.assertEqual(dataset.etag, None) - self.assertEqual(dataset.modified, None) - self.assertEqual(dataset.self_link, None) - - self.assertEqual(dataset.default_table_expiration_ms, None) - self.assertEqual(dataset.description, None) - self.assertEqual(dataset.friendly_name, None) - self.assertEqual(dataset.location, None) - - def test_access_roles_setter_non_list(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - with self.assertRaises(TypeError): - dataset.access_grants = object() - - def 
test_access_roles_setter_invalid_field(self): - from gcloud.bigquery.dataset import AccessGrant - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - phred = AccessGrant('OWNER', 'userByEmail', 'phred@example.com') - with self.assertRaises(ValueError): - dataset.access_grants = [phred, object()] - - def test_access_roles_setter(self): - from gcloud.bigquery.dataset import AccessGrant - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - phred = AccessGrant('OWNER', 'userByEmail', 'phred@example.com') - bharney = AccessGrant('OWNER', 'userByEmail', 'bharney@example.com') - dataset.access_grants = [phred, bharney] - self.assertEqual(dataset.access_grants, [phred, bharney]) - - def test_default_table_expiration_ms_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - with self.assertRaises(ValueError): - dataset.default_table_expiration_ms = 'bogus' - - def test_default_table_expiration_ms_setter(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - dataset.default_table_expiration_ms = 12345 - self.assertEqual(dataset.default_table_expiration_ms, 12345) - - def test_description_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - with self.assertRaises(ValueError): - dataset.description = 12345 - - def test_description_setter(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - dataset.description = 'DESCRIPTION' - self.assertEqual(dataset.description, 'DESCRIPTION') - - def test_friendly_name_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - with self.assertRaises(ValueError): - dataset.friendly_name = 12345 - - def test_friendly_name_setter(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - dataset.friendly_name = 'FRIENDLY' - self.assertEqual(dataset.friendly_name, 'FRIENDLY') - - def test_location_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - with self.assertRaises(ValueError): - dataset.location = 12345 - - def test_location_setter(self): - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client) - dataset.location = 'LOCATION' - self.assertEqual(dataset.location, 'LOCATION') - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': '%s:%s' % (self.PROJECT, self.DS_NAME), - 'datasetReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - } - } - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test__parse_access_grants_w_unknown_entity_type(self): - ACCESS = [ - {'role': 'READER', 'unknown': 'UNKNOWN'}, - ] - client = 
_Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client=client) - with self.assertRaises(ValueError): - dataset._parse_access_grants(ACCESS) - - def test__parse_access_grants_w_extra_keys(self): - USER_EMAIL = 'phred@example.com' - ACCESS = [ - { - 'role': 'READER', - 'specialGroup': 'projectReaders', - 'userByEmail': USER_EMAIL, - }, - ] - client = _Client(self.PROJECT) - dataset = self._makeOne(self.DS_NAME, client=client) - with self.assertRaises(ValueError): - dataset._parse_access_grants(ACCESS) - - def test_create_w_bound_client(self): - PATH = 'projects/%s/datasets' % self.PROJECT - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - dataset.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'datasetReference': - {'projectId': self.PROJECT, 'datasetId': self.DS_NAME}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_create_w_alternate_client(self): - from gcloud.bigquery.dataset import AccessGrant - PATH = 'projects/%s/datasets' % self.PROJECT - USER_EMAIL = 'phred@example.com' - GROUP_EMAIL = 'group-name@lists.example.com' - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - dataset.friendly_name = TITLE - dataset.description = DESCRIPTION - VIEW = { - 'projectId': 'my-proj', - 'datasetId': 'starry-skies', - 'tableId': 'northern-hemisphere', - } - dataset.access_grants = [ - AccessGrant('OWNER', 'userByEmail', USER_EMAIL), - AccessGrant('OWNER', 'groupByEmail', GROUP_EMAIL), - AccessGrant('READER', 'domain', 'foo.com'), - AccessGrant('READER', 'specialGroup', 'projectReaders'), - AccessGrant('WRITER', 'specialGroup', 'projectWriters'), - AccessGrant(None, 'view', VIEW), - ] - - dataset.create(client=CLIENT2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'datasetReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - }, - 'description': DESCRIPTION, - 'friendlyName': TITLE, - 'access': [ - {'role': 'OWNER', 'userByEmail': USER_EMAIL}, - {'role': 'OWNER', 'groupByEmail': GROUP_EMAIL}, - {'role': 'READER', 'domain': 'foo.com'}, - {'role': 'READER', 'specialGroup': 'projectReaders'}, - {'role': 'WRITER', 'specialGroup': 'projectWriters'}, - {'view': VIEW}, - ], - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_create_w_missing_output_properties(self): - # In the wild, the resource returned from 'dataset.create' sometimes - # lacks 'creationTime' / 'lastModifiedTime' - PATH = 'projects/%s/datasets' % (self.PROJECT,) - RESOURCE = self._makeResource() - del RESOURCE['creationTime'] - del RESOURCE['lastModifiedTime'] - self.WHEN = None - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - 
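-        # With 'creationTime' / 'lastModifiedTime' stripped from the stubbed
-        # response, the verifier below expects the derived 'created' and
-        # 'modified' properties to remain None.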
dataset.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'datasetReference': - {'projectId': self.PROJECT, 'datasetId': self.DS_NAME}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - self.assertFalse(dataset.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - - self.assertTrue(dataset.exists(client=CLIENT2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - dataset.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - - dataset.reload(client=CLIENT2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_patch_w_invalid_expiration(self): - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - with self.assertRaises(ValueError): - dataset.patch(default_table_expiration_ms='BOGUS') - - def test_patch_w_bound_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - dataset.patch(description=DESCRIPTION, friendly_name=TITLE) - - self.assertEqual(len(conn._requested), 1) 
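-        # Only the fields passed explicitly to patch() should appear in the
-        # PATCH request body.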
- req = conn._requested[0] - self.assertEqual(req['method'], 'PATCH') - SENT = { - 'description': DESCRIPTION, - 'friendlyName': TITLE, - } - self.assertEqual(req['data'], SENT) - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_patch_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - DEF_TABLE_EXP = 12345 - LOCATION = 'EU' - RESOURCE = self._makeResource() - RESOURCE['defaultTableExpirationMs'] = DEF_TABLE_EXP - RESOURCE['location'] = LOCATION - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - - dataset.patch(client=CLIENT2, - default_table_expiration_ms=DEF_TABLE_EXP, - location=LOCATION) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PATCH') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'defaultTableExpirationMs': DEF_TABLE_EXP, - 'location': LOCATION, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_update_w_bound_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - dataset.description = DESCRIPTION - dataset.friendly_name = TITLE - - dataset.update() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PUT') - SENT = { - 'datasetReference': - {'projectId': self.PROJECT, 'datasetId': self.DS_NAME}, - 'description': DESCRIPTION, - 'friendlyName': TITLE, - } - self.assertEqual(req['data'], SENT) - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_update_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - DEF_TABLE_EXP = 12345 - LOCATION = 'EU' - RESOURCE = self._makeResource() - RESOURCE['defaultTableExpirationMs'] = 12345 - RESOURCE['location'] = LOCATION - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - dataset.default_table_expiration_ms = DEF_TABLE_EXP - dataset.location = LOCATION - - dataset.update(client=CLIENT2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'datasetReference': - {'projectId': self.PROJECT, 'datasetId': self.DS_NAME}, - 'defaultTableExpirationMs': 12345, - 'location': 'EU', - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_delete_w_bound_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - dataset.delete() - - self.assertEqual(len(conn._requested), 1) - req = 
conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_delete_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s' % (self.PROJECT, self.DS_NAME) - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - dataset = self._makeOne(self.DS_NAME, client=CLIENT1) - - dataset.delete(client=CLIENT2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_tables_empty(self): - from gcloud.bigquery.table import Table - - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - tables, token = dataset.list_tables() - self.assertEqual(tables, []) - self.assertEqual(token, None) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_tables_defaults(self): - from gcloud.bigquery.table import Table - - TABLE_1 = 'table_one' - TABLE_2 = 'table_two' - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - TOKEN = 'TOKEN' - DATA = { - 'nextPageToken': TOKEN, - 'tables': [ - {'kind': 'bigquery#table', - 'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_1), - 'tableReference': {'tableId': TABLE_1, - 'datasetId': self.DS_NAME, - 'projectId': self.PROJECT}, - 'type': 'TABLE'}, - {'kind': 'bigquery#table', - 'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_2), - 'tableReference': {'tableId': TABLE_2, - 'datasetId': self.DS_NAME, - 'projectId': self.PROJECT}, - 'type': 'TABLE'}, - ] - } - - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - tables, token = dataset.list_tables() - - self.assertEqual(len(tables), len(DATA['tables'])) - for found, expected in zip(tables, DATA['tables']): - self.assertTrue(isinstance(found, Table)) - self.assertEqual(found.table_id, expected['id']) - self.assertEqual(found.table_type, expected['type']) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_tables_explicit(self): - from gcloud.bigquery.table import Table - - TABLE_1 = 'table_one' - TABLE_2 = 'table_two' - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - TOKEN = 'TOKEN' - DATA = { - 'tables': [ - {'kind': 'bigquery#dataset', - 'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_1), - 'tableReference': {'tableId': TABLE_1, - 'datasetId': self.DS_NAME, - 'projectId': self.PROJECT}, - 'type': 'TABLE'}, - {'kind': 'bigquery#dataset', - 'id': '%s:%s.%s' % (self.PROJECT, self.DS_NAME, TABLE_2), - 'tableReference': {'tableId': TABLE_2, - 'datasetId': self.DS_NAME, - 'projectId': self.PROJECT}, - 'type': 'TABLE'}, - ] - } - - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - - tables, token = dataset.list_tables(max_results=3, page_token=TOKEN) - - self.assertEqual(len(tables), len(DATA['tables'])) - for found, expected 
in zip(tables, DATA['tables']): - self.assertTrue(isinstance(found, Table)) - self.assertEqual(found.table_id, expected['id']) - self.assertEqual(found.table_type, expected['type']) - self.assertEqual(token, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': 3, 'pageToken': TOKEN}) - - def test_table_wo_schema(self): - from gcloud.bigquery.table import Table - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - table = dataset.table('table_name') - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.name, 'table_name') - self.assertTrue(table._dataset is dataset) - self.assertEqual(table.schema, []) - - def test_table_w_schema(self): - from gcloud.bigquery.table import SchemaField - from gcloud.bigquery.table import Table - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = self._makeOne(self.DS_NAME, client=client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = dataset.table('table_name', schema=[full_name, age]) - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.name, 'table_name') - self.assertTrue(table._dataset is dataset) - self.assertEqual(table.schema, [full_name, age]) - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response diff --git a/gcloud/bigquery/test_job.py b/gcloud/bigquery/test_job.py deleted file mode 100644 index 64660706be2e..000000000000 --- a/gcloud/bigquery/test_job.py +++ /dev/null @@ -1,1592 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class _Base(object): - PROJECT = 'project' - SOURCE1 = 'http://example.com/source1.csv' - DS_NAME = 'datset_name' - TABLE_NAME = 'table_name' - JOB_NAME = 'job_name' - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - import datetime - from gcloud._helpers import UTC - - self.WHEN_TS = 1437767599.006 - self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace( - tzinfo=UTC) - self.ETAG = 'ETAG' - self.JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME) - self.RESOURCE_URL = 'http://example.com/path/to/resource' - self.USER_EMAIL = 'phred@example.com' - - def _makeResource(self, started=False, ended=False): - self._setUpConstants() - resource = { - 'configuration': { - self.JOB_TYPE: { - }, - }, - 'statistics': { - 'creationTime': self.WHEN_TS * 1000, - self.JOB_TYPE: { - } - }, - 'etag': self.ETAG, - 'id': self.JOB_ID, - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'selfLink': self.RESOURCE_URL, - 'user_email': self.USER_EMAIL, - } - - if started or ended: - resource['statistics']['startTime'] = self.WHEN_TS * 1000 - - if ended: - resource['statistics']['endTime'] = (self.WHEN_TS + 1000) * 1000 - - return resource - - def _verifyInitialReadonlyProperties(self, job): - # root elements of resource - self.assertEqual(job.etag, None) - self.assertEqual(job.self_link, None) - self.assertEqual(job.user_email, None) - - # derived from resource['statistics'] - self.assertEqual(job.created, None) - self.assertEqual(job.started, None) - self.assertEqual(job.ended, None) - - # derived from resource['status'] - self.assertEqual(job.error_result, None) - self.assertEqual(job.errors, None) - self.assertEqual(job.state, None) - - def _verifyReadonlyResourceProperties(self, job, resource): - from datetime import timedelta - - statistics = resource.get('statistics', {}) - - if 'creationTime' in statistics: - self.assertEqual(job.created, self.WHEN) - else: - self.assertEqual(job.created, None) - - if 'startTime' in statistics: - self.assertEqual(job.started, self.WHEN) - else: - self.assertEqual(job.started, None) - - if 'endTime' in statistics: - self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000)) - else: - self.assertEqual(job.ended, None) - - if 'etag' in resource: - self.assertEqual(job.etag, self.ETAG) - else: - self.assertEqual(job.etag, None) - - if 'selfLink' in resource: - self.assertEqual(job.self_link, self.RESOURCE_URL) - else: - self.assertEqual(job.self_link, None) - - if 'user_email' in resource: - self.assertEqual(job.user_email, self.USER_EMAIL) - else: - self.assertEqual(job.user_email, None) - - -class TestLoadTableFromStorageJob(unittest2.TestCase, _Base): - JOB_TYPE = 'load' - - def _getTargetClass(self): - from gcloud.bigquery.job import LoadTableFromStorageJob - return LoadTableFromStorageJob - - def _setUpConstants(self): - super(TestLoadTableFromStorageJob, self)._setUpConstants() - self.INPUT_FILES = 2 - self.INPUT_BYTES = 12345 - self.OUTPUT_BYTES = 23456 - self.OUTPUT_ROWS = 345 - - def _makeResource(self, started=False, ended=False): - resource = super(TestLoadTableFromStorageJob, self)._makeResource( - started, ended) - config = resource['configuration']['load'] - config['sourceUris'] = [self.SOURCE1] - config['destinationTable'] = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - } - - if ended: - resource['statistics']['load']['inputFiles'] = self.INPUT_FILES - 
resource['statistics']['load']['inputFileBytes'] = self.INPUT_BYTES - resource['statistics']['load']['outputBytes'] = self.OUTPUT_BYTES - resource['statistics']['load']['outputRows'] = self.OUTPUT_ROWS - - return resource - - def _verifyBooleanConfigProperties(self, job, config): - if 'allowJaggedRows' in config: - self.assertEqual(job.allow_jagged_rows, - config['allowJaggedRows']) - else: - self.assertTrue(job.allow_jagged_rows is None) - if 'allowQuotedNewlines' in config: - self.assertEqual(job.allow_quoted_newlines, - config['allowQuotedNewlines']) - else: - self.assertTrue(job.allow_quoted_newlines is None) - if 'ignoreUnknownValues' in config: - self.assertEqual(job.ignore_unknown_values, - config['ignoreUnknownValues']) - else: - self.assertTrue(job.ignore_unknown_values is None) - - def _verifyEnumConfigProperties(self, job, config): - if 'createDisposition' in config: - self.assertEqual(job.create_disposition, - config['createDisposition']) - else: - self.assertTrue(job.create_disposition is None) - if 'encoding' in config: - self.assertEqual(job.encoding, - config['encoding']) - else: - self.assertTrue(job.encoding is None) - if 'sourceFormat' in config: - self.assertEqual(job.source_format, - config['sourceFormat']) - else: - self.assertTrue(job.source_format is None) - if 'writeDisposition' in config: - self.assertEqual(job.write_disposition, - config['writeDisposition']) - else: - self.assertTrue(job.write_disposition is None) - - def _verifyResourceProperties(self, job, resource): - self._verifyReadonlyResourceProperties(job, resource) - - config = resource.get('configuration', {}).get('load') - - self._verifyBooleanConfigProperties(job, config) - self._verifyEnumConfigProperties(job, config) - - self.assertEqual(job.source_uris, config['sourceUris']) - - table_ref = config['destinationTable'] - self.assertEqual(job.destination.project, table_ref['projectId']) - self.assertEqual(job.destination.dataset_name, table_ref['datasetId']) - self.assertEqual(job.destination.name, table_ref['tableId']) - - if 'fieldDelimiter' in config: - self.assertEqual(job.field_delimiter, - config['fieldDelimiter']) - else: - self.assertTrue(job.field_delimiter is None) - if 'maxBadRecords' in config: - self.assertEqual(job.max_bad_records, - config['maxBadRecords']) - else: - self.assertTrue(job.max_bad_records is None) - if 'quote' in config: - self.assertEqual(job.quote_character, - config['quote']) - else: - self.assertTrue(job.quote_character is None) - if 'skipLeadingRows' in config: - self.assertEqual(job.skip_leading_rows, - config['skipLeadingRows']) - else: - self.assertTrue(job.skip_leading_rows is None) - - def test_ctor(self): - client = _Client(self.PROJECT) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - self.assertTrue(job.destination is table) - self.assertEqual(list(job.source_uris), [self.SOURCE1]) - self.assertTrue(job._client is client) - self.assertEqual(job.job_type, self.JOB_TYPE) - self.assertEqual( - job.path, - '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)) - self.assertEqual(job.schema, []) - - self._verifyInitialReadonlyProperties(job) - - # derived from resource['statistics']['load'] - self.assertEqual(job.input_file_bytes, None) - self.assertEqual(job.input_files, None) - self.assertEqual(job.output_bytes, None) - self.assertEqual(job.output_rows, None) - - # set/read from resource['configuration']['load'] - self.assertTrue(job.allow_jagged_rows is None) - self.assertTrue(job.allow_quoted_newlines is None) - 
self.assertTrue(job.create_disposition is None) - self.assertTrue(job.encoding is None) - self.assertTrue(job.field_delimiter is None) - self.assertTrue(job.ignore_unknown_values is None) - self.assertTrue(job.max_bad_records is None) - self.assertTrue(job.quote_character is None) - self.assertTrue(job.skip_leading_rows is None) - self.assertTrue(job.source_format is None) - self.assertTrue(job.write_disposition is None) - - def test_ctor_w_schema(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - table = _Table() - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client, - schema=[full_name, age]) - self.assertEqual(job.schema, [full_name, age]) - - def test_schema_setter_non_list(self): - client = _Client(self.PROJECT) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - with self.assertRaises(TypeError): - job.schema = object() - - def test_schema_setter_invalid_field(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - with self.assertRaises(ValueError): - job.schema = [full_name, object()] - - def test_schema_setter(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - job.schema = [full_name, age] - self.assertEqual(job.schema, [full_name, age]) - - def test_props_set_by_server(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _millis - - CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC) - STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC) - ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC) - JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME) - URL = 'http://example.com/projects/%s/jobs/%s' % ( - self.PROJECT, self.JOB_NAME) - EMAIL = 'phred@example.com' - ERROR_RESULT = {'debugInfo': 'DEBUG', - 'location': 'LOCATION', - 'message': 'MESSAGE', - 'reason': 'REASON'} - - client = _Client(self.PROJECT) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - job._properties['etag'] = 'ETAG' - job._properties['id'] = JOB_ID - job._properties['selfLink'] = URL - job._properties['user_email'] = EMAIL - - statistics = job._properties['statistics'] = {} - statistics['creationTime'] = _millis(CREATED) - statistics['startTime'] = _millis(STARTED) - statistics['endTime'] = _millis(ENDED) - load_stats = statistics['load'] = {} - load_stats['inputFileBytes'] = 12345 - load_stats['inputFiles'] = 1 - load_stats['outputBytes'] = 23456 - load_stats['outputRows'] = 345 - - self.assertEqual(job.etag, 'ETAG') - self.assertEqual(job.self_link, URL) - self.assertEqual(job.user_email, EMAIL) - - self.assertEqual(job.created, CREATED) - self.assertEqual(job.started, STARTED) - self.assertEqual(job.ended, ENDED) - - self.assertEqual(job.input_file_bytes, 12345) - self.assertEqual(job.input_files, 1) - self.assertEqual(job.output_bytes, 23456) - self.assertEqual(job.output_rows, 345) - - status = job._properties['status'] = {} - - self.assertEqual(job.error_result, None) - 
self.assertEqual(job.errors, None) - self.assertEqual(job.state, None) - - status['errorResult'] = ERROR_RESULT - status['errors'] = [ERROR_RESULT] - status['state'] = 'STATE' - - self.assertEqual(job.error_result, ERROR_RESULT) - self.assertEqual(job.errors, [ERROR_RESULT]) - self.assertEqual(job.state, 'STATE') - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_missing_config(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': '%s:%s' % (self.PROJECT, self.DS_NAME), - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - } - } - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': self.JOB_ID, - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'load': { - 'sourceUris': [self.SOURCE1], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - }, - } - }, - } - klass = self._getTargetClass() - job = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(job._client is client) - self._verifyResourceProperties(job, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_begin_w_bound_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource() - # Ensure None for missing server-set props - del RESOURCE['statistics']['creationTime'] - del RESOURCE['etag'] - del RESOURCE['selfLink'] - del RESOURCE['user_email'] - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - - job.begin() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'load': { - 'sourceUris': [self.SOURCE1], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - }, - }, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_begin_w_alternate_client(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource(ended=True) - LOAD_CONFIGURATION = { - 'sourceUris': [self.SOURCE1], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - }, - 'allowJaggedRows': True, - 'allowQuotedNewlines': True, - 'createDisposition': 'CREATE_NEVER', - 'encoding': 'ISO-8559-1', - 'fieldDelimiter': '|', - 'ignoreUnknownValues': True, - 'maxBadRecords': 100, - 'quote': "'", - 'skipLeadingRows': 1, - 'sourceFormat': 'CSV', - 'writeDisposition': 'WRITE_TRUNCATE', - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 
'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}, - ]} - } - RESOURCE['configuration']['load'] = LOAD_CONFIGURATION - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - table = _Table() - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1, - schema=[full_name, age]) - - job.allow_jagged_rows = True - job.allow_quoted_newlines = True - job.create_disposition = 'CREATE_NEVER' - job.encoding = 'ISO-8559-1' - job.field_delimiter = '|' - job.ignore_unknown_values = True - job.max_bad_records = 100 - job.quote_character = "'" - job.skip_leading_rows = 1 - job.source_format = 'CSV' - job.write_disposition = 'WRITE_TRUNCATE' - - job.begin(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'load': LOAD_CONFIGURATION, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - - self.assertFalse(job.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1) - - self.assertTrue(job.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - - job.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1) - 
- job.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_cancel_w_bound_client(self): - PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client) - - job.cancel() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_cancel_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - table = _Table() - job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1) - - job.cancel(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - -class TestCopyJob(unittest2.TestCase, _Base): - JOB_TYPE = 'copy' - SOURCE_TABLE = 'source_table' - DESTINATION_TABLE = 'destination_table' - - def _getTargetClass(self): - from gcloud.bigquery.job import CopyJob - return CopyJob - - def _makeResource(self, started=False, ended=False): - resource = super(TestCopyJob, self)._makeResource( - started, ended) - config = resource['configuration']['copy'] - config['sourceTables'] = [{ - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - }] - config['destinationTable'] = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.DESTINATION_TABLE, - } - - return resource - - def _verifyResourceProperties(self, job, resource): - self._verifyReadonlyResourceProperties(job, resource) - - config = resource.get('configuration', {}).get('copy') - - table_ref = config['destinationTable'] - self.assertEqual(job.destination.project, table_ref['projectId']) - self.assertEqual(job.destination.dataset_name, table_ref['datasetId']) - self.assertEqual(job.destination.name, table_ref['tableId']) - - sources = config['sourceTables'] - self.assertEqual(len(sources), len(job.sources)) - for table_ref, table in zip(sources, job.sources): - self.assertEqual(table.project, table_ref['projectId']) - self.assertEqual(table.dataset_name, table_ref['datasetId']) - self.assertEqual(table.name, table_ref['tableId']) - - if 'createDisposition' in config: - self.assertEqual(job.create_disposition, - config['createDisposition']) - else: - self.assertTrue(job.create_disposition is None) - - if 'writeDisposition' in config: - self.assertEqual(job.write_disposition, - config['writeDisposition']) - else: - self.assertTrue(job.write_disposition is None) - - def test_ctor(self): - client = _Client(self.PROJECT) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client) - self.assertTrue(job.destination is destination) - 
self.assertEqual(job.sources, [source]) - self.assertTrue(job._client is client) - self.assertEqual(job.job_type, self.JOB_TYPE) - self.assertEqual( - job.path, - '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)) - - self._verifyInitialReadonlyProperties(job) - - # set/read from resource['configuration']['copy'] - self.assertTrue(job.create_disposition is None) - self.assertTrue(job.write_disposition is None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_missing_config(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': '%s:%s' % (self.PROJECT, self.DS_NAME), - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - } - } - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': self.JOB_ID, - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'copy': { - 'sourceTables': [{ - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - }], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.DESTINATION_TABLE, - }, - } - }, - } - klass = self._getTargetClass() - job = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(job._client is client) - self._verifyResourceProperties(job, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_begin_w_bound_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource() - # Ensure None for missing server-set props - del RESOURCE['statistics']['creationTime'] - del RESOURCE['etag'] - del RESOURCE['selfLink'] - del RESOURCE['user_email'] - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client) - - job.begin() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'copy': { - 'sourceTables': [{ - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE - }], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.DESTINATION_TABLE, - }, - }, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_begin_w_alternate_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource(ended=True) - COPY_CONFIGURATION = { - 'sourceTables': [{ - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - }], - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': 
self.DESTINATION_TABLE, - }, - 'createDisposition': 'CREATE_NEVER', - 'writeDisposition': 'WRITE_TRUNCATE', - } - RESOURCE['configuration']['copy'] = COPY_CONFIGURATION - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client1) - - job.create_disposition = 'CREATE_NEVER' - job.write_disposition = 'WRITE_TRUNCATE' - - job.begin(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'copy': COPY_CONFIGURATION, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client) - - self.assertFalse(job.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client1) - - self.assertTrue(job.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client) - - job.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - destination = _Table(self.DESTINATION_TABLE) - job = self._makeOne(self.JOB_NAME, destination, [source], client1) - - job.reload(client=client2) - - 
self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - -class TestExtractTableToStorageJob(unittest2.TestCase, _Base): - JOB_TYPE = 'extract' - SOURCE_TABLE = 'source_table' - DESTINATION_URI = 'gs://bucket_name/object_name' - - def _getTargetClass(self): - from gcloud.bigquery.job import ExtractTableToStorageJob - return ExtractTableToStorageJob - - def _makeResource(self, started=False, ended=False): - resource = super(TestExtractTableToStorageJob, self)._makeResource( - started, ended) - config = resource['configuration']['extract'] - config['sourceTable'] = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - } - config['destinationUris'] = [self.DESTINATION_URI] - return resource - - def _verifyResourceProperties(self, job, resource): - self._verifyReadonlyResourceProperties(job, resource) - - config = resource.get('configuration', {}).get('extract') - - self.assertEqual(job.destination_uris, config['destinationUris']) - - table_ref = config['sourceTable'] - self.assertEqual(job.source.project, table_ref['projectId']) - self.assertEqual(job.source.dataset_name, table_ref['datasetId']) - self.assertEqual(job.source.name, table_ref['tableId']) - - if 'compression' in config: - self.assertEqual(job.compression, - config['compression']) - else: - self.assertTrue(job.compression is None) - - if 'destinationFormat' in config: - self.assertEqual(job.destination_format, - config['destinationFormat']) - else: - self.assertTrue(job.destination_format is None) - - if 'fieldDelimiter' in config: - self.assertEqual(job.field_delimiter, - config['fieldDelimiter']) - else: - self.assertTrue(job.field_delimiter is None) - - if 'printHeader' in config: - self.assertEqual(job.print_header, - config['printHeader']) - else: - self.assertTrue(job.print_header is None) - - def test_ctor(self): - client = _Client(self.PROJECT) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client) - self.assertEqual(job.source, source) - self.assertEqual(job.destination_uris, [self.DESTINATION_URI]) - self.assertTrue(job._client is client) - self.assertEqual(job.job_type, self.JOB_TYPE) - self.assertEqual( - job.path, - '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)) - - self._verifyInitialReadonlyProperties(job) - - # set/read from resource['configuration']['copy'] - self.assertTrue(job.compression is None) - self.assertTrue(job.destination_format is None) - self.assertTrue(job.field_delimiter is None) - self.assertTrue(job.print_header is None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_missing_config(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': '%s:%s' % (self.PROJECT, self.DS_NAME), - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - } - } - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': self.JOB_ID, - 'jobReference': { - 'projectId': 
self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'extract': { - 'sourceTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - }, - 'destinationUris': [self.DESTINATION_URI], - } - }, - } - klass = self._getTargetClass() - job = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(job._client is client) - self._verifyResourceProperties(job, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_begin_w_bound_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource() - # Ensure None for missing server-set props - del RESOURCE['statistics']['creationTime'] - del RESOURCE['etag'] - del RESOURCE['selfLink'] - del RESOURCE['user_email'] - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client) - - job.begin() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'extract': { - 'sourceTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE - }, - 'destinationUris': [self.DESTINATION_URI], - }, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_begin_w_alternate_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource(ended=True) - EXTRACT_CONFIGURATION = { - 'sourceTable': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.SOURCE_TABLE, - }, - 'destinationUris': [self.DESTINATION_URI], - 'compression': 'GZIP', - 'destinationFormat': 'NEWLINE_DELIMITED_JSON', - 'fieldDelimiter': '|', - 'printHeader': False, - } - RESOURCE['configuration']['extract'] = EXTRACT_CONFIGURATION - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client1) - - job.compression = 'GZIP' - job.destination_format = 'NEWLINE_DELIMITED_JSON' - job.field_delimiter = '|' - job.print_header = False - - job.begin(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'extract': EXTRACT_CONFIGURATION, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client) - - self.assertFalse(job.exists()) - - 
self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client1) - - self.assertTrue(job.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client) - - job.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - source = _Table(self.SOURCE_TABLE) - job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI], - client1) - - job.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - -class TestQueryJob(unittest2.TestCase, _Base): - JOB_TYPE = 'query' - QUERY = 'select count(*) from persons' - DESTINATION_TABLE = 'destination_table' - - def _getTargetClass(self): - from gcloud.bigquery.job import QueryJob - return QueryJob - - def _makeResource(self, started=False, ended=False): - resource = super(TestQueryJob, self)._makeResource( - started, ended) - config = resource['configuration']['query'] - config['query'] = self.QUERY - return resource - - def _verifyBooleanResourceProperties(self, job, config): - - if 'allowLargeResults' in config: - self.assertEqual(job.allow_large_results, - config['allowLargeResults']) - else: - self.assertTrue(job.allow_large_results is None) - if 'flattenResults' in config: - self.assertEqual(job.flatten_results, - config['flattenResults']) - else: - self.assertTrue(job.flatten_results is None) - if 'useQueryCache' in config: - self.assertEqual(job.use_query_cache, - config['useQueryCache']) - else: - self.assertTrue(job.use_query_cache is None) - - def _verifyResourceProperties(self, job, resource): - self._verifyReadonlyResourceProperties(job, resource) - - config = resource.get('configuration', {}).get('query') - self._verifyBooleanResourceProperties(job, config) - - if 'createDisposition' in config: - self.assertEqual(job.create_disposition, - 
config['createDisposition']) - else: - self.assertTrue(job.create_disposition is None) - if 'defaultDataset' in config: - dataset = job.default_dataset - ds_ref = { - 'projectId': dataset.project, - 'datasetId': dataset.name, - } - self.assertEqual(ds_ref, config['defaultDataset']) - else: - self.assertTrue(job.default_dataset is None) - if 'destinationTable' in config: - table = job.destination - tb_ref = { - 'projectId': table.project, - 'datasetId': table.dataset_name, - 'tableId': table.name - } - self.assertEqual(tb_ref, config['destinationTable']) - else: - self.assertTrue(job.destination is None) - if 'priority' in config: - self.assertEqual(job.priority, - config['priority']) - else: - self.assertTrue(job.priority is None) - if 'writeDisposition' in config: - self.assertEqual(job.write_disposition, - config['writeDisposition']) - else: - self.assertTrue(job.write_disposition is None) - - def test_ctor(self): - client = _Client(self.PROJECT) - job = self._makeOne(self.JOB_NAME, self.QUERY, client) - self.assertEqual(job.query, self.QUERY) - self.assertTrue(job._client is client) - self.assertEqual(job.job_type, self.JOB_TYPE) - self.assertEqual( - job.path, - '/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)) - - self._verifyInitialReadonlyProperties(job) - - # set/read from resource['configuration']['copy'] - self.assertTrue(job.allow_large_results is None) - self.assertTrue(job.create_disposition is None) - self.assertTrue(job.default_dataset is None) - self.assertTrue(job.destination is None) - self.assertTrue(job.flatten_results is None) - self.assertTrue(job.priority is None) - self.assertTrue(job.use_query_cache is None) - self.assertTrue(job.write_disposition is None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_missing_config(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': '%s:%s' % (self.PROJECT, self.DS_NAME), - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - } - } - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'id': self.JOB_ID, - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'query': {'query': self.QUERY} - }, - } - klass = self._getTargetClass() - job = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(job._client is client) - self._verifyResourceProperties(job, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - RESOURCE['configuration']['query']['destinationTable'] = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.DESTINATION_TABLE, - } - klass = self._getTargetClass() - dataset = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(dataset._client is client) - self._verifyResourceProperties(dataset, RESOURCE) - - def test_begin_w_bound_client(self): - PATH = 'projects/%s/jobs' % self.PROJECT - RESOURCE = self._makeResource() - # Ensure None for missing server-set props - del RESOURCE['statistics']['creationTime'] - del RESOURCE['etag'] - del RESOURCE['selfLink'] - del RESOURCE['user_email'] - conn = 
_Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - job = self._makeOne(self.JOB_NAME, self.QUERY, client) - - job.begin() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'query': { - 'query': self.QUERY, - }, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_begin_w_alternate_client(self): - from gcloud.bigquery.dataset import Dataset - from gcloud.bigquery.dataset import Table - PATH = 'projects/%s/jobs' % self.PROJECT - TABLE = 'TABLE' - DS_NAME = 'DATASET' - RESOURCE = self._makeResource(ended=True) - QUERY_CONFIGURATION = { - 'query': self.QUERY, - 'allowLargeResults': True, - 'createDisposition': 'CREATE_NEVER', - 'defaultDataset': { - 'projectId': self.PROJECT, - 'datasetId': DS_NAME, - }, - 'destinationTable': { - 'projectId': self.PROJECT, - 'datasetId': DS_NAME, - 'tableId': TABLE, - }, - 'flattenResults': True, - 'priority': 'INTERACTIVE', - 'useQueryCache': True, - 'writeDisposition': 'WRITE_TRUNCATE', - } - RESOURCE['configuration']['query'] = QUERY_CONFIGURATION - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - job = self._makeOne(self.JOB_NAME, self.QUERY, client1) - - dataset = Dataset(DS_NAME, client1) - table = Table(TABLE, dataset) - - job.allow_large_results = True - job.create_disposition = 'CREATE_NEVER' - job.default_dataset = dataset - job.destination = table - job.flatten_results = True - job.priority = 'INTERACTIVE' - job.use_query_cache = True - job.write_disposition = 'WRITE_TRUNCATE' - - job.begin(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'configuration': { - 'query': QUERY_CONFIGURATION, - }, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(job, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - job = self._makeOne(self.JOB_NAME, self.QUERY, client) - - self.assertFalse(job.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - job = self._makeOne(self.JOB_NAME, self.QUERY, client1) - - self.assertTrue(job.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - 
from gcloud.bigquery.dataset import Dataset - from gcloud.bigquery.dataset import Table - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - DS_NAME = 'DATASET' - DEST_TABLE = 'dest_table' - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - job = self._makeOne(self.JOB_NAME, self.QUERY, client) - - dataset = Dataset(DS_NAME, client) - table = Table(DEST_TABLE, dataset) - job.destination = table - - job.reload() - - self.assertEqual(job.destination, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME) - DS_NAME = 'DATASET' - DEST_TABLE = 'dest_table' - RESOURCE = self._makeResource() - q_config = RESOURCE['configuration']['query'] - q_config['destinationTable'] = { - 'projectId': self.PROJECT, - 'datasetId': DS_NAME, - 'tableId': DEST_TABLE, - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - job = self._makeOne(self.JOB_NAME, self.QUERY, client1) - - job.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(job, RESOURCE) - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - def dataset(self, name): - from gcloud.bigquery.dataset import Dataset - return Dataset(name, client=self) - - -class _Table(object): - - def __init__(self, name=None): - self._name = name - - @property - def name(self): - if self._name is not None: - return self._name - return TestLoadTableFromStorageJob.TABLE_NAME - - @property - def project(self): - return TestLoadTableFromStorageJob.PROJECT - - @property - def dataset_name(self): - return TestLoadTableFromStorageJob.DS_NAME - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response diff --git a/gcloud/bigquery/test_query.py b/gcloud/bigquery/test_query.py deleted file mode 100644 index bed46d9e85e3..000000000000 --- a/gcloud/bigquery/test_query.py +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class TestQueryResults(unittest2.TestCase): - PROJECT = 'project' - JOB_NAME = 'job_name' - JOB_NAME = 'test-synchronous-query' - JOB_TYPE = 'query' - QUERY = 'select count(*) from persons' - TOKEN = 'TOKEN' - - def _getTargetClass(self): - from gcloud.bigquery.query import QueryResults - return QueryResults - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _makeResource(self, complete=False): - resource = { - 'jobReference': { - 'projectId': self.PROJECT, - 'jobId': self.JOB_NAME, - }, - 'jobComplete': complete, - 'errors': [], - 'schema': { - 'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'}, - ], - }, - } - - if complete: - resource['totalRows'] = 1000 - resource['rows'] = [ - {'f': [ - {'v': 'Phred Phlyntstone'}, - {'v': 32}, - ]}, - {'f': [ - {'v': 'Bharney Rhubble'}, - {'v': 33}, - ]}, - {'f': [ - {'v': 'Wylma Phlyntstone'}, - {'v': 29}, - ]}, - {'f': [ - {'v': 'Bhettye Rhubble'}, - {'v': 27}, - ]}, - ] - resource['pageToken'] = self.TOKEN - resource['totalBytesProcessed'] = 100000 - resource['cacheHit'] = False - - return resource - - def _verifySchema(self, query, resource): - from gcloud.bigquery.table import SchemaField - if 'schema' in resource: - fields = resource['schema']['fields'] - self.assertEqual(len(query.schema), len(fields)) - for found, expected in zip(query.schema, fields): - self.assertTrue(isinstance(found, SchemaField)) - self.assertEqual(found.name, expected['name']) - self.assertEqual(found.field_type, expected['type']) - self.assertEqual(found.mode, expected['mode']) - self.assertEqual(found.description, - expected.get('description')) - self.assertEqual(found.fields, expected.get('fields')) - else: - self.assertTrue(query.schema is None) - - def _verifyRows(self, query, resource): - expected = resource.get('rows') - if expected is None: - self.assertEqual(query.rows, []) - else: - found = query.rows - self.assertEqual(len(found), len(expected)) - for f_row, e_row in zip(found, expected): - self.assertEqual(f_row, - tuple([cell['v'] for cell in e_row['f']])) - - def _verifyResourceProperties(self, query, resource): - self.assertEqual(query.cache_hit, resource.get('cacheHit')) - self.assertEqual(query.complete, resource.get('jobComplete')) - self.assertEqual(query.errors, resource.get('errors')) - self.assertEqual(query.page_token, resource.get('pageToken')) - self.assertEqual(query.total_rows, resource.get('totalRows')) - self.assertEqual(query.total_bytes_processed, - resource.get('totalBytesProcessed')) - - if 'jobReference' in resource: - self.assertEqual(query.name, resource['jobReference']['jobId']) - else: - self.assertTrue(query.name is None) - - self._verifySchema(query, resource) - self._verifyRows(query, resource) - - def test_ctor(self): - client = _Client(self.PROJECT) - query = self._makeOne(self.QUERY, client) - self.assertEqual(query.query, self.QUERY) - self.assertTrue(query._client is client) - - self.assertTrue(query.cache_hit is None) - self.assertTrue(query.complete is None) - self.assertTrue(query.errors is None) - self.assertTrue(query.name is None) - self.assertTrue(query.page_token is None) - self.assertEqual(query.rows, []) - self.assertTrue(query.schema is None) - self.assertTrue(query.total_rows is None) - self.assertTrue(query.total_bytes_processed is None) - - self.assertTrue(query.default_dataset is None) - self.assertTrue(query.max_results is None) - self.assertTrue(query.preserve_nulls is 
None) - self.assertTrue(query.use_query_cache is None) - - def test_job_wo_jobid(self): - client = _Client(self.PROJECT) - query = self._makeOne(self.QUERY, client) - self.assertTrue(query.job is None) - - def test_job_w_jobid(self): - from gcloud.bigquery.job import QueryJob - SERVER_GENERATED = 'SERVER_GENERATED' - client = _Client(self.PROJECT) - query = self._makeOne(self.QUERY, client) - query._properties['jobReference'] = { - 'projectId': self.PROJECT, - 'jobId': SERVER_GENERATED, - } - job = query.job - self.assertTrue(isinstance(job, QueryJob)) - self.assertEqual(job.query, self.QUERY) - self.assertTrue(job._client is client) - self.assertEqual(job.name, SERVER_GENERATED) - fetched_later = query.job - self.assertTrue(fetched_later is job) - - def test_schema(self): - client = _Client(self.PROJECT) - query = self._makeOne(self.QUERY, client) - self._verifyResourceProperties(query, {}) - resource = { - 'schema': { - 'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'}, - ], - }, - } - query._set_properties(resource) - self._verifyResourceProperties(query, resource) - - def test_run_w_bound_client(self): - PATH = 'projects/%s/queries' % self.PROJECT - RESOURCE = self._makeResource(complete=False) - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - query = self._makeOne(self.QUERY, client) - - query.run() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = {'query': self.QUERY} - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(query, RESOURCE) - - def test_run_w_alternate_client(self): - PATH = 'projects/%s/queries' % self.PROJECT - RESOURCE = self._makeResource(complete=True) - DATASET = 'test_dataset' - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - query = self._makeOne(self.QUERY, client1) - - query.default_dataset = client2.dataset(DATASET) - query.max_results = 100 - query.preserve_nulls = True - query.timeout_ms = 20000 - query.use_query_cache = False - - query.run(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'query': self.QUERY, - 'defaultDataset': { - 'projectId': self.PROJECT, - 'datasetId': DATASET, - }, - 'maxResults': 100, - 'preserveNulls': True, - 'timeoutMs': 20000, - 'useQueryCache': False, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(query, RESOURCE) - - def test_fetch_data_query_not_yet_run(self): - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - query = self._makeOne(self.QUERY, client) - self.assertRaises(ValueError, query.fetch_data) - - def test_fetch_data_w_bound_client(self): - PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME) - BEFORE = self._makeResource(complete=False) - AFTER = self._makeResource(complete=True) - - conn = _Connection(AFTER) - client = _Client(project=self.PROJECT, connection=conn) - query = self._makeOne(self.QUERY, client) - query._set_properties(BEFORE) - self.assertFalse(query.complete) - - rows, total_rows, page_token = query.fetch_data() - - self.assertTrue(query.complete) - 
self.assertEqual(len(rows), 4) - self.assertEqual(rows[0], ('Phred Phlyntstone', 32)) - self.assertEqual(rows[1], ('Bharney Rhubble', 33)) - self.assertEqual(rows[2], ('Wylma Phlyntstone', 29)) - self.assertEqual(rows[3], ('Bhettye Rhubble', 27)) - self.assertEqual(total_rows, AFTER['totalRows']) - self.assertEqual(page_token, AFTER['pageToken']) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_fetch_data_w_alternate_client(self): - PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME) - MAX = 10 - TOKEN = 'TOKEN' - START = 2257 - TIMEOUT = 20000 - BEFORE = self._makeResource(complete=False) - AFTER = self._makeResource(complete=True) - - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(AFTER) - client2 = _Client(project=self.PROJECT, connection=conn2) - query = self._makeOne(self.QUERY, client1) - query._set_properties(BEFORE) - self.assertFalse(query.complete) - - rows, total_rows, page_token = query.fetch_data( - client=client2, max_results=MAX, page_token=TOKEN, - start_index=START, timeout_ms=TIMEOUT) - - self.assertTrue(query.complete) - self.assertEqual(len(rows), 4) - self.assertEqual(rows[0], ('Phred Phlyntstone', 32)) - self.assertEqual(rows[1], ('Bharney Rhubble', 33)) - self.assertEqual(rows[2], ('Wylma Phlyntstone', 29)) - self.assertEqual(rows[3], ('Bhettye Rhubble', 27)) - self.assertEqual(total_rows, AFTER['totalRows']) - self.assertEqual(page_token, AFTER['pageToken']) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': MAX, - 'pageToken': TOKEN, - 'startIndex': START, - 'timeoutMs': TIMEOUT}) - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - def dataset(self, name): - from gcloud.bigquery.dataset import Dataset - return Dataset(name, client=self) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: # pragma: NO COVER - raise NotFound('miss') - else: - return response diff --git a/gcloud/bigquery/test_table.py b/gcloud/bigquery/test_table.py deleted file mode 100644 index dd50ce8ab466..000000000000 --- a/gcloud/bigquery/test_table.py +++ /dev/null @@ -1,1680 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class TestSchemaField(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigquery.table import SchemaField - return SchemaField - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - field = self._makeOne('test', 'STRING') - self.assertEqual(field.name, 'test') - self.assertEqual(field.field_type, 'STRING') - self.assertEqual(field.mode, 'NULLABLE') - self.assertEqual(field.description, None) - self.assertEqual(field.fields, None) - - def test_ctor_explicit(self): - field = self._makeOne('test', 'STRING', mode='REQUIRED', - description='Testing') - self.assertEqual(field.name, 'test') - self.assertEqual(field.field_type, 'STRING') - self.assertEqual(field.mode, 'REQUIRED') - self.assertEqual(field.description, 'Testing') - self.assertEqual(field.fields, None) - - def test_ctor_subfields(self): - field = self._makeOne('phone_number', 'RECORD', - fields=[self._makeOne('area_code', 'STRING'), - self._makeOne('local_number', 'STRING')]) - self.assertEqual(field.name, 'phone_number') - self.assertEqual(field.field_type, 'RECORD') - self.assertEqual(field.mode, 'NULLABLE') - self.assertEqual(field.description, None) - self.assertEqual(len(field.fields), 2) - self.assertEqual(field.fields[0].name, 'area_code') - self.assertEqual(field.fields[0].field_type, 'STRING') - self.assertEqual(field.fields[0].mode, 'NULLABLE') - self.assertEqual(field.fields[0].description, None) - self.assertEqual(field.fields[0].fields, None) - self.assertEqual(field.fields[1].name, 'local_number') - self.assertEqual(field.fields[1].field_type, 'STRING') - self.assertEqual(field.fields[1].mode, 'NULLABLE') - self.assertEqual(field.fields[1].description, None) - self.assertEqual(field.fields[1].fields, None) - - -class _SchemaBase(object): - - def _verify_field(self, field, r_field): - self.assertEqual(field.name, r_field['name']) - self.assertEqual(field.field_type, r_field['type']) - self.assertEqual(field.mode, r_field.get('mode', 'NULLABLE')) - - def _verifySchema(self, schema, resource): - r_fields = resource['schema']['fields'] - self.assertEqual(len(schema), len(r_fields)) - - for field, r_field in zip(schema, r_fields): - self._verify_field(field, r_field) - - -class TestTable(unittest2.TestCase, _SchemaBase): - PROJECT = 'project' - DS_NAME = 'dataset-name' - TABLE_NAME = 'table-name' - - def _getTargetClass(self): - from gcloud.bigquery.table import Table - return Table - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - import datetime - from gcloud._helpers import UTC - - self.WHEN_TS = 1437767599.006 - self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace( - tzinfo=UTC) - self.ETAG = 'ETAG' - self.TABLE_ID = '%s:%s:%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - self.RESOURCE_URL = 'http://example.com/path/to/resource' - self.NUM_BYTES = 12345 - self.NUM_ROWS = 67 - - def _makeResource(self): - self._setUpConstants() - return { - 'creationTime': self.WHEN_TS * 1000, - 'tableReference': - {'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}]}, - 'etag': 'ETAG', - 'id': self.TABLE_ID, - 'lastModifiedTime': self.WHEN_TS * 1000, - 'location': 'US', - 'selfLink': self.RESOURCE_URL, - 'numRows': self.NUM_ROWS, - 'numBytes': self.NUM_BYTES, - 'type': 
'TABLE', - } - - def _verifyReadonlyResourceProperties(self, table, resource): - if 'creationTime' in resource: - self.assertEqual(table.created, self.WHEN) - else: - self.assertEqual(table.created, None) - - if 'etag' in resource: - self.assertEqual(table.etag, self.ETAG) - else: - self.assertEqual(table.etag, None) - - if 'numRows' in resource: - self.assertEqual(table.num_rows, self.NUM_ROWS) - else: - self.assertEqual(table.num_rows, None) - - if 'numBytes' in resource: - self.assertEqual(table.num_bytes, self.NUM_BYTES) - else: - self.assertEqual(table.num_bytes, None) - - if 'selfLink' in resource: - self.assertEqual(table.self_link, self.RESOURCE_URL) - else: - self.assertEqual(table.self_link, None) - - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertEqual(table.table_type, - 'TABLE' if 'view' not in resource else 'VIEW') - - def _verifyResourceProperties(self, table, resource): - - self._verifyReadonlyResourceProperties(table, resource) - - if 'expirationTime' in resource: - self.assertEqual(table.expires, self.EXP_TIME) - else: - self.assertEqual(table.expires, None) - - self.assertEqual(table.description, resource.get('description')) - self.assertEqual(table.friendly_name, resource.get('friendlyName')) - self.assertEqual(table.location, resource.get('location')) - - if 'view' in resource: - self.assertEqual(table.view_query, resource['view']['query']) - else: - self.assertEqual(table.view_query, None) - - if 'schema' in resource: - self._verifySchema(table.schema, resource) - else: - self.assertEqual(table.schema, []) - - def test_ctor(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - self.assertEqual(table.name, self.TABLE_NAME) - self.assertTrue(table._dataset is dataset) - self.assertEqual(table.project, self.PROJECT) - self.assertEqual(table.dataset_name, self.DS_NAME) - self.assertEqual( - table.path, - '/projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME)) - self.assertEqual(table.schema, []) - - self.assertEqual(table.created, None) - self.assertEqual(table.etag, None) - self.assertEqual(table.modified, None) - self.assertEqual(table.num_bytes, None) - self.assertEqual(table.num_rows, None) - self.assertEqual(table.self_link, None) - self.assertEqual(table.table_id, None) - self.assertEqual(table.table_type, None) - - self.assertEqual(table.description, None) - self.assertEqual(table.expires, None) - self.assertEqual(table.friendly_name, None) - self.assertEqual(table.location, None) - self.assertEqual(table.view_query, None) - - def test_ctor_w_schema(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = self._makeOne(self.TABLE_NAME, dataset, - schema=[full_name, age]) - self.assertEqual(table.schema, [full_name, age]) - - def test_num_bytes_getter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - - # Check with no value set. - self.assertEqual(table.num_bytes, None) - - num_bytes = 1337 - # Check with integer value set. - table._properties = {'numBytes': num_bytes} - self.assertEqual(table.num_bytes, num_bytes) - - # Check with a string value set. - table._properties = {'numBytes': str(num_bytes)} - self.assertEqual(table.num_bytes, num_bytes) - - # Check with invalid int value. 
- table._properties = {'numBytes': 'x'} - with self.assertRaises(ValueError): - getattr(table, 'num_bytes') - - def test_num_rows_getter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - - # Check with no value set. - self.assertEqual(table.num_rows, None) - - num_rows = 42 - # Check with integer value set. - table._properties = {'numRows': num_rows} - self.assertEqual(table.num_rows, num_rows) - - # Check with a string value set. - table._properties = {'numRows': str(num_rows)} - self.assertEqual(table.num_rows, num_rows) - - # Check with invalid int value. - table._properties = {'numRows': 'x'} - with self.assertRaises(ValueError): - getattr(table, 'num_rows') - - def test_schema_setter_non_list(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(TypeError): - table.schema = object() - - def test_schema_setter_invalid_field(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - with self.assertRaises(ValueError): - table.schema = [full_name, object()] - - def test_schema_setter(self): - from gcloud.bigquery.table import SchemaField - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table.schema = [full_name, age] - self.assertEqual(table.schema, [full_name, age]) - - def test_props_set_by_server(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _millis - - CREATED = datetime.datetime(2015, 7, 29, 12, 13, 22, tzinfo=UTC) - MODIFIED = datetime.datetime(2015, 7, 29, 14, 47, 15, tzinfo=UTC) - TABLE_ID = '%s:%s:%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - URL = 'http://example.com/projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table._properties['creationTime'] = _millis(CREATED) - table._properties['etag'] = 'ETAG' - table._properties['lastModifiedTime'] = _millis(MODIFIED) - table._properties['numBytes'] = 12345 - table._properties['numRows'] = 66 - table._properties['selfLink'] = URL - table._properties['id'] = TABLE_ID - table._properties['type'] = 'TABLE' - - self.assertEqual(table.created, CREATED) - self.assertEqual(table.etag, 'ETAG') - self.assertEqual(table.modified, MODIFIED) - self.assertEqual(table.num_bytes, 12345) - self.assertEqual(table.num_rows, 66) - self.assertEqual(table.self_link, URL) - self.assertEqual(table.table_id, TABLE_ID) - self.assertEqual(table.table_type, 'TABLE') - - def test_description_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(ValueError): - table.description = 12345 - - def test_description_setter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.description = 'DESCRIPTION' - self.assertEqual(table.description, 'DESCRIPTION') - - def test_expires_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = 
self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(ValueError): - table.expires = object() - - def test_expires_setter(self): - import datetime - from gcloud._helpers import UTC - - WHEN = datetime.datetime(2015, 7, 28, 16, 39, tzinfo=UTC) - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.expires = WHEN - self.assertEqual(table.expires, WHEN) - - def test_friendly_name_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(ValueError): - table.friendly_name = 12345 - - def test_friendly_name_setter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.friendly_name = 'FRIENDLY' - self.assertEqual(table.friendly_name, 'FRIENDLY') - - def test_location_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(ValueError): - table.location = 12345 - - def test_location_setter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.location = 'LOCATION' - self.assertEqual(table.location, 'LOCATION') - - def test_view_query_setter_bad_value(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - with self.assertRaises(ValueError): - table.view_query = 12345 - - def test_view_query_setter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.view_query = 'select * from foo' - self.assertEqual(table.view_query, 'select * from foo') - - def test_view_query_deleter(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - table.view_query = 'select * from foo' - del table.view_query - self.assertEqual(table.view_query, None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - dataset = _Dataset(client) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, dataset) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - dataset = _Dataset(client) - RESOURCE = { - 'id': '%s:%s:%s' % (self.PROJECT, self.DS_NAME, self.TABLE_NAME), - 'tableReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - }, - 'type': 'TABLE', - } - klass = self._getTargetClass() - table = klass.from_api_repr(RESOURCE, dataset) - self.assertEqual(table.name, self.TABLE_NAME) - self.assertTrue(table._dataset is dataset) - self._verifyResourceProperties(table, RESOURCE) - - def test_from_api_repr_w_properties(self): - client = _Client(self.PROJECT) - dataset = _Dataset(client) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - table = klass.from_api_repr(RESOURCE, dataset) - self.assertTrue(table._dataset._client is client) - self._verifyResourceProperties(table, RESOURCE) - - def test_create_no_view_query_no_schema(self): - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset) - - with self.assertRaises(ValueError): - table.create() - - def test_create_w_bound_client(self): - 
from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = self._makeOne(self.TABLE_NAME, dataset, - schema=[full_name, age]) - - table.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'tableReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}]}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(table, RESOURCE) - - def test_create_w_alternate_client(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _millis - from gcloud.bigquery.table import SchemaField - - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - QUERY = 'select fullname, age from person_ages' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, - tzinfo=UTC) - RESOURCE['expirationTime'] = _millis(self.EXP_TIME) - RESOURCE['view'] = {} - RESOURCE['view']['query'] = QUERY - RESOURCE['type'] = 'VIEW' - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client=client1) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age]) - table.friendly_name = TITLE - table.description = DESCRIPTION - table.view_query = QUERY - - table.create(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'tableReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'description': DESCRIPTION, - 'friendlyName': TITLE, - 'view': {'query': QUERY}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(table, RESOURCE) - - def test_create_w_missing_output_properties(self): - # In the wild, the resource returned from 'dataset.create' sometimes - # lacks 'creationTime' / 'lastModifiedTime' - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables' % (self.PROJECT, self.DS_NAME) - RESOURCE = self._makeResource() - del RESOURCE['creationTime'] - del RESOURCE['lastModifiedTime'] - self.WHEN = None - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = self._makeOne(self.TABLE_NAME, dataset, - schema=[full_name, age]) - - table.create() - - self.assertEqual(len(conn._requested), 1) - req = 
conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'tableReference': { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}]}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(table, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - self.assertFalse(table.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - self.assertTrue(table.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(table, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - RESOURCE = self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(table, RESOURCE) - - def test_patch_w_invalid_expiration(self): - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - with self.assertRaises(ValueError): - table.patch(expires='BOGUS') - - def test_patch_w_bound_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - DESCRIPTION = 
'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.patch(description=DESCRIPTION, - friendly_name=TITLE, - view_query=None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PATCH') - SENT = { - 'description': DESCRIPTION, - 'friendlyName': TITLE, - 'view': None, - } - self.assertEqual(req['data'], SENT) - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(table, RESOURCE) - - def test_patch_w_alternate_client(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _millis - from gcloud.bigquery.table import SchemaField - - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - QUERY = 'select fullname, age from person_ages' - LOCATION = 'EU' - RESOURCE = self._makeResource() - RESOURCE['view'] = {'query': QUERY} - RESOURCE['type'] = 'VIEW' - RESOURCE['location'] = LOCATION - self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, - tzinfo=UTC) - RESOURCE['expirationTime'] = _millis(self.EXP_TIME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='NULLABLE') - - table.patch(client=client2, view_query=QUERY, location=LOCATION, - expires=self.EXP_TIME, schema=[full_name, age]) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PATCH') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'view': {'query': QUERY}, - 'location': LOCATION, - 'expirationTime': _millis(self.EXP_TIME), - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'NULLABLE'}]}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(table, RESOURCE) - - def test_patch_w_schema_None(self): - # Simulate deleting schema: not sure if back-end will actually - # allow this operation, but the spec says it is optional. 
- PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.patch(schema=None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PATCH') - SENT = {'schema': None} - self.assertEqual(req['data'], SENT) - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(table, RESOURCE) - - def test_update_w_bound_client(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - DESCRIPTION = 'DESCRIPTION' - TITLE = 'TITLE' - RESOURCE = self._makeResource() - RESOURCE['description'] = DESCRIPTION - RESOURCE['friendlyName'] = TITLE - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age]) - table.description = DESCRIPTION - table.friendly_name = TITLE - - table.update() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PUT') - SENT = { - 'tableReference': - {'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}]}, - 'description': DESCRIPTION, - 'friendlyName': TITLE, - } - self.assertEqual(req['data'], SENT) - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(table, RESOURCE) - - def test_update_w_alternate_client(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _millis - from gcloud.bigquery.table import SchemaField - - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - DEF_TABLE_EXP = 12345 - LOCATION = 'EU' - QUERY = 'select fullname, age from person_ages' - RESOURCE = self._makeResource() - RESOURCE['defaultTableExpirationMs'] = 12345 - RESOURCE['location'] = LOCATION - self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, - tzinfo=UTC) - RESOURCE['expirationTime'] = _millis(self.EXP_TIME) - RESOURCE['view'] = {'query': QUERY} - RESOURCE['type'] = 'VIEW' - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - table.default_table_expiration_ms = DEF_TABLE_EXP - table.location = LOCATION - table.expires = self.EXP_TIME - table.view_query = QUERY - - table.update(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'tableReference': - {'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME}, - 'expirationTime': _millis(self.EXP_TIME), - 
'location': 'EU', - 'view': {'query': QUERY}, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(table, RESOURCE) - - def test_delete_w_bound_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.delete() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_delete_w_alternate_client(self): - PATH = 'projects/%s/datasets/%s/tables/%s' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - - table.delete(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_fetch_data_w_bound_client(self): - import datetime - from gcloud._helpers import UTC - from gcloud.bigquery.table import SchemaField - - PATH = 'projects/%s/datasets/%s/tables/%s/data' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - WHEN_TS = 1437767599.006 - WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace( - tzinfo=UTC) - WHEN_1 = WHEN + datetime.timedelta(seconds=1) - WHEN_2 = WHEN + datetime.timedelta(seconds=2) - ROWS = 1234 - TOKEN = 'TOKEN' - - def _bigquery_timestamp_float_repr(ts_float): - # Preserve microsecond precision for E+09 timestamps - return '%0.15E' % (ts_float,) - - DATA = { - 'totalRows': ROWS, - 'pageToken': TOKEN, - 'rows': [ - {'f': [ - {'v': 'Phred Phlyntstone'}, - {'v': '32'}, - {'v': _bigquery_timestamp_float_repr(WHEN_TS)}, - ]}, - {'f': [ - {'v': 'Bharney Rhubble'}, - {'v': '33'}, - {'v': _bigquery_timestamp_float_repr(WHEN_TS + 1)}, - ]}, - {'f': [ - {'v': 'Wylma Phlyntstone'}, - {'v': '29'}, - {'v': _bigquery_timestamp_float_repr(WHEN_TS + 2)}, - ]}, - {'f': [ - {'v': 'Bhettye Rhubble'}, - {'v': None}, - {'v': None}, - ]}, - ] - } - - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='NULLABLE') - joined = SchemaField('joined', 'TIMESTAMP', mode='NULLABLE') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age, joined]) - - rows, total_rows, page_token = table.fetch_data() - - self.assertEqual(len(rows), 4) - self.assertEqual(rows[0], ('Phred Phlyntstone', 32, WHEN)) - self.assertEqual(rows[1], ('Bharney Rhubble', 33, WHEN_1)) - self.assertEqual(rows[2], ('Wylma Phlyntstone', 29, WHEN_2)) - self.assertEqual(rows[3], ('Bhettye Rhubble', None, None)) - self.assertEqual(total_rows, ROWS) - self.assertEqual(page_token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_fetch_data_w_alternate_client(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/data' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - MAX = 10 - 
ROWS = 1234 - TOKEN = 'TOKEN' - DATA = { - 'totalRows': ROWS, - 'rows': [ - {'f': [ - {'v': 'Phred Phlyntstone'}, - {'v': '32'}, - {'v': 'true'}, - {'v': '3.1415926'}, - ]}, - {'f': [ - {'v': 'Bharney Rhubble'}, - {'v': '33'}, - {'v': 'false'}, - {'v': '1.414'}, - ]}, - {'f': [ - {'v': 'Wylma Phlyntstone'}, - {'v': '29'}, - {'v': 'true'}, - {'v': '2.71828'}, - ]}, - {'f': [ - {'v': 'Bhettye Rhubble'}, - {'v': '27'}, - {'v': None}, - {'v': None}, - ]}, - ] - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(DATA) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - voter = SchemaField('voter', 'BOOLEAN', mode='NULLABLE') - score = SchemaField('score', 'FLOAT', mode='NULLABLE') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age, voter, score]) - - rows, total_rows, page_token = table.fetch_data(client=client2, - max_results=MAX, - page_token=TOKEN) - - self.assertEqual(len(rows), 4) - self.assertEqual(rows[0], ('Phred Phlyntstone', 32, True, 3.1415926)) - self.assertEqual(rows[1], ('Bharney Rhubble', 33, False, 1.414)) - self.assertEqual(rows[2], ('Wylma Phlyntstone', 29, True, 2.71828)) - self.assertEqual(rows[3], ('Bhettye Rhubble', 27, None, None)) - self.assertEqual(total_rows, ROWS) - self.assertEqual(page_token, None) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': MAX, 'pageToken': TOKEN}) - - def test_fetch_data_w_repeated_fields(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/data' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - ROWS = 1234 - TOKEN = 'TOKEN' - DATA = { - 'totalRows': ROWS, - 'pageToken': TOKEN, - 'rows': [ - {'f': [ - {'v': ['red', 'green']}, - {'v': [{'f': [{'v': ['1', '2']}, - {'v': ['3.1415', '1.414']}]}]}, - ]}, - ] - } - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('color', 'STRING', mode='REPEATED') - index = SchemaField('index', 'INTEGER', 'REPEATED') - score = SchemaField('score', 'FLOAT', 'REPEATED') - struct = SchemaField('struct', 'RECORD', mode='REPEATED', - fields=[index, score]) - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, struct]) - - rows, total_rows, page_token = table.fetch_data() - - self.assertEqual(len(rows), 1) - self.assertEqual(rows[0][0], ['red', 'green']) - self.assertEqual(rows[0][1], [{'index': [1, 2], - 'score': [3.1415, 1.414]}]) - self.assertEqual(total_rows, ROWS) - self.assertEqual(page_token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_fetch_data_w_record_schema(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/data' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - ROWS = 1234 - TOKEN = 'TOKEN' - DATA = { - 'totalRows': ROWS, - 'pageToken': TOKEN, - 'rows': [ - {'f': [ - {'v': 'Phred Phlyntstone'}, - {'v': {'f': [{'v': '800'}, {'v': '555-1212'}, {'v': 1}]}}, - ]}, - {'f': [ - {'v': 'Bharney Rhubble'}, - {'v': 
{'f': [{'v': '877'}, {'v': '768-5309'}, {'v': 2}]}}, - ]}, - {'f': [ - {'v': 'Wylma Phlyntstone'}, - {'v': None}, - ]}, - ] - } - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - area_code = SchemaField('area_code', 'STRING', 'REQUIRED') - local_number = SchemaField('local_number', 'STRING', 'REQUIRED') - rank = SchemaField('rank', 'INTEGER', 'REQUIRED') - phone = SchemaField('phone', 'RECORD', mode='NULLABLE', - fields=[area_code, local_number, rank]) - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, phone]) - - rows, total_rows, page_token = table.fetch_data() - - self.assertEqual(len(rows), 3) - self.assertEqual(rows[0][0], 'Phred Phlyntstone') - self.assertEqual(rows[0][1], {'area_code': '800', - 'local_number': '555-1212', - 'rank': 1}) - self.assertEqual(rows[1][0], 'Bharney Rhubble') - self.assertEqual(rows[1][1], {'area_code': '877', - 'local_number': '768-5309', - 'rank': 2}) - self.assertEqual(rows[2][0], 'Wylma Phlyntstone') - self.assertEqual(rows[2][1], None) - self.assertEqual(total_rows, ROWS) - self.assertEqual(page_token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_insert_data_w_bound_client(self): - import datetime - from gcloud._helpers import UTC - from gcloud._helpers import _microseconds_from_datetime - from gcloud.bigquery.table import SchemaField - - WHEN_TS = 1437767599.006 - WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace( - tzinfo=UTC) - PATH = 'projects/%s/datasets/%s/tables/%s/insertAll' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - joined = SchemaField('joined', 'TIMESTAMP', mode='NULLABLE') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age, joined]) - ROWS = [ - ('Phred Phlyntstone', 32, WHEN), - ('Bharney Rhubble', 33, WHEN + datetime.timedelta(seconds=1)), - ('Wylma Phlyntstone', 29, WHEN + datetime.timedelta(seconds=2)), - ('Bhettye Rhubble', 27, None), - ] - - def _row_data(row): - joined = None - if row[2] is not None: - joined = _microseconds_from_datetime(row[2]) * 1e-6 - return {'full_name': row[0], - 'age': row[1], - 'joined': joined} - - SENT = { - 'rows': [{'json': _row_data(row)} for row in ROWS], - } - - errors = table.insert_data(ROWS) - - self.assertEqual(len(errors), 0) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['data'], SENT) - - def test_insert_data_w_alternate_client(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/insertAll' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - RESPONSE = { - 'insertErrors': [ - {'index': 1, - 'errors': [ - {'reason': 'REASON', - 'location': 'LOCATION', - 'debugInfo': 'INFO', - 'message': 'MESSAGE'} - ]}, - ]} - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESPONSE) - client2 = _Client(project=self.PROJECT, connection=conn2) - dataset = _Dataset(client1) - full_name = SchemaField('full_name', 
'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - voter = SchemaField('voter', 'BOOLEAN', mode='NULLABLE') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age, voter]) - ROWS = [ - ('Phred Phlyntstone', 32, True), - ('Bharney Rhubble', 33, False), - ('Wylma Phlyntstone', 29, True), - ('Bhettye Rhubble', 27, True), - ] - - def _row_data(row): - return {'full_name': row[0], 'age': row[1], 'voter': row[2]} - - SENT = { - 'skipInvalidRows': True, - 'ignoreUnknownValues': True, - 'templateSuffix': '20160303', - 'rows': [{'insertId': index, 'json': _row_data(row)} - for index, row in enumerate(ROWS)], - } - - errors = table.insert_data( - client=client2, - rows=ROWS, - row_ids=[index for index, _ in enumerate(ROWS)], - skip_invalid_rows=True, - ignore_unknown_values=True, - template_suffix='20160303', - ) - - self.assertEqual(len(errors), 1) - self.assertEqual(errors[0]['index'], 1) - self.assertEqual(len(errors[0]['errors']), 1) - self.assertEqual(errors[0]['errors'][0], - RESPONSE['insertErrors'][0]['errors'][0]) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['data'], SENT) - - def test_insert_data_w_repeated_fields(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/insertAll' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('color', 'STRING', mode='REPEATED') - index = SchemaField('index', 'INTEGER', 'REPEATED') - score = SchemaField('score', 'FLOAT', 'REPEATED') - struct = SchemaField('struct', 'RECORD', mode='REPEATED', - fields=[index, score]) - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, struct]) - ROWS = [ - (['red', 'green'], [{'index': [1, 2], 'score': [3.1415, 1.414]}]), - ] - - def _row_data(row): - return {'color': row[0], - 'struct': row[1]} - - SENT = { - 'rows': [{'json': _row_data(row)} for row in ROWS], - } - - errors = table.insert_data(ROWS) - - self.assertEqual(len(errors), 0) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['data'], SENT) - - def test_insert_data_w_record_schema(self): - from gcloud.bigquery.table import SchemaField - PATH = 'projects/%s/datasets/%s/tables/%s/insertAll' % ( - self.PROJECT, self.DS_NAME, self.TABLE_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - area_code = SchemaField('area_code', 'STRING', 'REQUIRED') - local_number = SchemaField('local_number', 'STRING', 'REQUIRED') - rank = SchemaField('rank', 'INTEGER', 'REQUIRED') - phone = SchemaField('phone', 'RECORD', mode='NULLABLE', - fields=[area_code, local_number, rank]) - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, phone]) - ROWS = [ - ('Phred Phlyntstone', {'area_code': '800', - 'local_number': '555-1212', - 'rank': 1}), - ('Bharney Rhubble', {'area_code': '877', - 'local_number': '768-5309', - 'rank': 2}), - ('Wylma Phlyntstone', None), - ] - - def _row_data(row): - return {'full_name': row[0], - 'phone': row[1]} - - SENT = { - 
'rows': [{'json': _row_data(row)} for row in ROWS], - } - - errors = table.insert_data(ROWS) - - self.assertEqual(len(errors), 0) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['data'], SENT) - - def test_upload_from_file_size_failure(self): - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - dataset = _Dataset(client) - file_obj = object() - table = self._makeOne(self.TABLE_NAME, dataset=dataset) - with self.assertRaises(ValueError): - table.upload_from_file(file_obj, 'CSV', size=None) - - def _upload_from_file_helper(self, **kw): - import csv - import datetime - from six.moves.http_client import OK - from gcloud._helpers import UTC - from gcloud._testing import _NamedTemporaryFile - from gcloud.bigquery.table import SchemaField - - WHEN_TS = 1437767599.006 - WHEN = datetime.datetime.utcfromtimestamp(WHEN_TS).replace( - tzinfo=UTC) - PATH = 'projects/%s/jobs' % (self.PROJECT,) - response = {'status': OK} - conn = _Connection( - (response, b'{}'), - ) - client = _Client(project=self.PROJECT, connection=conn) - expected_job = object() - if 'client' in kw: - kw['client']._job = expected_job - else: - client._job = expected_job - dataset = _Dataset(client) - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - joined = SchemaField('joined', 'TIMESTAMP', mode='NULLABLE') - table = self._makeOne(self.TABLE_NAME, dataset=dataset, - schema=[full_name, age, joined]) - ROWS = [ - ('Phred Phlyntstone', 32, WHEN), - ('Bharney Rhubble', 33, WHEN + datetime.timedelta(seconds=1)), - ('Wylma Phlyntstone', 29, WHEN + datetime.timedelta(seconds=2)), - ('Bhettye Rhubble', 27, None), - ] - - with _NamedTemporaryFile() as temp: - with open(temp.name, 'w') as file_obj: - writer = csv.writer(file_obj) - writer.writerow(('full_name', 'age', 'joined')) - writer.writerows(ROWS) - - with open(temp.name, 'rb') as file_obj: - BODY = file_obj.read() - explicit_size = kw.pop('_explicit_size', False) - if explicit_size: - kw['size'] = len(BODY) - job = table.upload_from_file( - file_obj, 'CSV', rewind=True, **kw) - - self.assertTrue(job is expected_job) - return conn.http._requested, PATH, BODY - - def test_upload_from_file_w_bound_client_multipart(self): - from email.parser import Parser - import json - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - - requested, PATH, BODY = self._upload_from_file_helper() - - self.assertEqual(len(requested), 1) - req = requested[0] - self.assertEqual(req['method'], 'POST') - uri = req['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/%s' % PATH) - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'multipart'}) - - parser = Parser() - ctype, boundary = [x.strip() - for x in req['headers']['content-type'].split(';')] - self.assertEqual(ctype, 'multipart/related') - self.assertTrue(boundary.startswith('boundary="==')) - self.assertTrue(boundary.endswith('=="')) - - divider = '--' + boundary[len('boundary="'):-1] - chunks = req['body'].split(divider)[1:-1] # discard prolog / epilog - self.assertEqual(len(chunks), 2) - - text_msg = parser.parsestr(chunks[0].strip()) - self.assertEqual(dict(text_msg._headers), - {'Content-Type': 'application/json', - 'MIME-Version': '1.0'}) - metadata = 
json.loads(text_msg._payload) - load_config = metadata['configuration']['load'] - DESTINATION_TABLE = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - } - self.assertEqual(load_config['destinationTable'], DESTINATION_TABLE) - self.assertEqual(load_config['sourceFormat'], 'CSV') - - app_msg = parser.parsestr(chunks[1].strip()) - self.assertEqual(dict(app_msg._headers), - {'Content-Type': 'application/octet-stream', - 'Content-Transfer-Encoding': 'binary', - 'MIME-Version': '1.0'}) - body = BODY.decode('ascii').rstrip() - body_lines = [line.strip() for line in body.splitlines()] - payload_lines = app_msg._payload.rstrip().splitlines() - self.assertEqual(payload_lines, body_lines) - - # pylint: disable=R0915 - def test_upload_from_file_w_explicit_client_resumable(self): - import json - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud._testing import _Monkey - from gcloud.bigquery import table as MUT - - UPLOAD_PATH = 'https://example.com/upload/test' - initial_response = {'status': OK, 'location': UPLOAD_PATH} - upload_response = {'status': OK} - conn = _Connection( - (initial_response, b'{}'), - (upload_response, b'{}'), - ) - client = _Client(project=self.PROJECT, connection=conn) - - class _UploadConfig(object): - accept = ['*/*'] - max_size = None - resumable_multipart = True - resumable_path = u'/upload/bigquery/v2/projects/{project}/jobs' - simple_multipart = True - simple_path = u'' # force resumable - - with _Monkey(MUT, _UploadConfig=_UploadConfig): - orig_requested, PATH, BODY = self._upload_from_file_helper( - allow_jagged_rows=False, - allow_quoted_newlines=False, - create_disposition='CREATE_IF_NEEDED', - encoding='utf8', - field_delimiter=',', - ignore_unknown_values=False, - max_bad_records=0, - quote_character='"', - skip_leading_rows=1, - write_disposition='WRITE_APPEND', - client=client, - _explicit_size=True) - - self.assertEqual(len(orig_requested), 0) - - requested = conn.http._requested - self.assertEqual(len(requested), 2) - req = requested[0] - self.assertEqual(req['method'], 'POST') - uri = req['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/%s' % PATH) - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'resumable'}) - - self.assertEqual(req['headers']['content-type'], 'application/json') - metadata = json.loads(req['body']) - load_config = metadata['configuration']['load'] - DESTINATION_TABLE = { - 'projectId': self.PROJECT, - 'datasetId': self.DS_NAME, - 'tableId': self.TABLE_NAME, - } - self.assertEqual(load_config['destinationTable'], DESTINATION_TABLE) - self.assertEqual(load_config['sourceFormat'], 'CSV') - self.assertEqual(load_config['allowJaggedRows'], False) - self.assertEqual(load_config['allowQuotedNewlines'], False) - self.assertEqual(load_config['createDisposition'], 'CREATE_IF_NEEDED') - self.assertEqual(load_config['encoding'], 'utf8') - self.assertEqual(load_config['fieldDelimiter'], ',') - self.assertEqual(load_config['ignoreUnknownValues'], False) - self.assertEqual(load_config['maxBadRecords'], 0) - self.assertEqual(load_config['quote'], '"') - self.assertEqual(load_config['skipLeadingRows'], 1) - self.assertEqual(load_config['writeDisposition'], 'WRITE_APPEND') - - req = requested[1] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['uri'], UPLOAD_PATH) - headers = req['headers'] - 
length = len(BODY) - self.assertEqual(headers['Content-Type'], 'application/octet-stream') - self.assertEqual(headers['Content-Range'], - 'bytes 0-%d/%d' % (length - 1, length)) - self.assertEqual(headers['content-length'], '%d' % (length,)) - self.assertEqual(req['body'], BODY) - - -class Test_parse_schema_resource(unittest2.TestCase, _SchemaBase): - - def _callFUT(self, resource): - from gcloud.bigquery.table import _parse_schema_resource - return _parse_schema_resource(resource) - - def _makeResource(self): - return { - 'schema': {'fields': [ - {'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'}, - {'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'}, - ]}, - } - - def test__parse_schema_resource_defaults(self): - RESOURCE = self._makeResource() - schema = self._callFUT(RESOURCE['schema']) - self._verifySchema(schema, RESOURCE) - - def test__parse_schema_resource_subfields(self): - RESOURCE = self._makeResource() - RESOURCE['schema']['fields'].append( - {'name': 'phone', - 'type': 'RECORD', - 'mode': 'REPEATABLE', - 'fields': [{'name': 'type', - 'type': 'STRING', - 'mode': 'REQUIRED'}, - {'name': 'number', - 'type': 'STRING', - 'mode': 'REQUIRED'}]}) - schema = self._callFUT(RESOURCE['schema']) - self._verifySchema(schema, RESOURCE) - - def test__parse_schema_resource_fields_without_mode(self): - RESOURCE = self._makeResource() - RESOURCE['schema']['fields'].append( - {'name': 'phone', - 'type': 'STRING'}) - - schema = self._callFUT(RESOURCE['schema']) - self._verifySchema(schema, RESOURCE) - - -class Test_build_schema_resource(unittest2.TestCase, _SchemaBase): - - def _callFUT(self, resource): - from gcloud.bigquery.table import _build_schema_resource - return _build_schema_resource(resource) - - def test_defaults(self): - from gcloud.bigquery.table import SchemaField - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - resource = self._callFUT([full_name, age]) - self.assertEqual(len(resource), 2) - self.assertEqual(resource[0], - {'name': 'full_name', - 'type': 'STRING', - 'mode': 'REQUIRED'}) - self.assertEqual(resource[1], - {'name': 'age', - 'type': 'INTEGER', - 'mode': 'REQUIRED'}) - - def test_w_description(self): - from gcloud.bigquery.table import SchemaField - DESCRIPTION = 'DESCRIPTION' - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED', - description=DESCRIPTION) - age = SchemaField('age', 'INTEGER', mode='REQUIRED') - resource = self._callFUT([full_name, age]) - self.assertEqual(len(resource), 2) - self.assertEqual(resource[0], - {'name': 'full_name', - 'type': 'STRING', - 'mode': 'REQUIRED', - 'description': DESCRIPTION}) - self.assertEqual(resource[1], - {'name': 'age', - 'type': 'INTEGER', - 'mode': 'REQUIRED'}) - - def test_w_subfields(self): - from gcloud.bigquery.table import SchemaField - full_name = SchemaField('full_name', 'STRING', mode='REQUIRED') - ph_type = SchemaField('type', 'STRING', 'REQUIRED') - ph_num = SchemaField('number', 'STRING', 'REQUIRED') - phone = SchemaField('phone', 'RECORD', mode='REPEATABLE', - fields=[ph_type, ph_num]) - resource = self._callFUT([full_name, phone]) - self.assertEqual(len(resource), 2) - self.assertEqual(resource[0], - {'name': 'full_name', - 'type': 'STRING', - 'mode': 'REQUIRED'}) - self.assertEqual(resource[1], - {'name': 'phone', - 'type': 'RECORD', - 'mode': 'REPEATABLE', - 'fields': [{'name': 'type', - 'type': 'STRING', - 'mode': 'REQUIRED'}, - {'name': 'number', - 'type': 'STRING', - 'mode': 'REQUIRED'}]}) - - -class 
_Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - def job_from_resource(self, resource): # pylint: disable=W0613 - return self._job - - -class _Dataset(object): - - def __init__(self, client, name=TestTable.DS_NAME): - self._client = client - self.name = name - - @property - def path(self): - return '/projects/%s/datasets/%s' % ( - self._client.project, self.name) - - @property - def project(self): - return self._client.project - - -class _Responder(object): - - def __init__(self, *responses): - self._responses = responses[:] - self._requested = [] - - def _respond(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _HTTP(_Responder): - - connections = {} # For google-apitools debugging. - - def request(self, uri, method, headers, body, **kw): - if hasattr(body, 'read'): - body = body.read() - return self._respond(uri=uri, method=method, headers=headers, - body=body, **kw) - - -class _Connection(_Responder): - - API_BASE_URL = 'http://example.com' - USER_AGENT = 'testing 1.2.3' - - def __init__(self, *responses): - super(_Connection, self).__init__(*responses) - self.http = _HTTP(*responses) - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response - - def build_api_url(self, path, query_params=None, - api_base_url=API_BASE_URL): - from six.moves.urllib.parse import urlencode - from six.moves.urllib.parse import urlsplit - from six.moves.urllib.parse import urlunsplit - # Mimic the build_api_url interface. - qs = urlencode(query_params or {}) - scheme, netloc, _, _, _ = urlsplit(api_base_url) - return urlunsplit((scheme, netloc, path, qs, '')) diff --git a/gcloud/bigtable/__init__.py b/gcloud/bigtable/__init__.py deleted file mode 100644 index 2533a0c69c5b..000000000000 --- a/gcloud/bigtable/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable API package.""" - - -from gcloud.bigtable.client import Client diff --git a/gcloud/bigtable/_generated/__init__.py b/gcloud/bigtable/_generated/__init__.py deleted file mode 100644 index ad35adcf05ae..000000000000 --- a/gcloud/bigtable/_generated/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generated protobuf modules for Google Cloud Bigtable API.""" diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_data.proto b/gcloud/bigtable/_generated/_bigtable_cluster_data.proto deleted file mode 100644 index c0f8a93f2862..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_data.proto +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. 
- STORAGE_HDD = 2; -} diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_service.proto b/gcloud/bigtable/_generated/_bigtable_cluster_service.proto deleted file mode 100644 index e65bca4de740..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_service.proto +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
- rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. 
- rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "null" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto deleted file mode 100644 index 3291969375c5..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. 
- google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} diff --git a/gcloud/bigtable/_generated/_bigtable_data.proto b/gcloud/bigtable/_generated/_bigtable_data.proto deleted file mode 100644 index 290eb9116ad0..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_data.proto +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -option java_multiple_files = true; -option java_outer_classname = "BigtableDataProto"; -option java_package = "com.google.bigtable.v1"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family of a table. -message Family { - // The unique key which identifies this family within its row. 
This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column of a table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // Inclusive lower bound. If left empty, interpreted as the empty string. - bytes start_key = 2; - - // Exclusive upper bound. If left empty, interpreted as infinity. - bytes end_key = 3; -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from : to -// :, where both bounds can be either inclusive or -// exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within 'column_family'). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_inclusive = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_exclusive = 3; - } - - // The column qualifier at which to end the range (within 'column_family'). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_inclusive = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_exclusive = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. 
If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_inclusive = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_exclusive = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_inclusive = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_exclusive = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. 
- // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. 
- // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' - // character will not match the new line character '\n', which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. - TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. 
For example, - // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, - // skip all earlier cells in "foo:bar", and then begin matching again in - // column "foo:bar2". - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern [a-z0-9\\-]+ - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a apply_label_transformer. It is okay for - // an Interleave to contain multiple apply_label_transformers, as they will - // be applied to separate copies of the input. This may be relaxed in the - // future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. 
- // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_service.proto b/gcloud/bigtable/_generated/_bigtable_service.proto deleted file mode 100644 index f1a83d351b63..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_service.proto +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/v1/bigtable_data.proto"; -import "google/bigtable/v1/bigtable_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" }; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" }; - } - - // Mutates multiple rows in a batch. 
Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" }; - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_service_messages.proto deleted file mode 100644 index 1479fb65eebf..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_service_messages.proto +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/bigtable/v1/bigtable_data.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - - // A set of rows from which to read. Entries need not be in order, and will - // be deduplicated before reading. - // The total serialized size of the set must not exceed 1MB. - RowSet row_set = 8; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". 
- bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. - Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // At least one mutation must be specified. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. 
- string table_name = 1; - - // The row keys/mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may - // contain at most 100000 mutations. - repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The results for each Entry from the request, presented in the order - // in which the entries were originally given. - repeated google.rpc.Status statuses = 1; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} diff --git a/gcloud/bigtable/_generated/_bigtable_table_data.proto b/gcloud/bigtable/_generated/_bigtable_table_data.proto deleted file mode 100644 index f81c878f03b5..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_data.proto +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. - string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. 
- message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_table_service.proto b/gcloud/bigtable/_generated/_bigtable_table_service.proto deleted file mode 100644 index 417409c4093b..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_service.proto +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. 
- rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto deleted file mode 100644 index 73f2a8cfbf2b..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. 
- string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} diff --git a/gcloud/bigtable/_generated/_operations.proto b/gcloud/bigtable/_generated/_operations.proto deleted file mode 100644 index a358d0a38787..000000000000 --- a/gcloud/bigtable/_generated/_operations.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.longrunning; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "OperationsProto"; -option java_package = "com.google.longrunning"; - - -// Manages long-running operations with an API service. -// -// When an API method normally takes long time to complete, it can be designed -// to return [Operation][google.longrunning.Operation] to the client, and the client can use this -// interface to receive the real response asynchronously by polling the -// operation resource, or using `google.watcher.v1.Watcher` interface to watch -// the response, or pass the operation resource to another API (such as Google -// Cloud Pub/Sub API) to receive the response. Any API service that returns -// long-running operations should implement the `Operations` interface so -// developers can have a consistent client experience. -service Operations { - // Gets the latest state of a long-running operation. Clients may use this - // method to poll the operation result at intervals as recommended by the API - // service. - rpc GetOperation(GetOperationRequest) returns (Operation) { - option (google.api.http) = { get: "/v1/{name=operations/**}" }; - } - - // Lists operations that match the specified filter in the request. If the - // server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. - rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { - option (google.api.http) = { get: "/v1/{name=operations}" }; - } - - // Starts asynchronous cancellation on a long-running operation. 
The server - // makes a best effort to cancel the operation, but success is not - // guaranteed. If the server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. Clients may use - // [Operations.GetOperation] or other methods to check whether the - // cancellation succeeded or the operation completed despite cancellation. - rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; - } - - // Deletes a long-running operation. It indicates the client is no longer - // interested in the operation result. It does not cancel the operation. - rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=operations/**}" }; - } -} - -// This resource represents a long-running operation that is the result of a -// network API call. -message Operation { - // The name of the operation resource, which is only unique within the same - // service that originally returns it. - string name = 1; - - // Some service-specific metadata associated with the operation. It typically - // contains progress information and common metadata such as create time. - // Some services may not provide such metadata. Any method that returns a - // long-running operation should document the metadata type, if any. - google.protobuf.Any metadata = 2; - - // If the value is false, it means the operation is still in progress. - // If true, the operation is completed and the `result` is available. - bool done = 3; - - oneof result { - // The error result of the operation in case of failure. - google.rpc.Status error = 4; - - // The normal response of the operation in case of success. If the original - // method returns no data on success, such as `Delete`, the response will be - // `google.protobuf.Empty`. If the original method is standard - // `Get`/`Create`/`Update`, the response should be the resource. For other - // methods, the response should have the type `XxxResponse`, where `Xxx` - // is the original method name. For example, if the original method name - // is `TakeSnapshot()`, the inferred response type will be - // `TakeSnapshotResponse`. - google.protobuf.Any response = 5; - } -} - -// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. -message GetOperationRequest { - // The name of the operation resource. - string name = 1; -} - -// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsRequest { - // The name of the operation collection. - string name = 4; - - // The standard List filter. - string filter = 1; - - // The standard List page size. - int32 page_size = 2; - - // The standard List page token. - string page_token = 3; -} - -// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsResponse { - // A list of operations that match the specified filter in the request. - repeated Operation operations = 1; - - // The standard List next-page token. - string next_page_token = 2; -} - -// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. -message CancelOperationRequest { - // The name of the operation resource to be cancelled. - string name = 1; -} - -// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. 
-message DeleteOperationRequest { - // The name of the operation resource to be deleted. - string name = 1; -} diff --git a/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py b/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py deleted file mode 100644 index 4106aabd082d..000000000000 --- a/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py +++ /dev/null @@ -1,221 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto', - package='google.bigtable.admin.cluster.v1', - syntax='proto3', - serialized_pb=b'\n""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ListZones(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def GetCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ListClusters(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CreateCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UpdateCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UndeleteCluster(self, request, context): - raise NotImplementedError() - -class BetaBigtableClusterServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ListZones(self, request, timeout): - raise NotImplementedError() - ListZones.future = None - @abc.abstractmethod - def GetCluster(self, request, timeout): - raise NotImplementedError() - GetCluster.future = None - @abc.abstractmethod - def ListClusters(self, request, timeout): - raise NotImplementedError() - ListClusters.future = None - @abc.abstractmethod - def CreateCluster(self, request, timeout): - raise NotImplementedError() - CreateCluster.future = None - @abc.abstractmethod - def UpdateCluster(self, request, timeout): - raise NotImplementedError() - UpdateCluster.future = None - @abc.abstractmethod - def DeleteCluster(self, request, timeout): - raise NotImplementedError() - DeleteCluster.future = None - @abc.abstractmethod - def UndeleteCluster(self, request, timeout): - raise NotImplementedError() - UndeleteCluster.future = None - -def beta_create_BigtableClusterService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import 
gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.longrunning.operations_pb2 - request_deserializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - } - response_serializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): 
face_utilities.unary_unary_inline(servicer.ListClusters), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): face_utilities.unary_unary_inline(servicer.ListZones), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): face_utilities.unary_unary_inline(servicer.UndeleteCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableClusterService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.longrunning.operations_pb2 - request_serializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): 
gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - } - cardinalities = { - 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, - 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, - 'GetCluster': cardinality.Cardinality.UNARY_UNARY, - 'ListClusters': cardinality.Cardinality.UNARY_UNARY, - 'ListZones': cardinality.Cardinality.UNARY_UNARY, - 'UndeleteCluster': cardinality.Cardinality.UNARY_UNARY, - 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.cluster.v1.BigtableClusterService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_data_pb2.py b/gcloud/bigtable/_generated/bigtable_data_pb2.py deleted file mode 100644 index 47eb8756c7d9..000000000000 --- a/gcloud/bigtable/_generated/bigtable_data_pb2.py +++ /dev/null @@ -1,1226 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/v1/bigtable_data.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_data.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n&google/bigtable/v1/bigtable_data.proto\x12\x12google.bigtable.v1\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v1.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v1.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v1.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\".\n\x08RowRange\x12\x11\n\tstart_key\x18\x02 \x01(\x0c\x12\x0f\n\x07\x65nd_key\x18\x03 \x01(\x0c\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.RowRange\"\xd6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12#\n\x19start_qualifier_inclusive\x18\x02 \x01(\x0cH\x00\x12#\n\x19start_qualifier_exclusive\x18\x03 \x01(\x0cH\x00\x12!\n\x17\x65nd_qualifier_inclusive\x18\x04 \x01(\x0cH\x01\x12!\n\x17\x65nd_qualifier_exclusive\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\xa8\x01\n\nValueRange\x12\x1f\n\x15start_value_inclusive\x18\x01 \x01(\x0cH\x00\x12\x1f\n\x15start_value_exclusive\x18\x02 \x01(\x0cH\x00\x12\x1d\n\x13\x65nd_value_inclusive\x18\x03 \x01(\x0cH\x01\x12\x1d\n\x13\x65nd_value_exclusive\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v1.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v1.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v1.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v1.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v1.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v1.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 
\x03(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v1.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v1.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v1.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v1.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v1.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB-\n\x16\x63om.google.bigtable.v1B\x11\x42igtableDataProtoP\x01\x62\x06proto3' -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_ROW = _descriptor.Descriptor( - name='Row', - full_name='google.bigtable.v1.Row', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.v1.Row.key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='families', full_name='google.bigtable.v1.Row.families', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=62, - serialized_end=126, -) - - -_FAMILY = _descriptor.Descriptor( - name='Family', - full_name='google.bigtable.v1.Family', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.v1.Family.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='columns', full_name='google.bigtable.v1.Family.columns', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=128, - serialized_end=195, -) - - -_COLUMN = _descriptor.Descriptor( - name='Column', - full_name='google.bigtable.v1.Column', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='qualifier', full_name='google.bigtable.v1.Column.qualifier', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells', full_name='google.bigtable.v1.Column.cells', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=197, - serialized_end=265, -) - - -_CELL = _descriptor.Descriptor( - name='Cell', - full_name='google.bigtable.v1.Cell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v1.Cell.timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v1.Cell.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.v1.Cell.labels', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=267, - serialized_end=330, -) - - -_ROWRANGE = _descriptor.Descriptor( - name='RowRange', - full_name='google.bigtable.v1.RowRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_key', full_name='google.bigtable.v1.RowRange.start_key', index=0, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_key', full_name='google.bigtable.v1.RowRange.end_key', index=1, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=332, - serialized_end=378, -) - - -_ROWSET = _descriptor.Descriptor( - name='RowSet', - full_name='google.bigtable.v1.RowSet', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_keys', full_name='google.bigtable.v1.RowSet.row_keys', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_ranges', full_name='google.bigtable.v1.RowSet.row_ranges', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=380, - serialized_end=456, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name='ColumnRange', - full_name='google.bigtable.v1.ColumnRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.ColumnRange.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_qualifier_inclusive', full_name='google.bigtable.v1.ColumnRange.start_qualifier_inclusive', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_qualifier_exclusive', full_name='google.bigtable.v1.ColumnRange.start_qualifier_exclusive', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_qualifier_inclusive', full_name='google.bigtable.v1.ColumnRange.end_qualifier_inclusive', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_qualifier_exclusive', full_name='google.bigtable.v1.ColumnRange.end_qualifier_exclusive', index=4, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_qualifier', full_name='google.bigtable.v1.ColumnRange.start_qualifier', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_qualifier', full_name='google.bigtable.v1.ColumnRange.end_qualifier', - index=1, containing_type=None, fields=[]), - ], - serialized_start=459, - serialized_end=673, -) - - -_TIMESTAMPRANGE = 
_descriptor.Descriptor( - name='TimestampRange', - full_name='google.bigtable.v1.TimestampRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_timestamp_micros', full_name='google.bigtable.v1.TimestampRange.start_timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_timestamp_micros', full_name='google.bigtable.v1.TimestampRange.end_timestamp_micros', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=675, - serialized_end=753, -) - - -_VALUERANGE = _descriptor.Descriptor( - name='ValueRange', - full_name='google.bigtable.v1.ValueRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_value_inclusive', full_name='google.bigtable.v1.ValueRange.start_value_inclusive', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_value_exclusive', full_name='google.bigtable.v1.ValueRange.start_value_exclusive', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_value_inclusive', full_name='google.bigtable.v1.ValueRange.end_value_inclusive', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_value_exclusive', full_name='google.bigtable.v1.ValueRange.end_value_exclusive', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_value', full_name='google.bigtable.v1.ValueRange.start_value', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_value', full_name='google.bigtable.v1.ValueRange.end_value', - index=1, containing_type=None, fields=[]), - ], - serialized_start=756, - serialized_end=924, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name='Chain', - full_name='google.bigtable.v1.RowFilter.Chain', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v1.RowFilter.Chain.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1743, - serialized_end=1798, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name='Interleave', - full_name='google.bigtable.v1.RowFilter.Interleave', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v1.RowFilter.Interleave.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1800, - serialized_end=1860, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name='Condition', - full_name='google.bigtable.v1.RowFilter.Condition', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v1.RowFilter.Condition.predicate_filter', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='true_filter', full_name='google.bigtable.v1.RowFilter.Condition.true_filter', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='false_filter', full_name='google.bigtable.v1.RowFilter.Condition.false_filter', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1863, - serialized_end=2036, -) - -_ROWFILTER = _descriptor.Descriptor( - name='RowFilter', - full_name='google.bigtable.v1.RowFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chain', full_name='google.bigtable.v1.RowFilter.chain', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='interleave', full_name='google.bigtable.v1.RowFilter.interleave', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='condition', full_name='google.bigtable.v1.RowFilter.condition', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sink', full_name='google.bigtable.v1.RowFilter.sink', index=3, - number=16, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pass_all_filter', full_name='google.bigtable.v1.RowFilter.pass_all_filter', index=4, - number=17, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='block_all_filter', full_name='google.bigtable.v1.RowFilter.block_all_filter', index=5, - number=18, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key_regex_filter', full_name='google.bigtable.v1.RowFilter.row_key_regex_filter', index=6, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_sample_filter', full_name='google.bigtable.v1.RowFilter.row_sample_filter', index=7, - number=14, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='family_name_regex_filter', full_name='google.bigtable.v1.RowFilter.family_name_regex_filter', index=8, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier_regex_filter', full_name='google.bigtable.v1.RowFilter.column_qualifier_regex_filter', index=9, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_range_filter', full_name='google.bigtable.v1.RowFilter.column_range_filter', index=10, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_range_filter', full_name='google.bigtable.v1.RowFilter.timestamp_range_filter', index=11, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value_regex_filter', full_name='google.bigtable.v1.RowFilter.value_regex_filter', index=12, - number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value_range_filter', 
full_name='google.bigtable.v1.RowFilter.value_range_filter', index=13, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_row_offset_filter', full_name='google.bigtable.v1.RowFilter.cells_per_row_offset_filter', index=14, - number=10, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_row_limit_filter', full_name='google.bigtable.v1.RowFilter.cells_per_row_limit_filter', index=15, - number=11, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_column_limit_filter', full_name='google.bigtable.v1.RowFilter.cells_per_column_limit_filter', index=16, - number=12, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='strip_value_transformer', full_name='google.bigtable.v1.RowFilter.strip_value_transformer', index=17, - number=13, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='apply_label_transformer', full_name='google.bigtable.v1.RowFilter.apply_label_transformer', index=18, - number=19, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='filter', full_name='google.bigtable.v1.RowFilter.filter', - index=0, containing_type=None, fields=[]), - ], - serialized_start=927, - serialized_end=2046, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name='SetCell', - full_name='google.bigtable.v1.Mutation.SetCell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.SetCell.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.Mutation.SetCell.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v1.Mutation.SetCell.timestamp_micros', index=2, - number=3, type=3, cpp_type=2, label=1, - 
has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v1.Mutation.SetCell.value', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2344, - serialized_end=2441, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name='DeleteFromColumn', - full_name='google.bigtable.v1.Mutation.DeleteFromColumn', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='time_range', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.time_range', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2443, - serialized_end=2564, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name='DeleteFromFamily', - full_name='google.bigtable.v1.Mutation.DeleteFromFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.DeleteFromFamily.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2566, - serialized_end=2605, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name='DeleteFromRow', - full_name='google.bigtable.v1.Mutation.DeleteFromRow', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2607, - serialized_end=2622, -) - -_MUTATION = _descriptor.Descriptor( - name='Mutation', - full_name='google.bigtable.v1.Mutation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='set_cell', full_name='google.bigtable.v1.Mutation.set_cell', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_column', full_name='google.bigtable.v1.Mutation.delete_from_column', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_family', full_name='google.bigtable.v1.Mutation.delete_from_family', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_row', full_name='google.bigtable.v1.Mutation.delete_from_row', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='mutation', full_name='google.bigtable.v1.Mutation.mutation', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2049, - serialized_end=2634, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name='ReadModifyWriteRule', - full_name='google.bigtable.v1.ReadModifyWriteRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.ReadModifyWriteRule.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.ReadModifyWriteRule.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='append_value', full_name='google.bigtable.v1.ReadModifyWriteRule.append_value', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='increment_amount', full_name='google.bigtable.v1.ReadModifyWriteRule.increment_amount', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - 
name='rule', full_name='google.bigtable.v1.ReadModifyWriteRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2637, - serialized_end=2765, -) - -_ROW.fields_by_name['families'].message_type = _FAMILY -_FAMILY.fields_by_name['columns'].message_type = _COLUMN -_COLUMN.fields_by_name['cells'].message_type = _CELL -_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_inclusive']) -_COLUMNRANGE.fields_by_name['start_qualifier_inclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_exclusive']) -_COLUMNRANGE.fields_by_name['start_qualifier_exclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_inclusive']) -_COLUMNRANGE.fields_by_name['end_qualifier_inclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_exclusive']) -_COLUMNRANGE.fields_by_name['end_qualifier_exclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_inclusive']) -_VALUERANGE.fields_by_name['start_value_inclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_exclusive']) -_VALUERANGE.fields_by_name['start_value_exclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_inclusive']) -_VALUERANGE.fields_by_name['end_value_inclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_exclusive']) -_VALUERANGE.fields_by_name['end_value_exclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['chain']) -_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['interleave']) 
-_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['condition']) -_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['sink']) -_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['pass_all_filter']) -_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['block_all_filter']) -_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_key_regex_filter']) -_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_sample_filter']) -_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['family_name_regex_filter']) -_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_qualifier_regex_filter']) -_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_range_filter']) -_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['timestamp_range_filter']) -_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_regex_filter']) -_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_range_filter']) -_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) -_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['strip_value_transformer']) -_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - 
_ROWFILTER.fields_by_name['apply_label_transformer']) -_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION -_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name['delete_from_family'].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['set_cell']) -_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_column']) -_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_family']) -_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_row']) -_MUTATION.fields_by_name['delete_from_row'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['append_value']) -_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['increment_amount']) -_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Row'] = _ROW -DESCRIPTOR.message_types_by_name['Family'] = _FAMILY -DESCRIPTOR.message_types_by_name['Column'] = _COLUMN -DESCRIPTOR.message_types_by_name['Cell'] = _CELL -DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE -DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET -DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE -DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER -DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION -DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE - -Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( - DESCRIPTOR = _ROW, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Row) - )) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( - DESCRIPTOR = _FAMILY, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Family) - )) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( - DESCRIPTOR = _COLUMN, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Column) - 
)) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( - DESCRIPTOR = _CELL, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Cell) - )) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( - DESCRIPTOR = _ROWRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowRange) - )) -_sym_db.RegisterMessage(RowRange) - -RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( - DESCRIPTOR = _ROWSET, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowSet) - )) -_sym_db.RegisterMessage(RowSet) - -ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( - DESCRIPTOR = _COLUMNRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ColumnRange) - )) -_sym_db.RegisterMessage(ColumnRange) - -TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( - DESCRIPTOR = _TIMESTAMPRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.TimestampRange) - )) -_sym_db.RegisterMessage(TimestampRange) - -ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( - DESCRIPTOR = _VALUERANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ValueRange) - )) -_sym_db.RegisterMessage(ValueRange) - -RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( - - Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CHAIN, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Chain) - )) - , - - Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_INTERLEAVE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Interleave) - )) - , - - Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CONDITION, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Condition) - )) - , - DESCRIPTOR = _ROWFILTER, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter) - )) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( - - SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_SETCELL, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.SetCell) - )) - , - - DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, - __module__ = 
'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromColumn) - )) - , - - DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromFamily) - )) - , - - DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMROW, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromRow) - )) - , - DESCRIPTOR = _MUTATION, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation) - )) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITERULE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRule) - )) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\021BigtableDataProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py b/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py deleted file mode 100644 index 38a478aded74..000000000000 --- a/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py +++ /dev/null @@ -1,678 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/v1/bigtable_service_messages.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_service_messages.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n2google/bigtable/v1/bigtable_service_messages.proto\x12\x12google.bigtable.v1\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x17google/rpc/status.proto\"\x8b\x02\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x11\n\x07row_key\x18\x02 \x01(\x0cH\x00\x12\x31\n\trow_range\x18\x03 \x01(\x0b\x32\x1c.google.bigtable.v1.RowRangeH\x00\x12-\n\x07row_set\x18\x08 \x01(\x0b\x32\x1a.google.bigtable.v1.RowSetH\x00\x12-\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x1e\n\x16\x61llow_row_interleaving\x18\x06 \x01(\x08\x12\x16\n\x0enum_rows_limit\x18\x07 \x01(\x03\x42\x08\n\x06target\"\xd0\x01\n\x10ReadRowsResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12:\n\x06\x63hunks\x18\x02 \x03(\x0b\x32*.google.bigtable.v1.ReadRowsResponse.Chunk\x1ao\n\x05\x43hunk\x12\x32\n\x0crow_contents\x18\x01 \x01(\x0b\x32\x1a.google.bigtable.v1.FamilyH\x00\x12\x13\n\treset_row\x18\x02 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\x03 \x01(\x08H\x00\x42\x07\n\x05\x63hunk\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v1.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\":\n\x12MutateRowsResponse\x12$\n\x08statuses\x18\x01 \x03(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v1.ReadModifyWriteRuleB8\n\x16\x63om.google.bigtable.v1B\x1c\x42igtableServiceMessagesProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_READROWSREQUEST = _descriptor.Descriptor( - name='ReadRowsRequest', - full_name='google.bigtable.v1.ReadRowsRequest', - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.ReadRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadRowsRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_range', full_name='google.bigtable.v1.ReadRowsRequest.row_range', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_set', full_name='google.bigtable.v1.ReadRowsRequest.row_set', index=3, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filter', full_name='google.bigtable.v1.ReadRowsRequest.filter', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='allow_row_interleaving', full_name='google.bigtable.v1.ReadRowsRequest.allow_row_interleaving', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_rows_limit', full_name='google.bigtable.v1.ReadRowsRequest.num_rows_limit', index=6, - number=7, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='target', full_name='google.bigtable.v1.ReadRowsRequest.target', - index=0, containing_type=None, fields=[]), - ], - serialized_start=140, - serialized_end=407, -) - - -_READROWSRESPONSE_CHUNK = _descriptor.Descriptor( - name='Chunk', - full_name='google.bigtable.v1.ReadRowsResponse.Chunk', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_contents', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.row_contents', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reset_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.reset_row', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='commit_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.commit_row', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='chunk', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.chunk', - index=0, containing_type=None, fields=[]), - ], - serialized_start=507, - serialized_end=618, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name='ReadRowsResponse', - full_name='google.bigtable.v1.ReadRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadRowsResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chunks', full_name='google.bigtable.v1.ReadRowsResponse.chunks', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_READROWSRESPONSE_CHUNK, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=410, - serialized_end=618, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name='SampleRowKeysRequest', - full_name='google.bigtable.v1.SampleRowKeysRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.SampleRowKeysRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=620, - serialized_end=662, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name='SampleRowKeysResponse', - full_name='google.bigtable.v1.SampleRowKeysResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.SampleRowKeysResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset_bytes', full_name='google.bigtable.v1.SampleRowKeysResponse.offset_bytes', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - 
options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=664, - serialized_end=726, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name='MutateRowRequest', - full_name='google.bigtable.v1.MutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.MutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.MutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v1.MutateRowRequest.mutations', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=728, - serialized_end=832, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v1.MutateRowsRequest.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.MutateRowsRequest.Entry.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v1.MutateRowsRequest.Entry.mutations', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=938, - serialized_end=1011, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name='MutateRowsRequest', - full_name='google.bigtable.v1.MutateRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.MutateRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entries', full_name='google.bigtable.v1.MutateRowsRequest.entries', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - 
nested_types=[_MUTATEROWSREQUEST_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=835, - serialized_end=1011, -) - - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name='MutateRowsResponse', - full_name='google.bigtable.v1.MutateRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='statuses', full_name='google.bigtable.v1.MutateRowsResponse.statuses', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1013, - serialized_end=1071, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name='CheckAndMutateRowRequest', - full_name='google.bigtable.v1.CheckAndMutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.CheckAndMutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.CheckAndMutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v1.CheckAndMutateRowRequest.predicate_filter', index=2, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='true_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.true_mutations', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='false_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.false_mutations', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1074, - serialized_end=1303, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name='CheckAndMutateRowResponse', - full_name='google.bigtable.v1.CheckAndMutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_matched', full_name='google.bigtable.v1.CheckAndMutateRowResponse.predicate_matched', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, 
default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1305, - serialized_end=1359, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name='ReadModifyWriteRowRequest', - full_name='google.bigtable.v1.ReadModifyWriteRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.rules', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1361, - serialized_end=1481, -) - -_READROWSREQUEST.fields_by_name['row_range'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWRANGE -_READROWSREQUEST.fields_by_name['row_set'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWSET -_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_key']) -_READROWSREQUEST.fields_by_name['row_key'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_range']) -_READROWSREQUEST.fields_by_name['row_range'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_set']) -_READROWSREQUEST.fields_by_name['row_set'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._FAMILY -_READROWSRESPONSE_CHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - _READROWSRESPONSE_CHUNK.fields_by_name['row_contents']) -_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - _READROWSRESPONSE_CHUNK.fields_by_name['reset_row']) -_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - 
_READROWSRESPONSE_CHUNK.fields_by_name['commit_row']) -_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CHUNK -_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE.fields_by_name['statuses'].message_type = google_dot_rpc_dot_status__pb2._STATUS -_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._READMODIFYWRITERULE -DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( - DESCRIPTOR = _READROWSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsRequest) - )) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( - - Chunk = _reflection.GeneratedProtocolMessageType('Chunk', (_message.Message,), dict( - DESCRIPTOR = _READROWSRESPONSE_CHUNK, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse.Chunk) - )) - , - DESCRIPTOR = _READROWSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse) - )) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.Chunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysRequest) - )) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = 
_reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysResponse) - )) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowRequest) - )) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest) - )) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsResponse) - )) -_sym_db.RegisterMessage(MutateRowsResponse) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowRequest) - )) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowResponse) - )) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRowRequest) - )) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\034BigtableServiceMessagesProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_service_pb2.py b/gcloud/bigtable/_generated/bigtable_service_pb2.py deleted file mode 100644 index 901ffb6103c5..000000000000 --- a/gcloud/bigtable/_generated/bigtable_service_pb2.py +++ /dev/null @@ -1,167 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/v1/bigtable_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2 -from gcloud.bigtable._generated import bigtable_service_messages_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_service.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n)google/bigtable/v1/bigtable_service.proto\x12\x12google.bigtable.v1\x1a\x1cgoogle/api/annotations.proto\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x32google/bigtable/v1/bigtable_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\xdd\x08\n\x0f\x42igtableService\x12\xa5\x01\n\x08ReadRows\x12#.google.bigtable.v1.ReadRowsRequest\x1a$.google.bigtable.v1.ReadRowsResponse\"L\x82\xd3\xe4\x93\x02\x46\"A/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read:\x01*0\x01\x12\xb7\x01\n\rSampleRowKeys\x12(.google.bigtable.v1.SampleRowKeysRequest\x1a).google.bigtable.v1.SampleRowKeysResponse\"O\x82\xd3\xe4\x93\x02I\x12G/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys0\x01\x12\xa3\x01\n\tMutateRow\x12$.google.bigtable.v1.MutateRowRequest\x1a\x16.google.protobuf.Empty\"X\x82\xd3\xe4\x93\x02R\"M/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate:\x01*\x12\xaa\x01\n\nMutateRows\x12%.google.bigtable.v1.MutateRowsRequest\x1a&.google.bigtable.v1.MutateRowsResponse\"M\x82\xd3\xe4\x93\x02G\"B/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows:\x01*\x12\xd2\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v1.CheckAndMutateRowRequest\x1a-.google.bigtable.v1.CheckAndMutateRowResponse\"`\x82\xd3\xe4\x93\x02Z\"U/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate:\x01*\x12\xbf\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v1.ReadModifyWriteRowRequest\x1a\x17.google.bigtable.v1.Row\"a\x82\xd3\xe4\x93\x02[\"V/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite:\x01*B4\n\x16\x63om.google.bigtable.v1B\x15\x42igtableServicesProtoP\x01\x88\x01\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\025BigtableServicesProtoP\001\210\001\001') -import abc -from grpc.beta import implementations as beta_implementations -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -class BetaBigtableServiceServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ReadRows(self, request, context): - raise NotImplementedError() 
- @abc.abstractmethod - def SampleRowKeys(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def MutateRow(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def MutateRows(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CheckAndMutateRow(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ReadModifyWriteRow(self, request, context): - raise NotImplementedError() - -class BetaBigtableServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ReadRows(self, request, timeout): - raise NotImplementedError() - @abc.abstractmethod - def SampleRowKeys(self, request, timeout): - raise NotImplementedError() - @abc.abstractmethod - def MutateRow(self, request, timeout): - raise NotImplementedError() - MutateRow.future = None - @abc.abstractmethod - def MutateRows(self, request, timeout): - raise NotImplementedError() - MutateRows.future = None - @abc.abstractmethod - def CheckAndMutateRow(self, request, timeout): - raise NotImplementedError() - CheckAndMutateRow.future = None - @abc.abstractmethod - def ReadModifyWriteRow(self, request, timeout): - raise NotImplementedError() - ReadModifyWriteRow.future = None - -def beta_create_BigtableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_data_pb2 - request_deserializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.FromString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.FromString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.FromString, - } - response_serializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.SerializeToString, - 
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.SerializeToString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.SerializeToString, - } - method_implementations = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), - ('google.bigtable.v1.BigtableService', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), - ('google.bigtable.v1.BigtableService', 'MutateRows'): face_utilities.unary_unary_inline(servicer.MutateRows), - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), - ('google.bigtable.v1.BigtableService', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_data_pb2 - request_serializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.SerializeToString, - } - response_deserializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.FromString, - 
('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.FromString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.FromString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.FromString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.FromString, - } - cardinalities = { - 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRows': cardinality.Cardinality.UNARY_UNARY, - 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, - 'ReadRows': cardinality.Cardinality.UNARY_STREAM, - 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.v1.BigtableService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_table_data_pb2.py b/gcloud/bigtable/_generated/bigtable_table_data_pb2.py deleted file mode 100644 index fd47b567b3c5..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_data_pb2.py +++ /dev/null @@ -1,377 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/table/v1/bigtable_table_data.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_data.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - serialized_pb=b'\n8google/bigtable/admin/table/v1/bigtable_table_data.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xfd\x02\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x02 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12R\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x39.google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry\x12O\n\x0bgranularity\x18\x04 \x01(\x0e\x32:.google.bigtable.admin.table.v1.Table.TimestampGranularity\x1a\x63\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily:\x02\x38\x01\"\"\n\x14TimestampGranularity\x12\n\n\x06MILLIS\x10\x00\"l\n\x0c\x43olumnFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rgc_expression\x18\x02 \x01(\t\x12\x37\n\x07gc_rule\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\"\xed\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 
\x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12K\n\x0cintersection\x18\x03 \x01(\x0b\x32\x33.google.bigtable.admin.table.v1.GcRule.IntersectionH\x00\x12=\n\x05union\x18\x04 \x01(\x0b\x32,.google.bigtable.admin.table.v1.GcRule.UnionH\x00\x1a\x45\n\x0cIntersection\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\x1a>\n\x05Union\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRuleB\x06\n\x04ruleB>\n\"com.google.bigtable.admin.table.v1B\x16\x42igtableTableDataProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name='TimestampGranularity', - full_name='google.bigtable.admin.table.v1.Table.TimestampGranularity', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MILLIS', index=0, number=0, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=509, - serialized_end=543, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name='ColumnFamiliesEntry', - full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=408, - serialized_end=507, -) - -_TABLE = _descriptor.Descriptor( - name='Table', - full_name='google.bigtable.admin.table.v1.Table', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.Table.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='current_operation', full_name='google.bigtable.admin.table.v1.Table.current_operation', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_families', full_name='google.bigtable.admin.table.v1.Table.column_families', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='granularity', full_name='google.bigtable.admin.table.v1.Table.granularity', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=162, - serialized_end=543, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name='ColumnFamily', - full_name='google.bigtable.admin.table.v1.ColumnFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.ColumnFamily.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gc_expression', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_expression', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gc_rule', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_rule', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=545, - serialized_end=653, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name='Intersection', - full_name='google.bigtable.admin.table.v1.GcRule.Intersection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Intersection.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=880, - serialized_end=949, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name='Union', - full_name='google.bigtable.admin.table.v1.GcRule.Union', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Union.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], 
- serialized_start=951, - serialized_end=1013, -) - -_GCRULE = _descriptor.Descriptor( - name='GcRule', - full_name='google.bigtable.admin.table.v1.GcRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='max_num_versions', full_name='google.bigtable.admin.table.v1.GcRule.max_num_versions', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_age', full_name='google.bigtable.admin.table.v1.GcRule.max_age', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='intersection', full_name='google.bigtable.admin.table.v1.GcRule.intersection', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='union', full_name='google.bigtable.admin.table.v1.GcRule.union', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='rule', full_name='google.bigtable.admin.table.v1.GcRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=656, - serialized_end=1021, -) - -_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION -_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_num_versions']) -_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_age']) -_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['intersection']) -_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['union']) 
-_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Table'] = _TABLE -DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE - -Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( - - ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry) - )) - , - DESCRIPTOR = _TABLE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table) - )) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( - DESCRIPTOR = _COLUMNFAMILY, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ColumnFamily) - )) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( - - Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_INTERSECTION, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Intersection) - )) - , - - Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_UNION, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Union) - )) - , - DESCRIPTOR = _GCRULE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule) - )) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\026BigtableTableDataProtoP\001') -_TABLE_COLUMNFAMILIESENTRY.has_options = True -_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py b/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py deleted file mode 100644 index 582dfed94612..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py +++ /dev/null @@ -1,389 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_service_messages.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - serialized_pb=b'\nDgoogle/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\"\x86\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x34\n\x05table\x18\x03 \x01(\x0b\x32%.google.bigtable.admin.table.v1.Table\x12\x1a\n\x12initial_split_keys\x18\x04 \x03(\t\"!\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"K\n\x12ListTablesResponse\x12\x35\n\x06tables\x18\x01 \x03(\x0b\x32%.google.bigtable.admin.table.v1.Table\"\x1f\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"2\n\x12RenameTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06new_id\x18\x02 \x01(\t\"\x88\x01\n\x19\x43reateColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_family_id\x18\x02 \x01(\t\x12\x43\n\rcolumn_family\x18\x03 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily\")\n\x19\x44\x65leteColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\tBI\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name='CreateTableRequest', - full_name='google.bigtable.admin.table.v1.CreateTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.CreateTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table_id', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='initial_split_keys', full_name='google.bigtable.admin.table.v1.CreateTableRequest.initial_split_keys', index=3, - number=4, type=9, 
cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=163, - serialized_end=297, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name='ListTablesRequest', - full_name='google.bigtable.admin.table.v1.ListTablesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.ListTablesRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=299, - serialized_end=332, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name='ListTablesResponse', - full_name='google.bigtable.admin.table.v1.ListTablesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tables', full_name='google.bigtable.admin.table.v1.ListTablesResponse.tables', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=334, - serialized_end=409, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name='GetTableRequest', - full_name='google.bigtable.admin.table.v1.GetTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.GetTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=411, - serialized_end=442, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name='DeleteTableRequest', - full_name='google.bigtable.admin.table.v1.DeleteTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.DeleteTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=444, - serialized_end=478, -) - - -_RENAMETABLEREQUEST = _descriptor.Descriptor( - name='RenameTableRequest', - 
full_name='google.bigtable.admin.table.v1.RenameTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.RenameTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_id', full_name='google.bigtable.admin.table.v1.RenameTableRequest.new_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=480, - serialized_end=530, -) - - -_CREATECOLUMNFAMILYREQUEST = _descriptor.Descriptor( - name='CreateColumnFamilyRequest', - full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_family_id', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_family', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=533, - serialized_end=669, -) - - -_DELETECOLUMNFAMILYREQUEST = _descriptor.Descriptor( - name='DeleteColumnFamilyRequest', - full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=671, - serialized_end=712, -) - -_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE 
-_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE -_CREATECOLUMNFAMILYREQUEST.fields_by_name['column_family'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._COLUMNFAMILY -DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name['RenameTableRequest'] = _RENAMETABLEREQUEST -DESCRIPTOR.message_types_by_name['CreateColumnFamilyRequest'] = _CREATECOLUMNFAMILYREQUEST -DESCRIPTOR.message_types_by_name['DeleteColumnFamilyRequest'] = _DELETECOLUMNFAMILYREQUEST - -CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateTableRequest) - )) -_sym_db.RegisterMessage(CreateTableRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesRequest) - )) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESRESPONSE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesResponse) - )) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( - DESCRIPTOR = _GETTABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GetTableRequest) - )) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteTableRequest) - )) -_sym_db.RegisterMessage(DeleteTableRequest) - -RenameTableRequest = _reflection.GeneratedProtocolMessageType('RenameTableRequest', (_message.Message,), dict( - DESCRIPTOR = _RENAMETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.RenameTableRequest) - )) -_sym_db.RegisterMessage(RenameTableRequest) - -CreateColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('CreateColumnFamilyRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATECOLUMNFAMILYREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateColumnFamilyRequest) - )) 
-_sym_db.RegisterMessage(CreateColumnFamilyRequest) - -DeleteColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('DeleteColumnFamilyRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETECOLUMNFAMILYREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteColumnFamilyRequest) - )) -_sym_db.RegisterMessage(DeleteColumnFamilyRequest) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_table_service_pb2.py b/gcloud/bigtable/_generated/bigtable_table_service_pb2.py deleted file mode 100644 index c77a09296fa4..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_service_pb2.py +++ /dev/null @@ -1,203 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/table/v1/bigtable_table_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2 -from gcloud.bigtable._generated import bigtable_table_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_service.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - 
serialized_pb=b'\n;google/bigtable/admin/table/v1/bigtable_table_service.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\x1a\x44google/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\x89\x0b\n\x14\x42igtableTableService\x12\xa4\x01\n\x0b\x43reateTable\x12\x32.google.bigtable.admin.table.v1.CreateTableRequest\x1a%.google.bigtable.admin.table.v1.Table\":\x82\xd3\xe4\x93\x02\x34\"//v1/{name=projects/*/zones/*/clusters/*}/tables:\x01*\x12\xac\x01\n\nListTables\x12\x31.google.bigtable.admin.table.v1.ListTablesRequest\x1a\x32.google.bigtable.admin.table.v1.ListTablesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{name=projects/*/zones/*/clusters/*}/tables\x12\x9d\x01\n\x08GetTable\x12/.google.bigtable.admin.table.v1.GetTableRequest\x1a%.google.bigtable.admin.table.v1.Table\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x94\x01\n\x0b\x44\x65leteTable\x12\x32.google.bigtable.admin.table.v1.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33*1/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x9e\x01\n\x0bRenameTable\x12\x32.google.bigtable.admin.table.v1.RenameTableRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02=\"8/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename:\x01*\x12\xca\x01\n\x12\x43reateColumnFamily\x12\x39.google.bigtable.admin.table.v1.CreateColumnFamilyRequest\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"K\x82\xd3\xe4\x93\x02\x45\"@/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies:\x01*\x12\xbf\x01\n\x12UpdateColumnFamily\x12,.google.bigtable.admin.table.v1.ColumnFamily\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"M\x82\xd3\xe4\x93\x02G\x1a\x42/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}:\x01*\x12\xb3\x01\n\x12\x44\x65leteColumnFamily\x12\x39.google.bigtable.admin.table.v1.DeleteColumnFamilyRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44*B/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}BB\n\"com.google.bigtable.admin.table.v1B\x1a\x42igtableTableServicesProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\032BigtableTableServicesProtoP\001') -import abc -from grpc.beta import implementations as beta_implementations -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -class BetaBigtableTableServiceServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def CreateTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ListTables(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def GetTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def RenameTable(self, request, context): - raise NotImplementedError() - 
@abc.abstractmethod - def CreateColumnFamily(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UpdateColumnFamily(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteColumnFamily(self, request, context): - raise NotImplementedError() - -class BetaBigtableTableServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def CreateTable(self, request, timeout): - raise NotImplementedError() - CreateTable.future = None - @abc.abstractmethod - def ListTables(self, request, timeout): - raise NotImplementedError() - ListTables.future = None - @abc.abstractmethod - def GetTable(self, request, timeout): - raise NotImplementedError() - GetTable.future = None - @abc.abstractmethod - def DeleteTable(self, request, timeout): - raise NotImplementedError() - DeleteTable.future = None - @abc.abstractmethod - def RenameTable(self, request, timeout): - raise NotImplementedError() - RenameTable.future = None - @abc.abstractmethod - def CreateColumnFamily(self, request, timeout): - raise NotImplementedError() - CreateColumnFamily.future = None - @abc.abstractmethod - def UpdateColumnFamily(self, request, timeout): - raise NotImplementedError() - UpdateColumnFamily.future = None - @abc.abstractmethod - def DeleteColumnFamily(self, request, timeout): - raise NotImplementedError() - DeleteColumnFamily.future = None - -def beta_create_BigtableTableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - request_deserializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): 
gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - } - response_serializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): face_utilities.unary_unary_inline(servicer.CreateColumnFamily), - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): face_utilities.unary_unary_inline(servicer.DeleteColumnFamily), - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): face_utilities.unary_unary_inline(servicer.RenameTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): face_utilities.unary_unary_inline(servicer.UpdateColumnFamily), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableTableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import 
gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - request_serializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - } - cardinalities = { - 'CreateColumnFamily': cardinality.Cardinality.UNARY_UNARY, - 'CreateTable': cardinality.Cardinality.UNARY_UNARY, - 'DeleteColumnFamily': cardinality.Cardinality.UNARY_UNARY, - 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, - 
'GetTable': cardinality.Cardinality.UNARY_UNARY, - 'ListTables': cardinality.Cardinality.UNARY_UNARY, - 'RenameTable': cardinality.Cardinality.UNARY_UNARY, - 'UpdateColumnFamily': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.table.v1.BigtableTableService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/operations_grpc_pb2.py b/gcloud/bigtable/_generated/operations_grpc_pb2.py deleted file mode 100644 index e4911b389f25..000000000000 --- a/gcloud/bigtable/_generated/operations_grpc_pb2.py +++ /dev/null @@ -1,100 +0,0 @@ -import abc -from grpc.beta import implementations as beta_implementations -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -class BetaOperationsServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def GetOperation(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ListOperations(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CancelOperation(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteOperation(self, request, context): - raise NotImplementedError() - -class BetaOperationsStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def GetOperation(self, request, timeout): - raise NotImplementedError() - GetOperation.future = None - @abc.abstractmethod - def ListOperations(self, request, timeout): - raise NotImplementedError() - ListOperations.future = None - @abc.abstractmethod - def CancelOperation(self, request, timeout): - raise NotImplementedError() - CancelOperation.future = None - @abc.abstractmethod - def DeleteOperation(self, request, timeout): - raise NotImplementedError() - DeleteOperation.future = None - -def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - request_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.FromString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.FromString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.FromString, - } - response_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.SerializeToString, - 
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.SerializeToString, - } - method_implementations = { - ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), - ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), - ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), - ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - request_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.SerializeToString, - } - response_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.FromString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.FromString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.FromString, - } - cardinalities = { - 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, - 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, - 'GetOperation': cardinality.Cardinality.UNARY_UNARY, - 'ListOperations': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/gcloud/bigtable/_testing.py b/gcloud/bigtable/_testing.py deleted file mode 100644 index d5f13c15d3a6..000000000000 --- a/gcloud/bigtable/_testing.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Mocks used to emulate gRPC generated objects.""" - - -class _FakeStub(object): - """Acts as a gRPC stub.""" - - def __init__(self, *results): - self.results = results - self.method_calls = [] - self._entered = 0 - self._exited = [] - - def __enter__(self): - self._entered += 1 - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self._exited.append((exc_type, exc_val, exc_tb)) - return True - - def __getattr__(self, name): - # We need not worry about attributes set in constructor - # since __getattribute__ will handle them. - return _MethodMock(name, self) - - -class _MethodMock(object): - """Mock for API method attached to a gRPC stub. - - In the beta implementation, these are of type - :class:`grpc.framework.crust.implementations._UnaryUnaryMultiCallable`. - """ - - def __init__(self, name, factory): - self._name = name - self._factory = factory - - def __call__(self, *args, **kwargs): - """Sync method meant to mock a gRPC stub request.""" - self._factory.method_calls.append((self._name, args, kwargs)) - curr_result, self._factory.results = (self._factory.results[0], - self._factory.results[1:]) - return curr_result diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py deleted file mode 100644 index 073aec4e99c4..000000000000 --- a/gcloud/bigtable/client.py +++ /dev/null @@ -1,481 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Parent client for calling the Google Cloud Bigtable API. - -This is the base from which all interactions with the API occur.
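For a sense of how the ``_FakeStub`` / ``_MethodMock`` helpers removed just above are driven in a unit test, here is a minimal sketch; the method name ``ReadRows`` and the request value are placeholders chosen only for illustration::

    from gcloud.bigtable._testing import _FakeStub

    expected_response = object()          # stand-in for a response protobuf
    stub = _FakeStub(expected_response)   # queued results are returned in order

    with stub:
        response = stub.ReadRows('request-pb', 10)

    assert response is expected_response
    assert stub.method_calls == [('ReadRows', ('request-pb', 10), {})]
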
- -In the hierarchy of API concepts - -* a :class:`Client` owns a :class:`.Cluster` -* a :class:`.Cluster` owns a :class:`Table ` -* a :class:`Table ` owns a - :class:`ColumnFamily <.column_family.ColumnFamily>` -* a :class:`Table ` owns a :class:`Row <.row.Row>` - (and all the cells in the row) -""" - - -from pkg_resources import get_distribution - -from grpc.beta import implementations - -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 -from gcloud.bigtable._generated import bigtable_cluster_service_pb2 -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import bigtable_service_pb2 -from gcloud.bigtable._generated import bigtable_table_service_pb2 -from gcloud.bigtable._generated import operations_grpc_pb2 -from gcloud.bigtable.cluster import Cluster -from gcloud.client import _ClientFactoryMixin -from gcloud.client import _ClientProjectMixin -from gcloud.credentials import get_credentials - - -TABLE_STUB_FACTORY = ( - bigtable_table_service_pb2.beta_create_BigtableTableService_stub) -TABLE_ADMIN_HOST = 'bigtabletableadmin.googleapis.com' -"""Table Admin API request host.""" -TABLE_ADMIN_PORT = 443 -"""Table Admin API request port.""" - -CLUSTER_STUB_FACTORY = ( - bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub) -CLUSTER_ADMIN_HOST = 'bigtableclusteradmin.googleapis.com' -"""Cluster Admin API request host.""" -CLUSTER_ADMIN_PORT = 443 -"""Cluster Admin API request port.""" - -DATA_STUB_FACTORY = bigtable_service_pb2.beta_create_BigtableService_stub -DATA_API_HOST = 'bigtable.googleapis.com' -"""Data API request host.""" -DATA_API_PORT = 443 -"""Data API request port.""" - -OPERATIONS_STUB_FACTORY = operations_grpc_pb2.beta_create_Operations_stub - -ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' -"""Scope for interacting with the Cluster Admin and Table Admin APIs.""" -DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data' -"""Scope for reading and writing table data.""" -READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' -"""Scope for reading table data.""" - -DEFAULT_TIMEOUT_SECONDS = 10 -"""The default timeout to use for API requests.""" - -DEFAULT_USER_AGENT = 'gcloud-python/{0}'.format( - get_distribution('gcloud').version) -"""The default user agent for API requests.""" - - -class Client(_ClientFactoryMixin, _ClientProjectMixin): - """Client for interacting with Google Cloud Bigtable API. - - .. note:: - - Since the Cloud Bigtable API requires the gRPC transport, no - ``http`` argument is accepted by this class. - - :type project: :class:`str` or :func:`unicode ` - :param project: (Optional) The ID of the project which owns the - clusters, tables and data. If not provided, will - attempt to determine from the environment. - - :type credentials: - :class:`OAuth2Credentials ` or - :data:`NoneType ` - :param credentials: (Optional) The OAuth2 Credentials to use for this - cluster. If not provided, defaults to the Google - Application Default Credentials. - - :type read_only: bool - :param read_only: (Optional) Boolean indicating if the data scope should be - for reading only (or for writing as well). Defaults to - :data:`False`. - - :type admin: bool - :param admin: (Optional) Boolean indicating if the client will be used to - interact with the Cluster Admin or Table Admin APIs. This - requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. 
- - :type user_agent: str - :param user_agent: (Optional) The user agent to be used with API request. - Defaults to :const:`DEFAULT_USER_AGENT`. - - :type timeout_seconds: int - :param timeout_seconds: Number of seconds for request time-out. If not - passed, defaults to - :const:`DEFAULT_TIMEOUT_SECONDS`. - - :raises: :class:`ValueError ` if both ``read_only`` - and ``admin`` are :data:`True` - """ - - def __init__(self, project=None, credentials=None, - read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT, - timeout_seconds=DEFAULT_TIMEOUT_SECONDS): - _ClientProjectMixin.__init__(self, project=project) - if credentials is None: - credentials = get_credentials() - - if read_only and admin: - raise ValueError('A read-only client cannot also perform' - 'administrative actions.') - - scopes = [] - if read_only: - scopes.append(READ_ONLY_SCOPE) - else: - scopes.append(DATA_SCOPE) - - if admin: - scopes.append(ADMIN_SCOPE) - - self._admin = bool(admin) - try: - credentials = credentials.create_scoped(scopes) - except AttributeError: - pass - self._credentials = credentials - self.user_agent = user_agent - self.timeout_seconds = timeout_seconds - - # These will be set in start(). - self._data_stub_internal = None - self._cluster_stub_internal = None - self._operations_stub_internal = None - self._table_stub_internal = None - - def copy(self): - """Make a copy of this client. - - Copies the local data stored as simple types but does not copy the - current state of any open connections with the Cloud Bigtable API. - - :rtype: :class:`.Client` - :returns: A copy of the current client. - """ - credentials = self._credentials - copied_creds = credentials.create_scoped(credentials.scopes) - return self.__class__( - self.project, - copied_creds, - READ_ONLY_SCOPE in copied_creds.scopes, - self._admin, - self.user_agent, - self.timeout_seconds, - ) - - @property - def credentials(self): - """Getter for client's credentials. - - :rtype: - :class:`OAuth2Credentials ` - :returns: The credentials stored on the client. - """ - return self._credentials - - @property - def project_name(self): - """Project name to be used with Cluster Admin API. - - .. note:: - - This property will not change if ``project`` does not, but the - return value is not cached. - - The project name is of the form - - ``"projects/{project}"`` - - :rtype: str - :returns: The project name to be used with the Cloud Bigtable Admin - API RPC service. - """ - return 'projects/' + self.project - - @property - def _data_stub(self): - """Getter for the gRPC stub used for the Data API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - :raises: :class:`ValueError ` if the current - client has not been :meth:`start`-ed. - """ - if self._data_stub_internal is None: - raise ValueError('Client has not been started.') - return self._data_stub_internal - - @property - def _cluster_stub(self): - """Getter for the gRPC stub used for the Cluster Admin API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if not self._admin: - raise ValueError('Client is not an admin client.') - if self._cluster_stub_internal is None: - raise ValueError('Client has not been started.') - return self._cluster_stub_internal - - @property - def _operations_stub(self): - """Getter for the gRPC stub used for the Operations API. 
- - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if not self._admin: - raise ValueError('Client is not an admin client.') - if self._operations_stub_internal is None: - raise ValueError('Client has not been started.') - return self._operations_stub_internal - - @property - def _table_stub(self): - """Getter for the gRPC stub used for the Table Admin API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. - """ - if not self._admin: - raise ValueError('Client is not an admin client.') - if self._table_stub_internal is None: - raise ValueError('Client has not been started.') - return self._table_stub_internal - - def _make_data_stub(self): - """Creates gRPC stub to make requests to the Data API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - """ - return _make_stub(self, DATA_STUB_FACTORY, - DATA_API_HOST, DATA_API_PORT) - - def _make_cluster_stub(self): - """Creates gRPC stub to make requests to the Cluster Admin API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - """ - return _make_stub(self, CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) - - def _make_operations_stub(self): - """Creates gRPC stub to make requests to the Operations API. - - These are for long-running operations of the Cluster Admin API, - hence the host and port matching. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - """ - return _make_stub(self, OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, CLUSTER_ADMIN_PORT) - - def _make_table_stub(self): - """Creates gRPC stub to make requests to the Table Admin API. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: A gRPC stub object. - """ - return _make_stub(self, TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, TABLE_ADMIN_PORT) - - def is_started(self): - """Check if the client has been started. - - :rtype: bool - :returns: Boolean indicating if the client has been started. - """ - return self._data_stub_internal is not None - - def start(self): - """Prepare the client to make requests. - - Activates gRPC contexts for making requests to the Bigtable - Service(s). - """ - if self.is_started(): - return - - # NOTE: We __enter__ the stubs more-or-less permanently. This is - # because only after entering the context managers is the - # connection created. We don't want to immediately close - # those connections since the client will make many - # requests with it over HTTP/2. - self._data_stub_internal = self._make_data_stub() - self._data_stub_internal.__enter__() - if self._admin: - self._cluster_stub_internal = self._make_cluster_stub() - self._operations_stub_internal = self._make_operations_stub() - self._table_stub_internal = self._make_table_stub() - - self._cluster_stub_internal.__enter__() - self._operations_stub_internal.__enter__() - self._table_stub_internal.__enter__() - - def stop(self): - """Closes all the open gRPC clients.""" - if not self.is_started(): - return - - # When exit-ing, we pass None as the exception type, value and - # traceback to __exit__. 
- self._data_stub_internal.__exit__(None, None, None) - if self._admin: - self._cluster_stub_internal.__exit__(None, None, None) - self._operations_stub_internal.__exit__(None, None, None) - self._table_stub_internal.__exit__(None, None, None) - - self._data_stub_internal = None - self._cluster_stub_internal = None - self._operations_stub_internal = None - self._table_stub_internal = None - - def cluster(self, zone, cluster_id, display_name=None, serve_nodes=3): - """Factory to create a cluster associated with this client. - - :type zone: str - :param zone: The name of the zone where the cluster resides. - - :type cluster_id: str - :param cluster_id: The ID of the cluster. - - :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the cluster ID. - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to 3. - - :rtype: :class:`.Cluster` - :returns: The cluster owned by this client. - """ - return Cluster(zone, cluster_id, self, - display_name=display_name, serve_nodes=serve_nodes) - - def list_zones(self): - """Lists zones associated with project. - - :rtype: list - :returns: The names (as :class:`str`) of the zones - :raises: :class:`ValueError ` if one of the - zones is not in ``OK`` state. - """ - request_pb = messages_pb2.ListZonesRequest(name=self.project_name) - # We expect a `.messages_pb2.ListZonesResponse` - list_zones_response = self._cluster_stub.ListZones( - request_pb, self.timeout_seconds) - - result = [] - for zone in list_zones_response.zones: - if zone.status != data_pb2.Zone.OK: - raise ValueError('Zone %s not in OK state' % ( - zone.display_name,)) - result.append(zone.display_name) - return result - - def list_clusters(self): - """Lists clusters owned by the project. - - :rtype: tuple - :returns: A pair of results, the first is a list of :class:`.Cluster` s - returned and the second is a list of strings (the failed - zones in the request). - """ - request_pb = messages_pb2.ListClustersRequest(name=self.project_name) - # We expect a `.messages_pb2.ListClustersResponse` - list_clusters_response = self._cluster_stub.ListClusters( - request_pb, self.timeout_seconds) - - failed_zones = [zone.display_name - for zone in list_clusters_response.failed_zones] - clusters = [Cluster.from_pb(cluster_pb, self) - for cluster_pb in list_clusters_response.clusters] - return clusters, failed_zones - - -class _MetadataPlugin(object): - """Callable class to transform metadata for gRPC requests. - - :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. - """ - - def __init__(self, client): - self._credentials = client.credentials - self._user_agent = client.user_agent - - def __call__(self, unused_context, callback): - """Adds authorization header to request metadata.""" - access_token = self._credentials.get_access_token().access_token - headers = [ - ('Authorization', 'Bearer ' + access_token), - ('User-agent', self._user_agent), - ] - callback(headers, None) - - -def _make_stub(client, stub_factory, host, port): - """Makes a stub for an RPC service. - - Uses / depends on the beta implementation of gRPC. - - :type client: :class:`.client.Client` - :param client: The client that owns the cluster. Provides authorization and - user agent. 
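(For context, a hedged sketch of the admin calls these stubs ultimately
serve; the zone and cluster IDs are hypothetical.)

.. code:: python

    from gcloud.bigtable.client import Client

    client = Client(project='my-project', admin=True)
    client.start()

    zones = client.list_zones()                      # e.g. ['us-central1-b']
    clusters, failed_zones = client.list_clusters()

    # Factory only -- no API request until create()/reload() is called.
    cluster = client.cluster('us-central1-b', 'my-cluster',
                             display_name='My cluster', serve_nodes=3)
    client.stop()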
- - :type stub_factory: callable - :param stub_factory: A factory which will create a gRPC stub for - a given service. - - :type host: str - :param host: The host for the service. - - :type port: int - :param port: The port for the service. - - :rtype: :class:`grpc.beta._stub._AutoIntermediary` - :returns: The stub object used to make gRPC requests to a given API. - """ - # Leaving the first argument to ssl_channel_credentials() as None - # loads root certificates from `grpc/_adapter/credentials/roots.pem`. - transport_creds = implementations.ssl_channel_credentials(None, None, None) - custom_metadata_plugin = _MetadataPlugin(client) - auth_creds = implementations.metadata_call_credentials( - custom_metadata_plugin, name='google_creds') - channel_creds = implementations.composite_channel_credentials( - transport_creds, auth_creds) - channel = implementations.secure_channel(host, port, channel_creds) - return stub_factory(channel) diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py deleted file mode 100644 index 95be153ad474..000000000000 --- a/gcloud/bigtable/cluster.py +++ /dev/null @@ -1,485 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable Cluster.""" - - -import re - -from google.longrunning import operations_pb2 - -from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) -from gcloud.bigtable.table import Table - - -_CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'zones/(?P[^/]+)/clusters/' - r'(?P[a-z][-a-z0-9]*)$') -_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/zones/([^/]+)/' - r'clusters/([a-z][-a-z0-9]*)/operations/' - r'(?P\d+)$') -_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.' -_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.cluster.v1.' -_CLUSTER_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateClusterMetadata' -_UPDATE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UpdateClusterMetadata' -_UNDELETE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'UndeleteClusterMetadata' -_TYPE_URL_MAP = { - _CLUSTER_CREATE_METADATA: messages_pb2.CreateClusterMetadata, - _UPDATE_CREATE_METADATA: messages_pb2.UpdateClusterMetadata, - _UNDELETE_CREATE_METADATA: messages_pb2.UndeleteClusterMetadata, -} - -DEFAULT_SERVE_NODES = 3 -"""Default number of nodes to use when creating a cluster.""" - - -def _prepare_create_request(cluster): - """Creates a protobuf request for a CreateCluster request. - - :type cluster: :class:`Cluster` - :param cluster: The cluster to be created. - - :rtype: :class:`.messages_pb2.CreateClusterRequest` - :returns: The CreateCluster request object containing the cluster info. 
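(The ``(?P...)`` groups in the module-level patterns above appear to have
lost their angle-bracketed names in rendering; the later
``match.group('project')`` / ``match.group('operation_id')`` calls imply
names like the following. A small sketch, with a hypothetical cluster
name:)

.. code:: python

    import re

    _CLUSTER_NAME_RE = re.compile(
        r'^projects/(?P<project>[^/]+)/'
        r'zones/(?P<zone>[^/]+)/clusters/'
        r'(?P<cluster_id>[a-z][-a-z0-9]*)$')

    match = _CLUSTER_NAME_RE.match(
        'projects/my-project/zones/us-central1-b/clusters/my-cluster')
    match.group('project')      # 'my-project'
    match.group('zone')         # 'us-central1-b'
    match.group('cluster_id')   # 'my-cluster'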
- """ - zone_full_name = ('projects/' + cluster._client.project + - '/zones/' + cluster.zone) - return messages_pb2.CreateClusterRequest( - name=zone_full_name, - cluster_id=cluster.cluster_id, - cluster=data_pb2.Cluster( - display_name=cluster.display_name, - serve_nodes=cluster.serve_nodes, - ), - ) - - -def _parse_pb_any_to_native(any_val, expected_type=None): - """Convert a serialized "google.protobuf.Any" value to actual type. - - :type any_val: :class:`google.protobuf.any_pb2.Any` - :param any_val: A serialized protobuf value container. - - :type expected_type: str - :param expected_type: (Optional) The type URL we expect ``any_val`` - to have. - - :rtype: object - :returns: The de-serialized object. - :raises: :class:`ValueError ` if the - ``expected_type`` does not match the ``type_url`` on the input. - """ - if expected_type is not None and expected_type != any_val.type_url: - raise ValueError('Expected type: %s, Received: %s' % ( - expected_type, any_val.type_url)) - container_class = _TYPE_URL_MAP[any_val.type_url] - return container_class.FromString(any_val.value) - - -def _process_operation(operation_pb): - """Processes a create protobuf response. - - :type operation_pb: :class:`google.longrunning.operations_pb2.Operation` - :param operation_pb: The long-running operation response from a - Create/Update/Undelete cluster request. - - :rtype: tuple - :returns: A pair of an integer and datetime stamp. The integer is the ID - of the operation (``operation_id``) and the timestamp when - the create operation began (``operation_begin``). - :raises: :class:`ValueError ` if the operation name - doesn't match the :data:`_OPERATION_NAME_RE` regex. - """ - match = _OPERATION_NAME_RE.match(operation_pb.name) - if match is None: - raise ValueError('Operation name was not in the expected ' - 'format after a cluster modification.', - operation_pb.name) - operation_id = int(match.group('operation_id')) - - request_metadata = _parse_pb_any_to_native(operation_pb.metadata) - operation_begin = _pb_timestamp_to_datetime( - request_metadata.request_time) - - return operation_id, operation_begin - - -class Operation(object): - """Representation of a Google API Long-Running Operation. - - In particular, these will be the result of operations on - clusters using the Cloud Bigtable API. - - :type op_type: str - :param op_type: The type of operation being performed. Expect - ``create``, ``update`` or ``undelete``. - - :type op_id: int - :param op_id: The ID of the operation. - - :type begin: :class:`datetime.datetime` - :param begin: The time when the operation was started. - - :type cluster: :class:`Cluster` - :param cluster: The cluster that created the operation. - """ - - def __init__(self, op_type, op_id, begin, cluster=None): - self.op_type = op_type - self.op_id = op_id - self.begin = begin - self._cluster = cluster - self._complete = False - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.op_type == self.op_type and - other.op_id == self.op_id and - other.begin == self.begin and - other._cluster == self._cluster and - other._complete == self._complete) - - def __ne__(self, other): - return not self.__eq__(other) - - def finished(self): - """Check if the operation has finished. - - :rtype: bool - :returns: A boolean indicating if the current operation has completed. - :raises: :class:`ValueError ` if the operation - has already completed. 
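(A hedged sketch of the polling pattern this supports; ``cluster`` is
assumed to come from an admin client as sketched earlier.)

.. code:: python

    import time

    operation = cluster.create()        # returns an Operation('create', ...)
    while not operation.finished():     # one GetOperation RPC per call
        time.sleep(5)
    # A further finished() call after completion raises ValueError.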
- """ - if self._complete: - raise ValueError('The operation has completed.') - - operation_name = ('operations/' + self._cluster.name + - '/operations/%d' % (self.op_id,)) - request_pb = operations_pb2.GetOperationRequest(name=operation_name) - # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb = self._cluster._client._operations_stub.GetOperation( - request_pb, self._cluster._client.timeout_seconds) - - if operation_pb.done: - self._complete = True - return True - else: - return False - - -class Cluster(object): - """Representation of a Google Cloud Bigtable Cluster. - - We can use a :class:`Cluster` to: - - * :meth:`reload` itself - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - * :meth:`undelete` itself - - .. note:: - - For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_pb2.STORAGE_SSD`. - - :type zone: str - :param zone: The name of the zone where the cluster resides. - - :type cluster_id: str - :param cluster_id: The ID of the cluster. - - :type client: :class:`Client ` - :param client: The client that owns the cluster. Provides - authorization and a project ID. - - :type display_name: str - :param display_name: (Optional) The display name for the cluster in the - Cloud Console UI. (Must be between 4 and 30 - characters.) If this value is not set in the - constructor, will fall back to the cluster ID. - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to :data:`DEFAULT_SERVE_NODES`. - """ - - def __init__(self, zone, cluster_id, client, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES): - self.zone = zone - self.cluster_id = cluster_id - self.display_name = display_name or cluster_id - self.serve_nodes = serve_nodes - self._client = client - - def table(self, table_id): - """Factory to create a table associated with this cluster. - - :type table_id: str - :param table_id: The ID of the table. - - :rtype: :class:`Table ` - :returns: The table owned by this cluster. - """ - return Table(table_id, self) - - def _update_from_pb(self, cluster_pb): - if not cluster_pb.display_name: # Simple field (string) - raise ValueError('Cluster protobuf does not contain display_name') - if not cluster_pb.serve_nodes: # Simple field (int32) - raise ValueError('Cluster protobuf does not contain serve_nodes') - self.display_name = cluster_pb.display_name - self.serve_nodes = cluster_pb.serve_nodes - - @classmethod - def from_pb(cls, cluster_pb, client): - """Creates a cluster instance from a protobuf. - - :type cluster_pb: :class:`bigtable_cluster_data_pb2.Cluster` - :param cluster_pb: A cluster protobuf object. - - :type client: :class:`Client ` - :param client: The client that owns the cluster. - - :rtype: :class:`Cluster` - :returns: The cluster parsed from the protobuf response. - :raises: :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/zones/{zone}/clusters/{cluster_id}`` - or if the parsed project ID does not match the project ID - on the client. 
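(Tying the pieces together, a hedged sketch; the project, zone and IDs
are hypothetical.)

.. code:: python

    from gcloud.bigtable.client import Client

    client = Client(project='my-project', admin=True)
    client.start()

    cluster = client.cluster('us-central1-b', 'my-cluster')
    cluster.display_name        # falls back to 'my-cluster'
    cluster.serve_nodes         # DEFAULT_SERVE_NODES == 3

    table = cluster.table('my-table')   # factory only, no API request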
- """ - match = _CLUSTER_NAME_RE.match(cluster_pb.name) - if match is None: - raise ValueError('Cluster protobuf name was not in the ' - 'expected format.', cluster_pb.name) - if match.group('project') != client.project: - raise ValueError('Project ID on cluster does not match the ' - 'project ID on the client') - - result = cls(match.group('zone'), match.group('cluster_id'), client) - result._update_from_pb(cluster_pb) - return result - - def copy(self): - """Make a copy of this cluster. - - Copies the local data stored as simple types and copies the client - attached to this instance. - - :rtype: :class:`.Cluster` - :returns: A copy of the current cluster. - """ - new_client = self._client.copy() - return self.__class__(self.zone, self.cluster_id, new_client, - display_name=self.display_name, - serve_nodes=self.serve_nodes) - - @property - def name(self): - """Cluster name used in requests. - - .. note:: - This property will not change if ``zone`` and ``cluster_id`` do not, - but the return value is not cached. - - The cluster name is of the form - - ``"projects/{project}/zones/{zone}/clusters/{cluster_id}"`` - - :rtype: str - :returns: The cluster name. - """ - return (self._client.project_name + '/zones/' + self.zone + - '/clusters/' + self.cluster_id) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - # NOTE: This does not compare the configuration values, such as - # the serve_nodes or display_name. Instead, it only compares - # identifying values zone, cluster ID and client. This is - # intentional, since the same cluster can be in different states - # if not synchronized. Clusters with similar zone/cluster - # settings but different clients can't be used in the same way. - return (other.zone == self.zone and - other.cluster_id == self.cluster_id and - other._client == self._client) - - def __ne__(self, other): - return not self.__eq__(other) - - def reload(self): - """Reload the metadata for this cluster.""" - request_pb = messages_pb2.GetClusterRequest(name=self.name) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.GetCluster( - request_pb, self._client.timeout_seconds) - - # NOTE: _update_from_pb does not check that the project, zone and - # cluster ID on the response match the request. - self._update_from_pb(cluster_pb) - - def create(self): - """Create this cluster. - - .. note:: - - Uses the ``project``, ``zone`` and ``cluster_id`` on the current - :class:`Cluster` in addition to the ``display_name`` and - ``serve_nodes``. If you'd like to change them before creating, - reset the values via - - .. code:: python - - cluster.display_name = 'New display name' - cluster.cluster_id = 'i-changed-my-mind' - - before calling :meth:`create`. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - create operation. - """ - request_pb = _prepare_create_request(self) - # We expect a `google.longrunning.operations_pb2.Operation`. - cluster_pb = self._client._cluster_stub.CreateCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('create', op_id, op_begin, cluster=self) - - def update(self): - """Update this cluster. - - .. note:: - - Updates the ``display_name`` and ``serve_nodes``. If you'd like to - change them before updating, reset the values via - - .. 
code:: python - - cluster.display_name = 'New display name' - cluster.serve_nodes = 3 - - before calling :meth:`update`. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - update operation. - """ - request_pb = data_pb2.Cluster( - name=self.name, - display_name=self.display_name, - serve_nodes=self.serve_nodes, - ) - # We expect a `._generated.bigtable_cluster_data_pb2.Cluster`. - cluster_pb = self._client._cluster_stub.UpdateCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(cluster_pb.current_operation) - return Operation('update', op_id, op_begin, cluster=self) - - def delete(self): - """Delete this cluster. - - Marks a cluster and all of its tables for permanent deletion in 7 days. - - Immediately upon completion of the request: - - * Billing will cease for all of the cluster's reserved resources. - * The cluster's ``delete_time`` field will be set 7 days in the future. - - Soon afterward: - - * All tables within the cluster will become unavailable. - - Prior to the cluster's ``delete_time``: - - * The cluster can be recovered with a call to ``UndeleteCluster``. - * All other attempts to modify or delete the cluster will be rejected. - - At the cluster's ``delete_time``: - - * The cluster and **all of its tables** will immediately and - irrevocably disappear from the API, and their data will be - permanently deleted. - """ - request_pb = messages_pb2.DeleteClusterRequest(name=self.name) - # We expect a `google.protobuf.empty_pb2.Empty` - self._client._cluster_stub.DeleteCluster( - request_pb, self._client.timeout_seconds) - - def undelete(self): - """Undelete this cluster. - - Cancels the scheduled deletion of an cluster and begins preparing it to - resume serving. The returned operation will also be embedded as the - cluster's ``current_operation``. - - Immediately upon completion of this request: - - * The cluster's ``delete_time`` field will be unset, protecting it from - automatic deletion. - - Until completion of the returned operation: - - * The operation cannot be cancelled. - - Upon completion of the returned operation: - - * Billing for the cluster's resources will resume. - * All tables within the cluster will be available. - - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - undelete operation. - """ - request_pb = messages_pb2.UndeleteClusterRequest(name=self.name) - # We expect a `google.longrunning.operations_pb2.Operation`. - operation_pb2 = self._client._cluster_stub.UndeleteCluster( - request_pb, self._client.timeout_seconds) - - op_id, op_begin = _process_operation(operation_pb2) - return Operation('undelete', op_id, op_begin, cluster=self) - - def list_tables(self): - """List the tables in this cluster. - - :rtype: list of :class:`Table ` - :returns: The list of tables owned by the cluster. - :raises: :class:`ValueError ` if one of the - returned tables has a name that is not of the expected format. 
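(A hedged sketch of the admin lifecycle these methods cover, continuing
the ``cluster`` from the earlier sketch.)

.. code:: python

    cluster.reload()                     # refresh display_name / serve_nodes

    cluster.serve_nodes = 5
    update_operation = cluster.update()  # long-running Operation

    tables = cluster.list_tables()       # Table objects owned by the cluster

    cluster.delete()                     # tables removed permanently in 7 days
    undelete_operation = cluster.undelete()  # cancels the scheduled delete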
- """ - request_pb = table_messages_pb2.ListTablesRequest(name=self.name) - # We expect a `table_messages_pb2.ListTablesResponse` - table_list_pb = self._client._table_stub.ListTables( - request_pb, self._client.timeout_seconds) - - result = [] - for table_pb in table_list_pb.tables: - table_prefix = self.name + '/tables/' - if not table_pb.name.startswith(table_prefix): - raise ValueError('Table name %s not of expected format' % ( - table_pb.name,)) - table_id = table_pb.name[len(table_prefix):] - result.append(self.table(table_id)) - - return result diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py deleted file mode 100644 index c0d9060316a4..000000000000 --- a/gcloud/bigtable/column_family.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable Column Family.""" - - -import datetime - -from google.protobuf import duration_pb2 - -from gcloud._helpers import _total_seconds -from gcloud.bigtable._generated import bigtable_table_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - - -def _timedelta_to_duration_pb(timedelta_val): - """Convert a Python timedelta object to a duration protobuf. - - .. note:: - - The Python timedelta has a granularity of microseconds while - the protobuf duration type has a duration of nanoseconds. - - :type timedelta_val: :class:`datetime.timedelta` - :param timedelta_val: A timedelta object. - - :rtype: :class:`google.protobuf.duration_pb2.Duration` - :returns: A duration object equivalent to the time delta. - """ - seconds_decimal = _total_seconds(timedelta_val) - # Truncate the parts other than the integer. - seconds = int(seconds_decimal) - if seconds_decimal < 0: - signed_micros = timedelta_val.microseconds - 10**6 - else: - signed_micros = timedelta_val.microseconds - # Convert nanoseconds to microseconds. - nanos = 1000 * signed_micros - return duration_pb2.Duration(seconds=seconds, nanos=nanos) - - -def _duration_pb_to_timedelta(duration_pb): - """Convert a duration protobuf to a Python timedelta object. - - .. note:: - - The Python timedelta has a granularity of microseconds while - the protobuf duration type has a duration of nanoseconds. - - :type duration_pb: :class:`google.protobuf.duration_pb2.Duration` - :param duration_pb: A protobuf duration object. - - :rtype: :class:`datetime.timedelta` - :returns: The converted timedelta object. - """ - return datetime.timedelta( - seconds=duration_pb.seconds, - microseconds=(duration_pb.nanos / 1000.0), - ) - - -class GarbageCollectionRule(object): - """Garbage collection rule for column families within a table. - - Cells in the column family (within a table) fitting the rule will be - deleted during garbage collection. - - .. note:: - - This class is a do-nothing base class for all GC rules. - - .. 
note:: - - A string ``gc_expression`` can also be used with API requests, but - that value would be superceded by a ``gc_rule``. As a result, we - don't support that feature and instead support via native classes. - """ - - def __ne__(self, other): - return not self.__eq__(other) - - -class MaxVersionsGCRule(GarbageCollectionRule): - """Garbage collection limiting the number of versions of a cell. - - :type max_num_versions: int - :param max_num_versions: The maximum number of versions - """ - - def __init__(self, max_num_versions): - self.max_num_versions = max_num_versions - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.max_num_versions == self.max_num_versions - - def to_pb(self): - """Converts the garbage collection rule to a protobuf. - - :rtype: :class:`.data_pb2.GcRule` - :returns: The converted current object. - """ - return data_pb2.GcRule(max_num_versions=self.max_num_versions) - - -class MaxAgeGCRule(GarbageCollectionRule): - """Garbage collection limiting the age of a cell. - - :type max_age: :class:`datetime.timedelta` - :param max_age: The maximum age allowed for a cell in the table. - """ - - def __init__(self, max_age): - self.max_age = max_age - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.max_age == self.max_age - - def to_pb(self): - """Converts the garbage collection rule to a protobuf. - - :rtype: :class:`.data_pb2.GcRule` - :returns: The converted current object. - """ - max_age = _timedelta_to_duration_pb(self.max_age) - return data_pb2.GcRule(max_age=max_age) - - -class GCRuleUnion(GarbageCollectionRule): - """Union of garbage collection rules. - - :type rules: list - :param rules: List of :class:`GarbageCollectionRule`. - """ - - def __init__(self, rules): - self.rules = rules - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.rules == self.rules - - def to_pb(self): - """Converts the union into a single GC rule as a protobuf. - - :rtype: :class:`.data_pb2.GcRule` - :returns: The converted current object. - """ - union = data_pb2.GcRule.Union( - rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(union=union) - - -class GCRuleIntersection(GarbageCollectionRule): - """Intersection of garbage collection rules. - - :type rules: list - :param rules: List of :class:`GarbageCollectionRule`. - """ - - def __init__(self, rules): - self.rules = rules - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.rules == self.rules - - def to_pb(self): - """Converts the intersection into a single GC rule as a protobuf. - - :rtype: :class:`.data_pb2.GcRule` - :returns: The converted current object. - """ - intersection = data_pb2.GcRule.Intersection( - rules=[rule.to_pb() for rule in self.rules]) - return data_pb2.GcRule(intersection=intersection) - - -class ColumnFamily(object): - """Representation of a Google Cloud Bigtable Column Family. - - We can use a :class:`ColumnFamily` to: - - * :meth:`create` itself - * :meth:`update` itself - * :meth:`delete` itself - - :type column_family_id: str - :param column_family_id: The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type table: :class:`Table ` - :param table: The table that owns the column family. - - :type gc_rule: :class:`GarbageCollectionRule` - :param gc_rule: (Optional) The garbage collection settings for this - column family. 
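(For example, a hedged sketch composing the rules above: keep at most
three versions, and nothing older than seven days.)

.. code:: python

    import datetime

    from gcloud.bigtable.column_family import (
        GCRuleIntersection, MaxAgeGCRule, MaxVersionsGCRule)

    gc_rule = GCRuleIntersection(rules=[
        MaxVersionsGCRule(3),
        MaxAgeGCRule(datetime.timedelta(days=7)),
    ])
    gc_rule.to_pb()    # a single GcRule protobuf with an ``intersection``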
- """ - - def __init__(self, column_family_id, table, gc_rule=None): - self.column_family_id = column_family_id - self._table = table - self.gc_rule = gc_rule - - @property - def name(self): - """Column family name used in requests. - - .. note:: - - This property will not change if ``column_family_id`` does not, but - the return value is not cached. - - The table name is of the form - - ``"projects/../zones/../clusters/../tables/../columnFamilies/.."`` - - :rtype: str - :returns: The column family name. - """ - return self._table.name + '/columnFamilies/' + self.column_family_id - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.column_family_id == self.column_family_id and - other._table == self._table and - other.gc_rule == self.gc_rule) - - def __ne__(self, other): - return not self.__eq__(other) - - def create(self): - """Create this column family.""" - if self.gc_rule is None: - column_family = data_pb2.ColumnFamily() - else: - column_family = data_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( - name=self._table.name, - column_family_id=self.column_family_id, - column_family=column_family, - ) - client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. We ignore it since the only - # data it contains are the GC rule and the column family ID already - # stored on this instance. - client._table_stub.CreateColumnFamily(request_pb, - client.timeout_seconds) - - def update(self): - """Update this column family. - - .. note:: - - Only the GC rule can be updated. By changing the column family ID, - you will simply be referring to a different column family. - """ - request_kwargs = {'name': self.name} - if self.gc_rule is not None: - request_kwargs['gc_rule'] = self.gc_rule.to_pb() - request_pb = data_pb2.ColumnFamily(**request_kwargs) - client = self._table._cluster._client - # We expect a `.data_pb2.ColumnFamily`. We ignore it since the only - # data it contains are the GC rule and the column family ID already - # stored on this instance. - client._table_stub.UpdateColumnFamily(request_pb, - client.timeout_seconds) - - def delete(self): - """Delete this column family.""" - request_pb = messages_pb2.DeleteColumnFamilyRequest(name=self.name) - client = self._table._cluster._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.DeleteColumnFamily(request_pb, - client.timeout_seconds) - - -def _gc_rule_from_pb(gc_rule_pb): - """Convert a protobuf GC rule to a native object. - - :type gc_rule_pb: :class:`.data_pb2.GcRule` - :param gc_rule_pb: The GC rule to convert. - - :rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` - :returns: An instance of one of the native rules defined - in :module:`column_family` or :data:`None` if no values were - set on the protobuf passed in. - :raises: :class:`ValueError ` if the rule name - is unexpected. 
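(A hedged sketch of the round trip, reusing ``table`` and ``gc_rule``
from the earlier sketches; the family ID is hypothetical.)

.. code:: python

    from gcloud.bigtable.column_family import ColumnFamily, MaxVersionsGCRule

    column_family = ColumnFamily('cf1', table, gc_rule=gc_rule)
    column_family.create()              # CreateColumnFamily RPC

    column_family.gc_rule = MaxVersionsGCRule(1)
    column_family.update()              # UpdateColumnFamily RPC

    column_family.delete()              # DeleteColumnFamily RPC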
- """ - rule_name = gc_rule_pb.WhichOneof('rule') - if rule_name is None: - return None - - if rule_name == 'max_num_versions': - return MaxVersionsGCRule(gc_rule_pb.max_num_versions) - elif rule_name == 'max_age': - max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age) - return MaxAgeGCRule(max_age) - elif rule_name == 'union': - return GCRuleUnion([_gc_rule_from_pb(rule) - for rule in gc_rule_pb.union.rules]) - elif rule_name == 'intersection': - rules = [_gc_rule_from_pb(rule) - for rule in gc_rule_pb.intersection.rules] - return GCRuleIntersection(rules) - else: - raise ValueError('Unexpected rule name', rule_name) diff --git a/gcloud/bigtable/happybase/__init__.py b/gcloud/bigtable/happybase/__init__.py deleted file mode 100644 index 03e4d9215ff1..000000000000 --- a/gcloud/bigtable/happybase/__init__.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable HappyBase package. - -This package is intended to emulate the HappyBase library using -Google Cloud Bigtable as the backing store. - -Differences in Public API -------------------------- - -Some concepts from HBase/Thrift do not map directly to the Cloud -Bigtable API. As a result, the following instance methods and functions -could not be implemented: - -* :meth:`Connection.enable_table() \ - ` - no - concept of enabled/disabled -* :meth:`Connection.disable_table() \ - ` - no - concept of enabled/disabled -* :meth:`Connection.is_table_enabled() \ - ` - - no concept of enabled/disabled -* :meth:`Connection.compact_table() \ - ` - - table storage is opaque to user -* :func:`make_row() ` - helper - needed for Thrift library -* :func:`make_ordered_row() ` - - helper needed for Thrift library -* :meth:`Table.regions() ` - - tables in Cloud Bigtable do not expose internal storage details -* :meth:`Table.counter_set() \ - ` - method can't - be atomic, so we disable it -* The ``__version__`` value for the HappyBase package is :data:`None`. - However, it's worth nothing this implementation was based off HappyBase - 0.9. - -In addition, many of the constants from -:mod:`connection ` -are specific to HBase and are defined as :data:`None` in our module: - -* ``COMPAT_MODES`` -* ``THRIFT_TRANSPORTS`` -* ``THRIFT_PROTOCOLS`` -* ``DEFAULT_HOST`` -* ``DEFAULT_PORT`` -* ``DEFAULT_TRANSPORT`` -* ``DEFAULT_COMPAT`` -* ``DEFAULT_PROTOCOL`` - -Two of these ``DEFAULT_HOST`` and ``DEFAULT_PORT``, are even imported in -the main :mod:`happybase ` package. - -Finally, we do not provide the ``util`` module. Though it is public in the -HappyBase library, it provides no core functionality. - -API Behavior Changes --------------------- - -* Since there is no concept of an enabled / disabled table, calling - :meth:`Connection.delete_table() \ - ` - with ``disable=True`` can't be supported. - Using that argument will result in a warning. 
-* The :class:`Connection ` - constructor **disables** the use of several - arguments and will print a warning if any of them are passed in as keyword - arguments. The arguments are: - - * ``host`` - * ``port`` - * ``compat`` - * ``transport`` - * ``protocol`` -* In order to make - :class:`Connection ` - compatible with Cloud Bigtable, we add a ``cluster`` keyword argument to - allow users to pass in their own - :class:`Cluster ` (which they can - construct beforehand). - - For example: - - .. code:: python - - from gcloud.bigtable.client import Client - client = Client(project=PROJECT_ID, admin=True) - cluster = client.cluster(zone, cluster_id) - cluster.reload() - - from gcloud.bigtable.happybase import Connection - connection = Connection(cluster=cluster) - -* Any uses of the ``wal`` (Write Ahead Log) argument will result in a - warning as well. This includes uses in: - - * :class:`Batch ` - * :meth:`Batch.put() ` - * :meth:`Batch.delete() ` - * :meth:`Table.put() ` - * :meth:`Table.delete() ` - * :meth:`Table.batch() ` factory -* When calling - :meth:`Connection.create_table() \ - `, the - majority of HBase column family options cannot be used. Among - - * ``max_versions`` - * ``compression`` - * ``in_memory`` - * ``bloom_filter_type`` - * ``bloom_filter_vector_size`` - * ``bloom_filter_nb_hashes`` - * ``block_cache_enabled`` - * ``time_to_live`` - - Only ``max_versions`` and ``time_to_live`` are availabe in Cloud Bigtable - (as - :class:`MaxVersionsGCRule ` - and - :class:`MaxAgeGCRule `). - - In addition to using a dictionary for specifying column family options, - we also accept instances of :class:`.GarbageCollectionRule` or subclasses. -* :meth:`Table.scan() ` no longer - accepts the following arguments (which will result in a warning): - - * ``batch_size`` - * ``scan_batching`` - * ``sorted_columns`` - -* Using a HBase filter string in - :meth:`Table.scan() ` is - not possible with Cloud Bigtable and will result in a - :class:`TypeError `. However, the method now accepts - instances of :class:`.RowFilter` and subclasses. -* :meth:`Batch.delete() ` (and - hence - :meth:`Table.delete() `) - will fail with a :class:`ValueError ` when either a - row or column family delete is attempted with a ``timestamp``. This is - because the Cloud Bigtable API uses the ``DeleteFromFamily`` and - ``DeleteFromRow`` mutations for these deletes, and neither of these - mutations support a timestamp. -""" - -from gcloud.bigtable.happybase.batch import Batch -from gcloud.bigtable.happybase.connection import Connection -from gcloud.bigtable.happybase.connection import DEFAULT_HOST -from gcloud.bigtable.happybase.connection import DEFAULT_PORT -from gcloud.bigtable.happybase.pool import ConnectionPool -from gcloud.bigtable.happybase.pool import NoConnectionsAvailable -from gcloud.bigtable.happybase.table import Table - - -# Values from HappyBase that we don't reproduce / are not relevant. -__version__ = None diff --git a/gcloud/bigtable/happybase/batch.py b/gcloud/bigtable/happybase/batch.py deleted file mode 100644 index 25e6d073fc66..000000000000 --- a/gcloud/bigtable/happybase/batch.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable HappyBase batch module.""" - - -import datetime -import warnings - -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud.bigtable.row_filters import TimestampRange - - -_WAL_SENTINEL = object() -# Assumed granularity of timestamps in Cloud Bigtable. -_ONE_MILLISECOND = datetime.timedelta(microseconds=1000) -_WARN = warnings.warn -_WAL_WARNING = ('The wal argument (Write-Ahead-Log) is not ' - 'supported by Cloud Bigtable.') - - -class Batch(object): - """Batch class for accumulating mutations. - - .. note:: - - When using a batch with ``transaction=False`` as a context manager - (i.e. in a ``with`` statement), mutations will still be sent as - row mutations even if the context manager exits with an error. - This behavior is in place to match the behavior in the HappyBase - HBase / Thrift implementation. - - :type table: :class:`Table ` - :param table: The table where mutations will be applied. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the epoch) - that all mutations will be applied at. - - :type batch_size: int - :param batch_size: (Optional) The maximum number of mutations to allow - to accumulate before committing them. - - :type transaction: bool - :param transaction: Flag indicating if the mutations should be sent - transactionally or not. If ``transaction=True`` and - an error occurs while a :class:`Batch` is active, - then none of the accumulated mutations will be - committed. If ``batch_size`` is set, the mutation - can't be transactional. - - :type wal: object - :param wal: Unused parameter (Boolean for using the HBase Write Ahead Log). - Provided for compatibility with HappyBase, but irrelevant for - Cloud Bigtable since it does not have a Write Ahead Log. - - :raises: :class:`TypeError ` if ``batch_size`` - is set and ``transaction=True``. - :class:`ValueError ` if ``batch_size`` - is not positive. - """ - - def __init__(self, table, timestamp=None, batch_size=None, - transaction=False, wal=_WAL_SENTINEL): - if wal is not _WAL_SENTINEL: - _WARN(_WAL_WARNING) - - if batch_size is not None: - if transaction: - raise TypeError('When batch_size is set, a Batch cannot be ' - 'transactional') - if batch_size <= 0: - raise ValueError('batch_size must be positive') - - self._table = table - self._batch_size = batch_size - self._timestamp = self._delete_range = None - - # Timestamp is in milliseconds, convert to microseconds. - if timestamp is not None: - self._timestamp = _datetime_from_microseconds(1000 * timestamp) - # For deletes, we get the very next timestamp (assuming timestamp - # granularity is milliseconds). This is because HappyBase users - # expect HBase deletes to go **up to** and **including** the - # timestamp while Cloud Bigtable Time Ranges **exclude** the - # final timestamp. - next_timestamp = self._timestamp + _ONE_MILLISECOND - self._delete_range = TimestampRange(end=next_timestamp) - - self._transaction = transaction - - # Internal state for tracking mutations. 
- self._row_map = {} - self._mutation_count = 0 - - def send(self): - """Send / commit the batch of mutations to the server.""" - for row in self._row_map.values(): - # commit() does nothing if row hasn't accumulated any mutations. - row.commit() - - self._row_map.clear() - self._mutation_count = 0 - - def _try_send(self): - """Send / commit the batch if mutations have exceeded batch size.""" - if self._batch_size and self._mutation_count >= self._batch_size: - self.send() - - def _get_row(self, row_key): - """Gets a row that will hold mutations. - - If the row is not already cached on the current batch, a new row will - be created. - - :type row_key: str - :param row_key: The row key for a row stored in the map. - - :rtype: :class:`Row ` - :returns: The newly created or stored row that will hold mutations. - """ - if row_key not in self._row_map: - table = self._table._low_level_table - self._row_map[row_key] = table.row(row_key) - - return self._row_map[row_key] - - def put(self, row, data, wal=_WAL_SENTINEL): - """Insert data into a row in the table owned by this batch. - - :type row: str - :param row: The row key where the mutation will be "put". - - :type data: dict - :param data: Dictionary containing the data to be inserted. The keys - are columns names (of the form ``fam:col``) and the values - are strings (bytes) to be stored in those columns. - - :type wal: object - :param wal: Unused parameter (to over-ride the default on the - instance). Provided for compatibility with HappyBase, but - irrelevant for Cloud Bigtable since it does not have a - Write Ahead Log. - """ - if wal is not _WAL_SENTINEL: - _WARN(_WAL_WARNING) - - row_object = self._get_row(row) - # Make sure all the keys are valid before beginning - # to add mutations. - column_pairs = _get_column_pairs(six.iterkeys(data), - require_qualifier=True) - for column_family_id, column_qualifier in column_pairs: - value = data[column_family_id + ':' + column_qualifier] - row_object.set_cell(column_family_id, column_qualifier, - value, timestamp=self._timestamp) - - self._mutation_count += len(data) - self._try_send() - - def _delete_columns(self, columns, row_object): - """Adds delete mutations for a list of columns and column families. - - :type columns: list - :param columns: Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type row_object: :class:`Row ` - :param row_object: The row which will hold the delete mutations. - - :raises: :class:`ValueError ` if the delete - timestamp range is set on the current batch, but a - column family delete is attempted. - """ - column_pairs = _get_column_pairs(columns) - for column_family_id, column_qualifier in column_pairs: - if column_qualifier is None: - if self._delete_range is not None: - raise ValueError('The Cloud Bigtable API does not support ' - 'adding a timestamp to ' - '"DeleteFromFamily" ') - row_object.delete_cells(column_family_id, - columns=row_object.ALL_COLUMNS) - else: - row_object.delete_cell(column_family_id, - column_qualifier, - time_range=self._delete_range) - - def delete(self, row, columns=None, wal=_WAL_SENTINEL): - """Delete data from a row in the table owned by this batch. - - :type row: str - :param row: The row key where the delete will occur. - - :type columns: list - :param columns: (Optional) Iterable containing column names (as - strings). 
Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - If not used, will delete the entire row. - - :type wal: object - :param wal: Unused parameter (to over-ride the default on the - instance). Provided for compatibility with HappyBase, but - irrelevant for Cloud Bigtable since it does not have a - Write Ahead Log. - - :raises: If the delete timestamp range is set on the - current batch, but a full row delete is attempted. - """ - if wal is not _WAL_SENTINEL: - _WARN(_WAL_WARNING) - - row_object = self._get_row(row) - - if columns is None: - # Delete entire row. - if self._delete_range is not None: - raise ValueError('The Cloud Bigtable API does not support ' - 'adding a timestamp to "DeleteFromRow" ' - 'mutations') - row_object.delete() - self._mutation_count += 1 - else: - self._delete_columns(columns, row_object) - self._mutation_count += len(columns) - - self._try_send() - - def __enter__(self): - """Enter context manager, no set-up required.""" - return self - - def __exit__(self, exc_type, exc_value, traceback): - """Exit context manager, no set-up required. - - :type exc_type: type - :param exc_type: The type of the exception if one occurred while the - context manager was active. Otherwise, :data:`None`. - - :type exc_value: :class:`Exception ` - :param exc_value: An instance of ``exc_type`` if an exception occurred - while the context was active. - Otherwise, :data:`None`. - - :type traceback: ``traceback`` type - :param traceback: The traceback where the exception occurred (if one - did occur). Otherwise, :data:`None`. - """ - # If the context manager encountered an exception and the batch is - # transactional, we don't commit the mutations. - if self._transaction and exc_type is not None: - return - - # NOTE: For non-transactional batches, this will even commit mutations - # if an error occurred during the context manager. - self.send() - - -def _get_column_pairs(columns, require_qualifier=False): - """Turns a list of column or column families into parsed pairs. - - Turns a column family (``fam`` or ``fam:``) into a pair such - as ``['fam', None]`` and turns a column (``fam:col``) into - ``['fam', 'col']``. - - :type columns: list - :param columns: Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type require_qualifier: bool - :param require_qualifier: Boolean indicating if the columns should - all have a qualifier or not. - - :rtype: list - :returns: List of pairs, where the first element in each pair is the - column family and the second is the column qualifier - (or :data:`None`). - :raises: :class:`ValueError ` if any of the columns - are not of the expected format. - :class:`ValueError ` if - ``require_qualifier`` is :data:`True` and one of the values is - for an entire column family - """ - column_pairs = [] - for column in columns: - if isinstance(column, six.binary_type): - column = column.decode('utf-8') - # Remove trailing colons (i.e. for standalone column family). - if column.endswith(u':'): - column = column[:-1] - num_colons = column.count(u':') - if num_colons == 0: - # column is a column family. 
- if require_qualifier: - raise ValueError('column does not contain a qualifier', - column) - else: - column_pairs.append([column, None]) - elif num_colons == 1: - column_pairs.append(column.split(u':')) - else: - raise ValueError('Column contains the : separator more than once') - - return column_pairs diff --git a/gcloud/bigtable/happybase/connection.py b/gcloud/bigtable/happybase/connection.py deleted file mode 100644 index c6ba0b2f8f96..000000000000 --- a/gcloud/bigtable/happybase/connection.py +++ /dev/null @@ -1,470 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable HappyBase connection module.""" - - -import datetime -import warnings - -import six - -from gcloud.bigtable.client import Client -from gcloud.bigtable.column_family import GCRuleIntersection -from gcloud.bigtable.column_family import MaxAgeGCRule -from gcloud.bigtable.column_family import MaxVersionsGCRule -from gcloud.bigtable.happybase.table import Table -from gcloud.bigtable.table import Table as _LowLevelTable - - -# Constants reproduced here for HappyBase compatibility, though values -# are all null. -COMPAT_MODES = None -THRIFT_TRANSPORTS = None -THRIFT_PROTOCOLS = None -DEFAULT_HOST = None -DEFAULT_PORT = None -DEFAULT_TRANSPORT = None -DEFAULT_COMPAT = None -DEFAULT_PROTOCOL = None - -_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol')) -_WARN = warnings.warn -_DISABLE_DELETE_MSG = ('The disable argument should not be used in ' - 'delete_table(). Cloud Bigtable has no concept ' - 'of enabled / disabled tables.') - - -def _get_cluster(timeout=None): - """Gets cluster for the default project. - - Creates a client with the inferred credentials and project ID from - the local environment. Then uses - :meth:`.bigtable.client.Client.list_clusters` to - get the unique cluster owned by the project. - - If the request fails for any reason, or if there isn't exactly one cluster - owned by the project, then this function will fail. - - :type timeout: int - :param timeout: (Optional) The socket timeout in milliseconds. - - :rtype: :class:`gcloud.bigtable.cluster.Cluster` - :returns: The unique cluster owned by the project inferred from - the environment. - :raises: :class:`ValueError ` if there is a failed - zone or any number of clusters other than one. - """ - client_kwargs = {'admin': True} - if timeout is not None: - client_kwargs['timeout_seconds'] = timeout / 1000.0 - client = Client(**client_kwargs) - try: - client.start() - clusters, failed_zones = client.list_clusters() - finally: - client.stop() - - if len(failed_zones) != 0: - raise ValueError('Determining cluster via ListClusters encountered ' - 'failed zones.') - if len(clusters) == 0: - raise ValueError('This client doesn\'t have access to any clusters.') - if len(clusters) > 1: - raise ValueError('This client has access to more than one cluster. 
' - 'Please directly pass the cluster you\'d ' - 'like to use.') - return clusters[0] - - -class Connection(object): - """Connection to Cloud Bigtable backend. - - .. note:: - - If you pass a ``cluster``, it will be :meth:`.Cluster.copy`-ed before - being stored on the new connection. This also copies the - :class:`Client ` that created the - :class:`Cluster ` instance and the - :class:`Credentials ` stored on the - client. - - The arguments ``host``, ``port``, ``compat``, ``transport`` and - ``protocol`` are allowed (as keyword arguments) for compatibility with - HappyBase. However, they will not be used in any way, and will cause a - warning if passed. - - :type timeout: int - :param timeout: (Optional) The socket timeout in milliseconds. - - :type autoconnect: bool - :param autoconnect: (Optional) Whether the connection should be - :meth:`open`-ed during construction. - - :type table_prefix: str - :param table_prefix: (Optional) Prefix used to construct table names. - - :type table_prefix_separator: str - :param table_prefix_separator: (Optional) Separator used with - ``table_prefix``. Defaults to ``_``. - - :type cluster: :class:`Cluster ` - :param cluster: (Optional) A Cloud Bigtable cluster. The instance also - owns a client for making gRPC requests to the Cloud - Bigtable API. If not passed in, defaults to creating client - with ``admin=True`` and using the ``timeout`` here for the - ``timeout_seconds`` argument to the - :class:`Client ` - constructor. The credentials for the client - will be the implicit ones loaded from the environment. - Then that client is used to retrieve all the clusters - owned by the client's project. - - :type kwargs: dict - :param kwargs: Remaining keyword arguments. Provided for HappyBase - compatibility. - """ - - _cluster = None - - def __init__(self, timeout=None, autoconnect=True, table_prefix=None, - table_prefix_separator='_', cluster=None, **kwargs): - self._handle_legacy_args(kwargs) - if table_prefix is not None: - if not isinstance(table_prefix, six.string_types): - raise TypeError('table_prefix must be a string', 'received', - table_prefix, type(table_prefix)) - - if not isinstance(table_prefix_separator, six.string_types): - raise TypeError('table_prefix_separator must be a string', - 'received', table_prefix_separator, - type(table_prefix_separator)) - - self.table_prefix = table_prefix - self.table_prefix_separator = table_prefix_separator - - if cluster is None: - self._cluster = _get_cluster(timeout=timeout) - else: - if timeout is not None: - raise ValueError('Timeout cannot be used when an existing ' - 'cluster is passed') - self._cluster = cluster.copy() - - if autoconnect: - self.open() - - self._initialized = True - - @staticmethod - def _handle_legacy_args(arguments_dict): - """Check legacy HappyBase arguments and warn if set. - - :type arguments_dict: dict - :param arguments_dict: Unused keyword arguments. - - :raises: :class:`TypeError ` if a keyword other - than ``host``, ``port``, ``compat``, ``transport`` or - ``protocol`` is used. - """ - common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict)) - if common_args: - all_args = ', '.join(common_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by gcloud.' 
% (all_args,)) - _WARN(message) - for arg_name in common_args: - arguments_dict.pop(arg_name) - if arguments_dict: - unexpected_names = arguments_dict.keys() - raise TypeError('Received unexpected arguments', unexpected_names) - - def open(self): - """Open the underlying transport to Cloud Bigtable. - - This method opens the underlying HTTP/2 gRPC connection using a - :class:`Client ` bound to the - :class:`Cluster ` owned by - this connection. - """ - self._cluster._client.start() - - def close(self): - """Close the underlying transport to Cloud Bigtable. - - This method closes the underlying HTTP/2 gRPC connection using a - :class:`Client ` bound to the - :class:`Cluster ` owned by - this connection. - """ - self._cluster._client.stop() - - def __del__(self): - if self._cluster is not None: - self.close() - - def _table_name(self, name): - """Construct a table name by optionally adding a table name prefix. - - :type name: str - :param name: The name to have a prefix added to it. - - :rtype: str - :returns: The prefixed name, if the current connection has a table - prefix set. - """ - if self.table_prefix is None: - return name - - return self.table_prefix + self.table_prefix_separator + name - - def table(self, name, use_prefix=True): - """Table factory. - - :type name: str - :param name: The name of the table to be created. - - :type use_prefix: bool - :param use_prefix: Whether to use the table prefix (if any). - - :rtype: :class:`Table ` - :returns: Table instance owned by this connection. - """ - if use_prefix: - name = self._table_name(name) - return Table(name, self) - - def tables(self): - """Return a list of table names available to this connection. - - .. note:: - - This lists every table in the cluster owned by this connection, - **not** every table that a given user may have access to. - - .. note:: - - If ``table_prefix`` is set on this connection, only returns the - table names which match that prefix. - - :rtype: list - :returns: List of string table names. - """ - low_level_table_instances = self._cluster.list_tables() - table_names = [table_instance.table_id - for table_instance in low_level_table_instances] - - # Filter using prefix, and strip prefix from names - if self.table_prefix is not None: - prefix = self._table_name('') - offset = len(prefix) - table_names = [name[offset:] for name in table_names - if name.startswith(prefix)] - - return table_names - - def create_table(self, name, families): - """Create a table. - - .. warning:: - - The only column family options from HappyBase that are able to be - used with Cloud Bigtable are ``max_versions`` and ``time_to_live``. - - .. note:: - - This method is **not** atomic. The Cloud Bigtable API separates - the creation of a table from the creation of column families. Thus - this method needs to send 1 request for the table creation and 1 - request for each column family. If any of these fails, the method - will fail, but the progress made towards completion cannot be - rolled back. - - Values in ``families`` represent column family options. In HappyBase, - these are dictionaries, corresponding to the ``ColumnDescriptor`` - structure in the Thrift API. The accepted keys are: - - * ``max_versions`` (``int``) - * ``compression`` (``str``) - * ``in_memory`` (``bool``) - * ``bloom_filter_type`` (``str``) - * ``bloom_filter_vector_size`` (``int``) - * ``bloom_filter_nb_hashes`` (``int``) - * ``block_cache_enabled`` (``bool``) - * ``time_to_live`` (``int``) - - :type name: str - :param name: The name of the table to be created. 
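As a hedged sketch of the prefix handling implemented above (the ``cluster`` object, the ``'app'`` prefix and the ``users`` table are assumptions made only for illustration)::

    >>> connection = Connection(cluster=cluster, table_prefix='app')
    >>> connection._table_name('users')            # prefix + separator + name
    'app_users'
    >>> connection.table('users').name             # prefix applied by default
    'app_users'
    >>> connection.table('users', use_prefix=False).name
    'users'
    >>> connection.tables()                        # listing filtered to the 'app_' prefix,
    ['users']                                      # with the prefix stripped from each name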
- - :type families: dict - :param families: Dictionary with column family names as keys and column - family options as the values. The options can be among - - * :class:`dict` - * :class:`.GarbageCollectionRule` - - :raises: :class:`TypeError ` if ``families`` is - not a dictionary, - :class:`ValueError ` if ``families`` - has no entries - """ - if not isinstance(families, dict): - raise TypeError('families arg must be a dictionary') - - if not families: - raise ValueError('Cannot create table %r (no column ' - 'families specified)' % (name,)) - - # Parse all keys before making any API requests. - gc_rule_dict = {} - for column_family_name, option in families.items(): - if isinstance(column_family_name, six.binary_type): - column_family_name = column_family_name.decode('utf-8') - if column_family_name.endswith(':'): - column_family_name = column_family_name[:-1] - gc_rule_dict[column_family_name] = _parse_family_option(option) - - # Create table instance and then make API calls. - name = self._table_name(name) - low_level_table = _LowLevelTable(name, self._cluster) - low_level_table.create() - - for column_family_name, gc_rule in gc_rule_dict.items(): - column_family = low_level_table.column_family( - column_family_name, gc_rule=gc_rule) - column_family.create() - - def delete_table(self, name, disable=False): - """Delete the specified table. - - :type name: str - :param name: The name of the table to be deleted. If ``table_prefix`` - is set, a prefix will be added to the ``name``. - - :type disable: bool - :param disable: Whether to first disable the table if needed. This - is provided for compatibility with HappyBase, but is - not relevant for Cloud Bigtable since it has no concept - of enabled / disabled tables. - """ - if disable: - _WARN(_DISABLE_DELETE_MSG) - - name = self._table_name(name) - _LowLevelTable(name, self._cluster).delete() - - def enable_table(self, name): - """Enable the specified table. - - .. warning:: - - Cloud Bigtable has no concept of enabled / disabled tables so this - method does not work. It is provided simply for compatibility. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API has no concept of ' - 'enabled or disabled tables.') - - def disable_table(self, name): - """Disable the specified table. - - .. warning:: - - Cloud Bigtable has no concept of enabled / disabled tables so this - method does not work. It is provided simply for compatibility. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API has no concept of ' - 'enabled or disabled tables.') - - def is_table_enabled(self, name): - """Return whether the specified table is enabled. - - .. warning:: - - Cloud Bigtable has no concept of enabled / disabled tables so this - method does not work. It is provided simply for compatibility. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API has no concept of ' - 'enabled or disabled tables.') - - def compact_table(self, name, major=False): - """Compact the specified table. - - .. warning:: - - Cloud Bigtable does not support compacting a table, so this - method does not work. It is provided simply for compatibility. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API does not support ' - 'compacting a table.') - - -def _parse_family_option(option): - """Parses a column family option into a garbage collection rule. - - .. 
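A minimal sketch of ``create_table`` under the restrictions described above; the table and family names are hypothetical, and only ``max_versions`` / ``time_to_live`` survive the translation to Cloud Bigtable garbage-collection rules::

    >>> from gcloud.bigtable.column_family import MaxVersionsGCRule
    >>> connection.create_table('events', {
    ...     'stats': {'max_versions': 5},                  # dict option
    ...     'audit': {'time_to_live': 7 * 24 * 60 * 60},   # seconds
    ...     'raw': MaxVersionsGCRule(1),                   # pre-built GC rule
    ... })
    >>> connection.delete_table('events')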
note:: - - If ``option`` is not a dictionary, the type is not checked. - If ``option`` is :data:`None`, there is nothing to do, since this - is the correct output. - - :type option: :class:`dict`, - :data:`NoneType `, - :class:`.GarbageCollectionRule` - :param option: A column family option passes as a dictionary value in - :meth:`Connection.create_table`. - - :rtype: :class:`.GarbageCollectionRule` - :returns: A garbage collection rule parsed from the input. - """ - result = option - if isinstance(result, dict): - if not set(result.keys()) <= set(['max_versions', 'time_to_live']): - all_keys = ', '.join(repr(key) for key in result.keys()) - warning_msg = ('Cloud Bigtable only supports max_versions and ' - 'time_to_live column family settings. ' - 'Received: %s' % (all_keys,)) - _WARN(warning_msg) - - max_num_versions = result.get('max_versions') - max_age = None - if 'time_to_live' in result: - max_age = datetime.timedelta(seconds=result['time_to_live']) - - versions_rule = age_rule = None - if max_num_versions is not None: - versions_rule = MaxVersionsGCRule(max_num_versions) - if max_age is not None: - age_rule = MaxAgeGCRule(max_age) - - if versions_rule is None: - result = age_rule - else: - if age_rule is None: - result = versions_rule - else: - result = GCRuleIntersection(rules=[age_rule, versions_rule]) - - return result diff --git a/gcloud/bigtable/happybase/pool.py b/gcloud/bigtable/happybase/pool.py deleted file mode 100644 index ab84724740a2..000000000000 --- a/gcloud/bigtable/happybase/pool.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable HappyBase pool module.""" - - -import contextlib -import threading - -import six - -from gcloud.bigtable.happybase.connection import Connection -from gcloud.bigtable.happybase.connection import _get_cluster - - -_MIN_POOL_SIZE = 1 -"""Minimum allowable size of a connection pool.""" - - -class NoConnectionsAvailable(RuntimeError): - """Exception raised when no connections are available. - - This happens if a timeout was specified when obtaining a connection, - and no connection became available within the specified timeout. - """ - - -class ConnectionPool(object): - """Thread-safe connection pool. - - .. note:: - - All keyword arguments are passed unmodified to the - :class:`Connection <.happybase.connection.Connection>` constructor - **except** for ``autoconnect``. This is because the ``open`` / - ``closed`` status of a connection is managed by the pool. In addition, - if ``cluster`` is not passed, the default / inferred cluster is - determined by the pool and then passed to each - :class:`Connection <.happybase.connection.Connection>` that is created. - - :type size: int - :param size: The maximum number of concurrently open connections. - - :type kwargs: dict - :param kwargs: Keyword arguments passed to - :class:`Connection <.happybase.Connection>` - constructor. - - :raises: :class:`TypeError ` if ``size`` - is non an integer. 
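To make the option-to-rule mapping in ``_parse_family_option`` above concrete, a small doctest-style sketch (assuming the column-family rule classes imported at the top of the module expose the attributes used below)::

    >>> import datetime
    >>> rule = _parse_family_option({'max_versions': 3})
    >>> isinstance(rule, MaxVersionsGCRule)
    True
    >>> rule = _parse_family_option({'time_to_live': 3600})
    >>> rule.max_age == datetime.timedelta(seconds=3600)
    True
    >>> rule = _parse_family_option({'max_versions': 3, 'time_to_live': 3600})
    >>> isinstance(rule, GCRuleIntersection)
    True
    >>> _parse_family_option(None) is None
    True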
- :class:`ValueError ` if ``size`` - is not positive. - """ - def __init__(self, size, **kwargs): - if not isinstance(size, six.integer_types): - raise TypeError('Pool size arg must be an integer') - - if size < _MIN_POOL_SIZE: - raise ValueError('Pool size must be positive') - - self._lock = threading.Lock() - self._queue = six.moves.queue.LifoQueue(maxsize=size) - self._thread_connections = threading.local() - - connection_kwargs = kwargs - connection_kwargs['autoconnect'] = False - if 'cluster' not in connection_kwargs: - connection_kwargs['cluster'] = _get_cluster( - timeout=kwargs.get('timeout')) - - for _ in six.moves.range(size): - connection = Connection(**connection_kwargs) - self._queue.put(connection) - - def _acquire_connection(self, timeout=None): - """Acquire a connection from the pool. - - :type timeout: int - :param timeout: (Optional) Time (in seconds) to wait for a connection - to open. - - :rtype: :class:`Connection <.happybase.Connection>` - :returns: An active connection from the queue stored on the pool. - :raises: :class:`NoConnectionsAvailable` if ``Queue.get`` fails - before the ``timeout`` (only if a timeout is specified). - """ - try: - return self._queue.get(block=True, timeout=timeout) - except six.moves.queue.Empty: - raise NoConnectionsAvailable('No connection available from pool ' - 'within specified timeout') - - @contextlib.contextmanager - def connection(self, timeout=None): - """Obtain a connection from the pool. - - Must be used as a context manager, for example:: - - with pool.connection() as connection: - pass # do something with the connection - - If ``timeout`` is omitted, this method waits forever for a connection - to become available from the local queue. - - :type timeout: int - :param timeout: (Optional) Time (in seconds) to wait for a connection - to open. - - :rtype: :class:`Connection <.happybase.connection.Connection>` - :returns: An active connection from the pool. - :raises: :class:`NoConnectionsAvailable` if no connection can be - retrieved from the pool before the ``timeout`` (only if - a timeout is specified). - """ - connection = getattr(self._thread_connections, 'current', None) - - retrieved_new_cnxn = False - if connection is None: - # In this case we need to actually grab a connection from the - # pool. After retrieval, the connection is stored on a thread - # local so that nested connection requests from the same - # thread can re-use the same connection instance. - # - # NOTE: This code acquires a lock before assigning to the - # thread local; see - # ('https://emptysqua.re/blog/' - # 'another-thing-about-pythons-threadlocals/') - retrieved_new_cnxn = True - connection = self._acquire_connection(timeout) - with self._lock: - self._thread_connections.current = connection - - # This is a no-op for connections that have already been opened - # since they just call Client.start(). - connection.open() - yield connection - - # Remove thread local reference after the outermost 'with' block - # ends. Afterwards the thread no longer owns the connection. - if retrieved_new_cnxn: - del self._thread_connections.current - self._queue.put(connection) diff --git a/gcloud/bigtable/happybase/table.py b/gcloud/bigtable/happybase/table.py deleted file mode 100644 index bf018ad1647f..000000000000 --- a/gcloud/bigtable/happybase/table.py +++ /dev/null @@ -1,967 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. 
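A short usage sketch for the pool described above; the ``cluster`` keyword is assumed so the pool does not have to infer one from the environment::

    >>> pool = ConnectionPool(3, cluster=cluster)      # connections created but not opened
    >>> with pool.connection(timeout=5) as connection:
    ...     table = connection.table('users')
    ...     row = table.row(b'user-1')

Nested ``pool.connection()`` blocks on the same thread reuse the connection stored in the thread-local, so only the outermost block returns it to the queue.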
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Bigtable HappyBase table module.""" - - -import struct -import warnings - -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud._helpers import _microseconds_from_datetime -from gcloud._helpers import _to_bytes -from gcloud._helpers import _total_seconds -from gcloud.bigtable.column_family import GCRuleIntersection -from gcloud.bigtable.column_family import MaxAgeGCRule -from gcloud.bigtable.column_family import MaxVersionsGCRule -from gcloud.bigtable.happybase.batch import _get_column_pairs -from gcloud.bigtable.happybase.batch import _WAL_SENTINEL -from gcloud.bigtable.happybase.batch import Batch -from gcloud.bigtable.row_filters import CellsColumnLimitFilter -from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter -from gcloud.bigtable.row_filters import FamilyNameRegexFilter -from gcloud.bigtable.row_filters import RowFilterChain -from gcloud.bigtable.row_filters import RowFilterUnion -from gcloud.bigtable.row_filters import RowKeyRegexFilter -from gcloud.bigtable.row_filters import TimestampRange -from gcloud.bigtable.row_filters import TimestampRangeFilter -from gcloud.bigtable.table import Table as _LowLevelTable - - -_WARN = warnings.warn -_UNPACK_I64 = struct.Struct('>q').unpack -_SIMPLE_GC_RULES = (MaxAgeGCRule, MaxVersionsGCRule) - - -def make_row(cell_map, include_timestamp): - """Make a row dict for a Thrift cell mapping. - - .. warning:: - - This method is only provided for HappyBase compatibility, but does not - actually work. - - :type cell_map: dict - :param cell_map: Dictionary with ``fam:col`` strings as keys and ``TCell`` - instances as values. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API output is not the same ' - 'as the output from the Thrift server, so this ' - 'helper can not be implemented.', 'Called with', - cell_map, include_timestamp) - - -def make_ordered_row(sorted_columns, include_timestamp): - """Make a row dict for sorted Thrift column results from scans. - - .. warning:: - - This method is only provided for HappyBase compatibility, but does not - actually work. - - :type sorted_columns: list - :param sorted_columns: List of ``TColumn`` instances from Thrift. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API output is not the same ' - 'as the output from the Thrift server, so this ' - 'helper can not be implemented.', 'Called with', - sorted_columns, include_timestamp) - - -class Table(object): - """Representation of Cloud Bigtable table. - - Used for adding data and - - :type name: str - :param name: The name of the table. 
- - :type connection: :class:`Connection <.happybase.connection.Connection>` - :param connection: The connection which has access to the table. - """ - - def __init__(self, name, connection): - self.name = name - # This remains as legacy for HappyBase, but only the cluster - # from the connection is needed. - self.connection = connection - self._low_level_table = None - if self.connection is not None: - self._low_level_table = _LowLevelTable(self.name, - self.connection._cluster) - - def __repr__(self): - return '' % (self.name,) - - def families(self): - """Retrieve the column families for this table. - - :rtype: dict - :returns: Mapping from column family name to garbage collection rule - for a column family. - """ - column_family_map = self._low_level_table.list_column_families() - result = {} - for col_fam, col_fam_obj in six.iteritems(column_family_map): - result[col_fam] = _gc_rule_to_dict(col_fam_obj.gc_rule) - return result - - def regions(self): - """Retrieve the regions for this table. - - .. warning:: - - Cloud Bigtable does not give information about how a table is laid - out in memory, so this method does not work. It is - provided simply for compatibility. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('The Cloud Bigtable API does not have a ' - 'concept of splitting a table into regions.') - - def row(self, row, columns=None, timestamp=None, include_timestamp=False): - """Retrieve a single row of data. - - Returns the latest cells in each column (or all columns if ``columns`` - is not specified). If a ``timestamp`` is set, then **latest** becomes - **latest** up until ``timestamp``. - - :type row: str - :param row: Row key for the row we are reading from. - - :type columns: list - :param columns: (Optional) Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). If specified, only cells returned before the - the timestamp will be returned. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :rtype: dict - :returns: Dictionary containing all the latest column values in - the row. - """ - filters = [] - if columns is not None: - filters.append(_columns_filter_helper(columns)) - # versions == 1 since we only want the latest. - filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) - - partial_row_data = self._low_level_table.read_row( - row, filter_=filter_) - if partial_row_data is None: - return {} - - return _partial_row_to_dict(partial_row_data, - include_timestamp=include_timestamp) - - def rows(self, rows, columns=None, timestamp=None, - include_timestamp=False): - """Retrieve multiple rows of data. - - All optional arguments behave the same in this method as they do in - :meth:`row`. - - :type rows: list - :param rows: Iterable of the row keys for the rows we are reading from. - - :type columns: list - :param columns: (Optional) Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). If specified, only cells returned before (or - at) the timestamp will be returned. 
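Tying back to the ``families()`` method above and the hypothetical ``events`` table sketched earlier, the garbage-collection rules round-trip into HappyBase-style option dictionaries (ordering of the mapping is illustrative only)::

    >>> table = connection.table('events')
    >>> table.families()
    {'stats': {'max_versions': 5}, 'audit': {'time_to_live': 604800}, 'raw': {'max_versions': 1}}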
- - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :rtype: list - :returns: A list of pairs, where the first is the row key and the - second is a dictionary with the filtered values returned. - """ - if not rows: - # Avoid round-trip if the result is empty anyway - return [] - - filters = [] - if columns is not None: - filters.append(_columns_filter_helper(columns)) - filters.append(_row_keys_filter_helper(rows)) - # versions == 1 since we only want the latest. - filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) - - partial_rows_data = self._low_level_table.read_rows(filter_=filter_) - # NOTE: We could use max_loops = 1000 or some similar value to ensure - # that the stream isn't open too long. - partial_rows_data.consume_all() - - result = [] - for row_key in rows: - if row_key not in partial_rows_data.rows: - continue - curr_row_data = partial_rows_data.rows[row_key] - curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) - result.append((row_key, curr_row_dict)) - - return result - - def cells(self, row, column, versions=None, timestamp=None, - include_timestamp=False): - """Retrieve multiple versions of a single cell from the table. - - :type row: str - :param row: Row key for the row we are reading from. - - :type column: str - :param column: Column we are reading from; of the form ``fam:col``. - - :type versions: int - :param versions: (Optional) The maximum number of cells to return. If - not set, returns all cells found. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). If specified, only cells returned before (or - at) the timestamp will be returned. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :rtype: list - :returns: List of values in the cell (with timestamps if - ``include_timestamp`` is :data:`True`). - """ - filter_ = _filter_chain_helper(column=column, versions=versions, - timestamp=timestamp) - partial_row_data = self._low_level_table.read_row(row, filter_=filter_) - if partial_row_data is None: - return [] - else: - cells = partial_row_data._cells - # We know that `_filter_chain_helper` has already verified that - # column will split as such. - column_family_id, column_qualifier = column.split(':') - # NOTE: We expect the only key in `cells` is `column_family_id` - # and the only key `cells[column_family_id]` is - # `column_qualifier`. But we don't check that this is true. - curr_cells = cells[column_family_id][column_qualifier] - return _cells_to_pairs( - curr_cells, include_timestamp=include_timestamp) - - def scan(self, row_start=None, row_stop=None, row_prefix=None, - columns=None, filter=None, timestamp=None, - include_timestamp=False, limit=None, **kwargs): - """Create a scanner for data in this table. - - This method returns a generator that can be used for looping over the - matching rows. - - If ``row_prefix`` is specified, only rows with row keys matching the - prefix will be returned. If given, ``row_start`` and ``row_stop`` - cannot be used. - - .. note:: - - Both ``row_start`` and ``row_stop`` can be :data:`None` to specify - the start and the end of the table respectively. If both are - omitted, a full table scan is done. Note that this usually results - in severe performance problems. 
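A hedged read-side sketch tying together ``row()``, ``rows()`` and ``cells()``; the row keys, column names and returned values are purely illustrative::

    >>> table.row(b'user-1', columns=['profile', 'stats:visits'])
    {b'profile:name': b'Alice', b'stats:visits': b'7'}
    >>> table.rows([b'user-1', b'user-2'], include_timestamp=True)
    [(b'user-1', {b'profile:name': (b'Alice', 1456361486255)}),
     (b'user-2', {b'profile:name': (b'Bob', 1456361491927)})]
    >>> table.cells(b'user-1', 'profile:name', versions=2)
    [b'Alice', b'Al']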
- - The arguments ``batch_size``, ``scan_batching`` and ``sorted_columns`` - are allowed (as keyword arguments) for compatibility with - HappyBase. However, they will not be used in any way, and will cause a - warning if passed. (The ``batch_size`` determines the number of - results to retrieve per request. The HBase scanner defaults to reading - one record at a time, so this argument allows HappyBase to increase - that number. However, the Cloud Bigtable API uses HTTP/2 streaming so - there is no concept of a batched scan. The ``sorted_columns`` flag - tells HBase to return columns in order, but Cloud Bigtable doesn't - have this feature.) - - :type row_start: str - :param row_start: (Optional) Row key where the scanner should start - (includes ``row_start``). If not specified, reads - from the first key. If the table does not contain - ``row_start``, it will start from the next key after - it that **is** contained in the table. - - :type row_stop: str - :param row_stop: (Optional) Row key where the scanner should stop - (excludes ``row_stop``). If not specified, reads - until the last key. The table does not have to contain - ``row_stop``. - - :type row_prefix: str - :param row_prefix: (Optional) Prefix to match row keys. - - :type columns: list - :param columns: (Optional) Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type filter: :class:`RowFilter ` - :param filter: (Optional) An additional filter (beyond column and - row range filters supported here). HappyBase / HBase - users will have used this as an HBase filter string. See - http://hbase.apache.org/0.94/book/thrift.html - for more details on those filters. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). If specified, only cells returned before (or - at) the timestamp will be returned. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :type limit: int - :param limit: (Optional) Maximum number of rows to return. - - :type kwargs: dict - :param kwargs: Remaining keyword arguments. Provided for HappyBase - compatibility. - - :raises: If ``limit`` is set but non-positive, or if ``row_prefix`` is - used with row start/stop, - :class:`TypeError ` if a string - ``filter`` is used. - """ - legacy_args = [] - for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): - if kw_name in kwargs: - legacy_args.append(kw_name) - kwargs.pop(kw_name) - if legacy_args: - legacy_args = ', '.join(legacy_args) - message = ('The HappyBase legacy arguments %s were used. These ' - 'arguments are unused by gcloud.' % (legacy_args,)) - _WARN(message) - if kwargs: - raise TypeError('Received unexpected arguments', kwargs.keys()) - - if limit is not None and limit < 1: - raise ValueError('limit must be positive') - if row_prefix is not None: - if row_start is not None or row_stop is not None: - raise ValueError('row_prefix cannot be combined with ' - 'row_start or row_stop') - row_start = row_prefix - row_stop = _string_successor(row_prefix) - - filters = [] - if isinstance(filter, six.string_types): - raise TypeError('Specifying filters as a string is not supported ' - 'by Cloud Bigtable. 
Use a ' - 'gcloud.bigtable.row.RowFilter instead.') - elif filter is not None: - filters.append(filter) - - if columns is not None: - filters.append(_columns_filter_helper(columns)) - # versions == 1 since we only want the latest. - filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, - filters=filters) - - partial_rows_data = self._low_level_table.read_rows( - start_key=row_start, end_key=row_stop, - limit=limit, filter_=filter_) - - # Mutable copy of data. - rows_dict = partial_rows_data.rows - while True: - try: - partial_rows_data.consume_next() - row_key, curr_row_data = rows_dict.popitem() - # NOTE: We expect len(rows_dict) == 0, but don't check it. - curr_row_dict = _partial_row_to_dict( - curr_row_data, include_timestamp=include_timestamp) - yield (row_key, curr_row_dict) - except StopIteration: - break - - def put(self, row, data, timestamp=None, wal=_WAL_SENTINEL): - """Insert data into a row in this table. - - .. note:: - - This method will send a request with a single "put" mutation. - In many situations, :meth:`batch` is a more appropriate - method to manipulate data since it helps combine many mutations - into a single request. - - :type row: str - :param row: The row key where the mutation will be "put". - - :type data: dict - :param data: Dictionary containing the data to be inserted. The keys - are columns names (of the form ``fam:col``) and the values - are strings (bytes) to be stored in those columns. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch) that the mutation will be applied at. - - :type wal: object - :param wal: Unused parameter (to be passed to a created batch). - Provided for compatibility with HappyBase, but irrelevant - for Cloud Bigtable since it does not have a Write Ahead - Log. - """ - with self.batch(timestamp=timestamp, wal=wal) as batch: - batch.put(row, data) - - def delete(self, row, columns=None, timestamp=None, wal=_WAL_SENTINEL): - """Delete data from a row in this table. - - This method deletes the entire ``row`` if ``columns`` is not - specified. - - .. note:: - - This method will send a request with a single delete mutation. - In many situations, :meth:`batch` is a more appropriate - method to manipulate data since it helps combine many mutations - into a single request. - - :type row: str - :param row: The row key where the delete will occur. - - :type columns: list - :param columns: (Optional) Iterable containing column names (as - strings). Each column name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch) that the mutation will be applied at. - - :type wal: object - :param wal: Unused parameter (to be passed to a created batch). - Provided for compatibility with HappyBase, but irrelevant - for Cloud Bigtable since it does not have a Write Ahead - Log. - """ - with self.batch(timestamp=timestamp, wal=wal) as batch: - batch.delete(row, columns) - - def batch(self, timestamp=None, batch_size=None, transaction=False, - wal=_WAL_SENTINEL): - """Create a new batch operation for this table. - - This method returns a new - :class:`Batch <.happybase.batch.Batch>` instance that can be - used for mass data manipulation. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch) that all mutations will be applied at. 
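A write-and-scan sketch matching the methods above; keys and values are hypothetical, and the prefix scan relies on ``row_prefix`` being translated into a start/stop key pair internally::

    >>> table.put(b'user-3', {'profile:name': b'Carol'}, timestamp=1456361500000)
    >>> table.delete(b'user-2', columns=['stats'])      # drop the whole column family
    >>> for row_key, row_dict in table.scan(row_prefix=b'user-', limit=10):
    ...     print(row_key, row_dict)

Passing a plain string as ``filter`` raises ``TypeError``; a ``RowFilter`` instance must be used instead.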
- - :type batch_size: int - :param batch_size: (Optional) The maximum number of mutations to allow - to accumulate before committing them. - - :type transaction: bool - :param transaction: Flag indicating if the mutations should be sent - transactionally or not. If ``transaction=True`` and - an error occurs while a - :class:`Batch <.happybase.batch.Batch>` is - active, then none of the accumulated mutations will - be committed. If ``batch_size`` is set, the - mutation can't be transactional. - - :type wal: object - :param wal: Unused parameter (to be passed to the created batch). - Provided for compatibility with HappyBase, but irrelevant - for Cloud Bigtable since it does not have a Write Ahead - Log. - - :rtype: :class:`Batch ` - :returns: A batch bound to this table. - """ - return Batch(self, timestamp=timestamp, batch_size=batch_size, - transaction=transaction, wal=wal) - - def counter_get(self, row, column): - """Retrieve the current value of a counter column. - - This method retrieves the current value of a counter column. If the - counter column does not exist, this function initializes it to ``0``. - - .. note:: - - Application code should **never** store a counter value directly; - use the atomic :meth:`counter_inc` and :meth:`counter_dec` methods - for that. - - :type row: str - :param row: Row key for the row we are getting a counter from. - - :type column: str - :param column: Column we are ``get``-ing from; of the form ``fam:col``. - - :rtype: int - :returns: Counter value (after initializing / incrementing by 0). - """ - # Don't query directly, but increment with value=0 so that the counter - # is correctly initialized if didn't exist yet. - return self.counter_inc(row, column, value=0) - - def counter_set(self, row, column, value=0): - """Set a counter column to a specific value. - - This method is provided in HappyBase, but we do not provide it here - because it defeats the purpose of using atomic increment and decrement - of a counter. - - :type row: str - :param row: Row key for the row we are setting a counter in. - - :type column: str - :param column: Column we are setting a value in; of - the form ``fam:col``. - - :type value: int - :param value: Value to set the counter to. - - :raises: :class:`NotImplementedError ` - always - """ - raise NotImplementedError('Table.counter_set will not be implemented. ' - 'Instead use the increment/decrement ' - 'methods along with counter_get.') - - def counter_inc(self, row, column, value=1): - """Atomically increment a counter column. - - This method atomically increments a counter column in ``row``. - If the counter column does not exist, it is automatically initialized - to ``0`` before being incremented. - - :type row: str - :param row: Row key for the row we are incrementing a counter in. - - :type column: str - :param column: Column we are incrementing a value in; of the - form ``fam:col``. - - :type value: int - :param value: Amount to increment the counter by. (If negative, - this is equivalent to decrement.) - - :rtype: int - :returns: Counter value after incrementing. - """ - row = self._low_level_table.row(row, append=True) - if isinstance(column, six.binary_type): - column = column.decode('utf-8') - column_family_id, column_qualifier = column.split(':') - row.increment_cell_value(column_family_id, column_qualifier, value) - # See AppendRow.commit() will return a dictionary: - # { - # u'col-fam-id': { - # b'col-name1': [ - # (b'cell-val', datetime.datetime(...)), - # ... - # ], - # ... 
- # }, - # } - modified_cells = row.commit() - # Get the cells in the modified column, - column_cells = modified_cells[column_family_id][column_qualifier] - # Make sure there is exactly one cell in the column. - if len(column_cells) != 1: - raise ValueError('Expected server to return one modified cell.') - column_cell = column_cells[0] - # Get the bytes value from the column and convert it to an integer. - bytes_value = column_cell[0] - int_value, = _UNPACK_I64(bytes_value) - return int_value - - def counter_dec(self, row, column, value=1): - """Atomically decrement a counter column. - - This method atomically decrements a counter column in ``row``. - If the counter column does not exist, it is automatically initialized - to ``0`` before being decremented. - - :type row: str - :param row: Row key for the row we are decrementing a counter in. - - :type column: str - :param column: Column we are decrementing a value in; of the - form ``fam:col``. - - :type value: int - :param value: Amount to decrement the counter by. (If negative, - this is equivalent to increment.) - - :rtype: int - :returns: Counter value after decrementing. - """ - return self.counter_inc(row, column, -value) - - -def _gc_rule_to_dict(gc_rule): - """Converts garbage collection rule to dictionary if possible. - - This is in place to support dictionary values as was done - in HappyBase, which has somewhat different garbage collection rule - settings for column families. - - Only does this if the garbage collection rule is: - - * :class:`gcloud.bigtable.column_family.MaxAgeGCRule` - * :class:`gcloud.bigtable.column_family.MaxVersionsGCRule` - * Composite :class:`gcloud.bigtable.column_family.GCRuleIntersection` - with two rules, one each of type - :class:`gcloud.bigtable.column_family.MaxAgeGCRule` and - :class:`gcloud.bigtable.column_family.MaxVersionsGCRule` - - Otherwise, just returns the input without change. - - :type gc_rule: :data:`NoneType `, - :class:`.GarbageCollectionRule` - :param gc_rule: A garbage collection rule to convert to a dictionary - (if possible). - - :rtype: dict or - :class:`gcloud.bigtable.column_family.GarbageCollectionRule` - :returns: The converted garbage collection rule. - """ - result = gc_rule - if gc_rule is None: - result = {} - elif isinstance(gc_rule, MaxAgeGCRule): - result = {'time_to_live': _total_seconds(gc_rule.max_age)} - elif isinstance(gc_rule, MaxVersionsGCRule): - result = {'max_versions': gc_rule.max_num_versions} - elif isinstance(gc_rule, GCRuleIntersection): - if len(gc_rule.rules) == 2: - rule1, rule2 = gc_rule.rules - if (isinstance(rule1, _SIMPLE_GC_RULES) and - isinstance(rule2, _SIMPLE_GC_RULES)): - rule1 = _gc_rule_to_dict(rule1) - rule2 = _gc_rule_to_dict(rule2) - key1, = rule1.keys() - key2, = rule2.keys() - if key1 != key2: - result = {key1: rule1[key1], key2: rule2[key2]} - return result - - -def _next_char(str_val, index): - """Gets the next character based on a position in a string. - - :type str_val: str - :param str_val: A string containing the character to update. - - :type index: int - :param index: An integer index in ``str_val``. - - :rtype: str - :returns: The next character after the character at ``index`` - in ``str_val``. - """ - ord_val = six.indexbytes(str_val, index) - return _to_bytes(chr(ord_val + 1), encoding='latin-1') - - -def _string_successor(str_val): - """Increment and truncate a byte string. - - Determines shortest string that sorts after the given string when - compared using regular string comparison semantics. 
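Batching and the counter helpers above can be sketched as follows (row keys and amounts are illustrative; the counter value is stored as a big-endian 64-bit integer, which is why ``counter_inc`` unpacks the committed cell with ``struct``)::

    >>> with table.batch(batch_size=20) as batch:
    ...     batch.put(b'user-4', {'profile:name': b'Dave'})
    ...     batch.delete(b'user-3', columns=['profile:name'])
    >>> table.counter_inc(b'user-4', 'stats:visits')          # 0 -> 1
    1
    >>> table.counter_inc(b'user-4', 'stats:visits', value=10)
    11
    >>> table.counter_get(b'user-4', 'stats:visits')          # increment by 0
    11
    >>> table.counter_dec(b'user-4', 'stats:visits', value=2)
    9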
- - Modeled after implementation in ``gcloud-golang``. - - Increments the last byte that is smaller than ``0xFF``, and - drops everything after it. If the string only contains ``0xFF`` bytes, - ``''`` is returned. - - :type str_val: str - :param str_val: String to increment. - - :rtype: str - :returns: The next string in lexical order after ``str_val``. - """ - str_val = _to_bytes(str_val, encoding='latin-1') - if str_val == b'': - return str_val - - index = len(str_val) - 1 - while index >= 0: - if six.indexbytes(str_val, index) != 0xff: - break - index -= 1 - - if index == -1: - return b'' - - return str_val[:index] + _next_char(str_val, index) - - -def _convert_to_time_range(timestamp=None): - """Create a timestamp range from an HBase / HappyBase timestamp. - - HBase uses timestamp as an argument to specify an exclusive end - deadline. Cloud Bigtable also uses exclusive end times, so - the behavior matches. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). Intended to be used as the end of an HBase - time range, which is exclusive. - - :rtype: :class:`gcloud.bigtable.row.TimestampRange`, - :data:`NoneType ` - :returns: The timestamp range corresponding to the passed in - ``timestamp``. - """ - if timestamp is None: - return None - - next_timestamp = _datetime_from_microseconds(1000 * timestamp) - return TimestampRange(end=next_timestamp) - - -def _cells_to_pairs(cells, include_timestamp=False): - """Converts list of cells to HappyBase format. - - For example:: - - >>> import datetime - >>> from gcloud.bigtable.row_data import Cell - >>> cell1 = Cell(b'val1', datetime.datetime.utcnow()) - >>> cell2 = Cell(b'val2', datetime.datetime.utcnow()) - >>> _cells_to_pairs([cell1, cell2]) - [b'val1', b'val2'] - >>> _cells_to_pairs([cell1, cell2], include_timestamp=True) - [(b'val1', 1456361486255), (b'val2', 1456361491927)] - - :type cells: list - :param cells: List of :class:`gcloud.bigtable.row_data.Cell` returned - from a read request. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :rtype: list - :returns: List of values in the cell. If ``include_timestamp=True``, each - value will be a pair, with the first part the bytes value in - the cell and the second part the number of milliseconds in the - timestamp on the cell. - """ - result = [] - for cell in cells: - if include_timestamp: - ts_millis = _microseconds_from_datetime(cell.timestamp) // 1000 - result.append((cell.value, ts_millis)) - else: - result.append(cell.value) - return result - - -def _partial_row_to_dict(partial_row_data, include_timestamp=False): - """Convert a low-level row data object to a dictionary. - - Assumes only the latest value in each row is needed. This assumption - is due to the fact that this method is used by callers which use - a ``CellsColumnLimitFilter(1)`` filter. 
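The row-prefix handling in ``scan()`` leans on ``_string_successor`` above; a doctest-style sketch of its edge cases (byte-string reprs shown in Python 3 form)::

    >>> _string_successor(b'user-')
    b'user.'
    >>> _string_successor(b'ab\xff\xff')
    b'ac'
    >>> _string_successor(b'\xff\xff')
    b''
    >>> _string_successor(b'')
    b''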
- - For example:: - - >>> import datetime - >>> from gcloud.bigtable.row_data import Cell, PartialRowData - >>> cell1 = Cell(b'val1', datetime.datetime.utcnow()) - >>> cell2 = Cell(b'val2', datetime.datetime.utcnow()) - >>> row_data = PartialRowData(b'row-key') - >>> _partial_row_to_dict(row_data) - {} - >>> row_data._cells[u'fam1'] = {b'col1': [cell1], b'col2': [cell2]} - >>> _partial_row_to_dict(row_data) - {b'fam1:col2': b'val2', b'fam1:col1': b'val1'} - >>> _partial_row_to_dict(row_data, include_timestamp=True) - {b'fam1:col2': (b'val2', 1456361724480), - b'fam1:col1': (b'val1', 1456361721135)} - - :type partial_row_data: :class:`.row_data.PartialRowData` - :param partial_row_data: Row data consumed from a stream. - - :type include_timestamp: bool - :param include_timestamp: Flag to indicate if cell timestamps should be - included with the output. - - :rtype: dict - :returns: The row data converted to a dictionary. - """ - result = {} - for column, cells in six.iteritems(partial_row_data.to_dict()): - cell_vals = _cells_to_pairs(cells, - include_timestamp=include_timestamp) - # NOTE: We assume there is exactly 1 version since we used that in - # our filter, but we don't check this. - result[column] = cell_vals[0] - return result - - -def _filter_chain_helper(column=None, versions=None, timestamp=None, - filters=None): - """Create filter chain to limit a results set. - - :type column: str - :param column: (Optional) The column (``fam:col``) to be selected - with the filter. - - :type versions: int - :param versions: (Optional) The maximum number of cells to return. - - :type timestamp: int - :param timestamp: (Optional) Timestamp (in milliseconds since the - epoch). If specified, only cells returned before (or - at) the timestamp will be matched. - - :type filters: list - :param filters: (Optional) List of existing filters to be extended. - - :rtype: :class:`RowFilter ` - :returns: The chained filter created, or just a single filter if only - one was needed. - :raises: :class:`ValueError ` if there are no - filters to chain. - """ - if filters is None: - filters = [] - - if column is not None: - if isinstance(column, six.binary_type): - column = column.decode('utf-8') - column_family_id, column_qualifier = column.split(':') - fam_filter = FamilyNameRegexFilter(column_family_id) - qual_filter = ColumnQualifierRegexFilter(column_qualifier) - filters.extend([fam_filter, qual_filter]) - if versions is not None: - filters.append(CellsColumnLimitFilter(versions)) - time_range = _convert_to_time_range(timestamp=timestamp) - if time_range is not None: - filters.append(TimestampRangeFilter(time_range)) - - num_filters = len(filters) - if num_filters == 0: - raise ValueError('Must have at least one filter.') - elif num_filters == 1: - return filters[0] - else: - return RowFilterChain(filters=filters) - - -def _columns_filter_helper(columns): - """Creates a union filter for a list of columns. - - :type columns: list - :param columns: Iterable containing column names (as strings). Each column - name can be either - - * an entire column family: ``fam`` or ``fam:`` - * a single column: ``fam:col`` - - :rtype: :class:`RowFilter ` - :returns: The union filter created containing all of the matched columns. - :raises: :class:`ValueError ` if there are no - filters to union. 
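A sketch of how ``_filter_chain_helper`` above composes filters (the column name is hypothetical; ``RowFilterChain`` and ``CellsColumnLimitFilter`` are the module-level imports shown earlier)::

    >>> chained = _filter_chain_helper(column='profile:name', versions=1)
    >>> isinstance(chained, RowFilterChain)
    True
    >>> single = _filter_chain_helper(versions=1)    # only one filter, so no chain
    >>> isinstance(single, CellsColumnLimitFilter)
    True
    >>> _filter_chain_helper()
    Traceback (most recent call last):
      ...
    ValueError: Must have at least one filter.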
- """ - filters = [] - for column_family_id, column_qualifier in _get_column_pairs(columns): - fam_filter = FamilyNameRegexFilter(column_family_id) - if column_qualifier is not None: - qual_filter = ColumnQualifierRegexFilter(column_qualifier) - combined_filter = RowFilterChain( - filters=[fam_filter, qual_filter]) - filters.append(combined_filter) - else: - filters.append(fam_filter) - - num_filters = len(filters) - if num_filters == 0: - raise ValueError('Must have at least one filter.') - elif num_filters == 1: - return filters[0] - else: - return RowFilterUnion(filters=filters) - - -def _row_keys_filter_helper(row_keys): - """Creates a union filter for a list of rows. - - :type row_keys: list - :param row_keys: Iterable containing row keys (as strings). - - :rtype: :class:`RowFilter ` - :returns: The union filter created containing all of the row keys. - :raises: :class:`ValueError ` if there are no - filters to union. - """ - filters = [] - for row_key in row_keys: - filters.append(RowKeyRegexFilter(row_key)) - - num_filters = len(filters) - if num_filters == 0: - raise ValueError('Must have at least one filter.') - elif num_filters == 1: - return filters[0] - else: - return RowFilterUnion(filters=filters) diff --git a/gcloud/bigtable/happybase/test_batch.py b/gcloud/bigtable/happybase/test_batch.py deleted file mode 100644 index cf2156f226b4..000000000000 --- a/gcloud/bigtable/happybase/test_batch.py +++ /dev/null @@ -1,568 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
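The column-union and row-key helpers above build on ``_get_column_pairs`` from the batch module; a hedged sketch of how they fit together (column and key names are illustrative)::

    >>> _get_column_pairs(['cf1', 'cf2:', 'cf3:col'])
    [['cf1', None], ['cf2', None], ['cf3', 'col']]
    >>> union = _columns_filter_helper(['cf1', 'cf3:col'])
    >>> isinstance(union, RowFilterUnion)
    True
    >>> single = _row_keys_filter_helper([b'user-1'])   # one key -> plain regex filter
    >>> isinstance(single, RowKeyRegexFilter)
    True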
- - -import unittest2 - - -class _SendMixin(object): - - _send_called = False - - def send(self): - self._send_called = True - - -class TestBatch(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.happybase.batch import Batch - return Batch - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - table = object() - batch = self._makeOne(table) - self.assertEqual(batch._table, table) - self.assertEqual(batch._batch_size, None) - self.assertEqual(batch._timestamp, None) - self.assertEqual(batch._delete_range, None) - self.assertEqual(batch._transaction, False) - self.assertEqual(batch._row_map, {}) - self.assertEqual(batch._mutation_count, 0) - - def test_constructor_explicit(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable.row_filters import TimestampRange - - table = object() - timestamp = 144185290431 - batch_size = 42 - transaction = False # Must be False when batch_size is non-null - - batch = self._makeOne(table, timestamp=timestamp, - batch_size=batch_size, transaction=transaction) - self.assertEqual(batch._table, table) - self.assertEqual(batch._batch_size, batch_size) - self.assertEqual(batch._timestamp, - _datetime_from_microseconds(1000 * timestamp)) - - next_timestamp = _datetime_from_microseconds(1000 * (timestamp + 1)) - time_range = TimestampRange(end=next_timestamp) - self.assertEqual(batch._delete_range, time_range) - self.assertEqual(batch._transaction, transaction) - self.assertEqual(batch._row_map, {}) - self.assertEqual(batch._mutation_count, 0) - - def test_constructor_with_non_default_wal(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import batch as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - table = object() - wal = object() - with _Monkey(MUT, _WARN=mock_warn): - self._makeOne(table, wal=wal) - - self.assertEqual(warned, [MUT._WAL_WARNING]) - - def test_constructor_with_non_positive_batch_size(self): - table = object() - batch_size = -10 - with self.assertRaises(ValueError): - self._makeOne(table, batch_size=batch_size) - batch_size = 0 - with self.assertRaises(ValueError): - self._makeOne(table, batch_size=batch_size) - - def test_constructor_with_batch_size_and_transactional(self): - table = object() - batch_size = 1 - transaction = True - with self.assertRaises(TypeError): - self._makeOne(table, batch_size=batch_size, - transaction=transaction) - - def test_send(self): - table = object() - batch = self._makeOne(table) - - batch._row_map = row_map = _MockRowMap() - row_map['row-key1'] = row1 = _MockRow() - row_map['row-key2'] = row2 = _MockRow() - batch._mutation_count = 1337 - - self.assertEqual(row_map.clear_count, 0) - self.assertEqual(row1.commits, 0) - self.assertEqual(row2.commits, 0) - self.assertNotEqual(batch._mutation_count, 0) - self.assertNotEqual(row_map, {}) - - batch.send() - self.assertEqual(row_map.clear_count, 1) - self.assertEqual(row1.commits, 1) - self.assertEqual(row2.commits, 1) - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(row_map, {}) - - def test__try_send_no_batch_size(self): - klass = self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch = BatchWithSend(table) - - self.assertEqual(batch._batch_size, None) - self.assertFalse(batch._send_called) - batch._try_send() - self.assertFalse(batch._send_called) - - def test__try_send_too_few_mutations(self): - klass = 
self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch_size = 10 - batch = BatchWithSend(table, batch_size=batch_size) - - self.assertEqual(batch._batch_size, batch_size) - self.assertFalse(batch._send_called) - mutation_count = 2 - batch._mutation_count = mutation_count - self.assertTrue(mutation_count < batch_size) - batch._try_send() - self.assertFalse(batch._send_called) - - def test__try_send_actual_send(self): - klass = self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch_size = 10 - batch = BatchWithSend(table, batch_size=batch_size) - - self.assertEqual(batch._batch_size, batch_size) - self.assertFalse(batch._send_called) - mutation_count = 12 - batch._mutation_count = mutation_count - self.assertTrue(mutation_count > batch_size) - batch._try_send() - self.assertTrue(batch._send_called) - - def test__get_row_exists(self): - table = object() - batch = self._makeOne(table) - - row_key = 'row-key' - row_obj = object() - batch._row_map[row_key] = row_obj - result = batch._get_row(row_key) - self.assertEqual(result, row_obj) - - def test__get_row_create_new(self): - # Make mock batch and make sure we can create a low-level table. - low_level_table = _MockLowLevelTable() - table = _MockTable(low_level_table) - batch = self._makeOne(table) - - # Make sure row map is empty. - self.assertEqual(batch._row_map, {}) - - # Customize/capture mock table creation. - low_level_table.mock_row = mock_row = object() - - # Actually get the row (which creates a row via a low-level table). - row_key = 'row-key' - result = batch._get_row(row_key) - self.assertEqual(result, mock_row) - - # Check all the things that were constructed. - self.assertEqual(low_level_table.rows_made, [row_key]) - # Check how the batch was updated. - self.assertEqual(batch._row_map, {row_key: mock_row}) - - def test_put_bad_wal(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import batch as MUT - - warned = [] - - def mock_warn(message): - warned.append(message) - # Raise an exception so we don't have to mock the entire - # environment needed for put(). - raise RuntimeError('No need to execute the rest.') - - table = object() - batch = self._makeOne(table) - - row = 'row-key' - data = {} - wal = None - - self.assertNotEqual(wal, MUT._WAL_SENTINEL) - with _Monkey(MUT, _WARN=mock_warn): - with self.assertRaises(RuntimeError): - batch.put(row, data, wal=wal) - - self.assertEqual(warned, [MUT._WAL_WARNING]) - - def test_put(self): - import operator - - table = object() - batch = self._makeOne(table) - batch._timestamp = timestamp = object() - row_key = 'row-key' - batch._row_map[row_key] = row = _MockRow() - - col1_fam = 'cf1' - col1_qual = 'qual1' - value1 = 'value1' - col2_fam = 'cf2' - col2_qual = 'qual2' - value2 = 'value2' - data = {col1_fam + ':' + col1_qual: value1, - col2_fam + ':' + col2_qual: value2} - - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(row.set_cell_calls, []) - batch.put(row_key, data) - self.assertEqual(batch._mutation_count, 2) - # Since the calls depend on data.keys(), the order - # is non-deterministic. 
- first_elt = operator.itemgetter(0) - ordered_calls = sorted(row.set_cell_calls, key=first_elt) - - cell1_args = (col1_fam, col1_qual, value1) - cell1_kwargs = {'timestamp': timestamp} - cell2_args = (col2_fam, col2_qual, value2) - cell2_kwargs = {'timestamp': timestamp} - self.assertEqual(ordered_calls, [ - (cell1_args, cell1_kwargs), - (cell2_args, cell2_kwargs), - ]) - - def test_put_call_try_send(self): - klass = self._getTargetClass() - - class CallTrySend(klass): - - try_send_calls = 0 - - def _try_send(self): - self.try_send_calls += 1 - - table = object() - batch = CallTrySend(table) - - row_key = 'row-key' - batch._row_map[row_key] = _MockRow() - - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(batch.try_send_calls, 0) - # No data so that nothing happens - batch.put(row_key, data={}) - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(batch.try_send_calls, 1) - - def _delete_columns_test_helper(self, time_range=None): - table = object() - batch = self._makeOne(table) - batch._delete_range = time_range - - col1_fam = 'cf1' - col2_fam = 'cf2' - col2_qual = 'col-name' - columns = [col1_fam + ':', col2_fam + ':' + col2_qual] - row_object = _MockRow() - - batch._delete_columns(columns, row_object) - self.assertEqual(row_object.commits, 0) - - cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {'time_range': time_range} - self.assertEqual(row_object.delete_cell_calls, - [(cell_deleted_args, cell_deleted_kwargs)]) - fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {'columns': row_object.ALL_COLUMNS} - self.assertEqual(row_object.delete_cells_calls, - [(fam_deleted_args, fam_deleted_kwargs)]) - - def test__delete_columns(self): - self._delete_columns_test_helper() - - def test__delete_columns_w_time_and_col_fam(self): - time_range = object() - with self.assertRaises(ValueError): - self._delete_columns_test_helper(time_range=time_range) - - def test_delete_bad_wal(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import batch as MUT - - warned = [] - - def mock_warn(message): - warned.append(message) - # Raise an exception so we don't have to mock the entire - # environment needed for delete(). 
- raise RuntimeError('No need to execute the rest.') - - table = object() - batch = self._makeOne(table) - - row = 'row-key' - columns = [] - wal = None - - self.assertNotEqual(wal, MUT._WAL_SENTINEL) - with _Monkey(MUT, _WARN=mock_warn): - with self.assertRaises(RuntimeError): - batch.delete(row, columns=columns, wal=wal) - - self.assertEqual(warned, [MUT._WAL_WARNING]) - - def test_delete_entire_row(self): - table = object() - batch = self._makeOne(table) - - row_key = 'row-key' - batch._row_map[row_key] = row = _MockRow() - - self.assertEqual(row.deletes, 0) - self.assertEqual(batch._mutation_count, 0) - batch.delete(row_key, columns=None) - self.assertEqual(row.deletes, 1) - self.assertEqual(batch._mutation_count, 1) - - def test_delete_entire_row_with_ts(self): - table = object() - batch = self._makeOne(table) - batch._delete_range = object() - - row_key = 'row-key' - batch._row_map[row_key] = row = _MockRow() - - self.assertEqual(row.deletes, 0) - self.assertEqual(batch._mutation_count, 0) - with self.assertRaises(ValueError): - batch.delete(row_key, columns=None) - self.assertEqual(row.deletes, 0) - self.assertEqual(batch._mutation_count, 0) - - def test_delete_call_try_send(self): - klass = self._getTargetClass() - - class CallTrySend(klass): - - try_send_calls = 0 - - def _try_send(self): - self.try_send_calls += 1 - - table = object() - batch = CallTrySend(table) - - row_key = 'row-key' - batch._row_map[row_key] = _MockRow() - - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(batch.try_send_calls, 0) - # No columns so that nothing happens - batch.delete(row_key, columns=[]) - self.assertEqual(batch._mutation_count, 0) - self.assertEqual(batch.try_send_calls, 1) - - def test_delete_some_columns(self): - table = object() - batch = self._makeOne(table) - - row_key = 'row-key' - batch._row_map[row_key] = row = _MockRow() - - self.assertEqual(batch._mutation_count, 0) - - col1_fam = 'cf1' - col2_fam = 'cf2' - col2_qual = 'col-name' - columns = [col1_fam + ':', col2_fam + ':' + col2_qual] - batch.delete(row_key, columns=columns) - - self.assertEqual(batch._mutation_count, 2) - cell_deleted_args = (col2_fam, col2_qual) - cell_deleted_kwargs = {'time_range': None} - self.assertEqual(row.delete_cell_calls, - [(cell_deleted_args, cell_deleted_kwargs)]) - fam_deleted_args = (col1_fam,) - fam_deleted_kwargs = {'columns': row.ALL_COLUMNS} - self.assertEqual(row.delete_cells_calls, - [(fam_deleted_args, fam_deleted_kwargs)]) - - def test_context_manager(self): - klass = self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch = BatchWithSend(table) - self.assertFalse(batch._send_called) - - with batch: - pass - - self.assertTrue(batch._send_called) - - def test_context_manager_with_exception_non_transactional(self): - klass = self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch = BatchWithSend(table) - self.assertFalse(batch._send_called) - - with self.assertRaises(ValueError): - with batch: - raise ValueError('Something bad happened') - - self.assertTrue(batch._send_called) - - def test_context_manager_with_exception_transactional(self): - klass = self._getTargetClass() - - class BatchWithSend(_SendMixin, klass): - pass - - table = object() - batch = BatchWithSend(table, transaction=True) - self.assertFalse(batch._send_called) - - with self.assertRaises(ValueError): - with batch: - raise ValueError('Something bad happened') - - self.assertFalse(batch._send_called) - - # Just to make 
sure send() actually works (and to make cover happy). - batch.send() - self.assertTrue(batch._send_called) - - -class Test__get_column_pairs(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.batch import _get_column_pairs - return _get_column_pairs(*args, **kwargs) - - def test_it(self): - columns = [b'cf1', u'cf2:', 'cf3::', 'cf3:name1', 'cf3:name2'] - result = self._callFUT(columns) - expected_result = [ - ['cf1', None], - ['cf2', None], - ['cf3', ''], - ['cf3', 'name1'], - ['cf3', 'name2'], - ] - self.assertEqual(result, expected_result) - - def test_bad_column(self): - columns = ['a:b:c'] - with self.assertRaises(ValueError): - self._callFUT(columns) - - def test_bad_column_type(self): - columns = [None] - with self.assertRaises(AttributeError): - self._callFUT(columns) - - def test_bad_columns_var(self): - columns = None - with self.assertRaises(TypeError): - self._callFUT(columns) - - def test_column_family_with_require_qualifier(self): - columns = ['a:'] - with self.assertRaises(ValueError): - self._callFUT(columns, require_qualifier=True) - - -class _MockRowMap(dict): - - clear_count = 0 - - def clear(self): - self.clear_count += 1 - super(_MockRowMap, self).clear() - - -class _MockRow(object): - - ALL_COLUMNS = object() - - def __init__(self): - self.commits = 0 - self.deletes = 0 - self.set_cell_calls = [] - self.delete_cell_calls = [] - self.delete_cells_calls = [] - - def commit(self): - self.commits += 1 - - def delete(self): - self.deletes += 1 - - def set_cell(self, *args, **kwargs): - self.set_cell_calls.append((args, kwargs)) - - def delete_cell(self, *args, **kwargs): - self.delete_cell_calls.append((args, kwargs)) - - def delete_cells(self, *args, **kwargs): - self.delete_cells_calls.append((args, kwargs)) - - -class _MockTable(object): - - def __init__(self, low_level_table): - self._low_level_table = low_level_table - - -class _MockLowLevelTable(object): - - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - self.rows_made = [] - self.mock_row = None - - def row(self, row_key): - self.rows_made.append(row_key) - return self.mock_row diff --git a/gcloud/bigtable/happybase/test_connection.py b/gcloud/bigtable/happybase/test_connection.py deleted file mode 100644 index a07e9d86b7a2..000000000000 --- a/gcloud/bigtable/happybase/test_connection.py +++ /dev/null @@ -1,628 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest2 - - -class Test__get_cluster(unittest2.TestCase): - - def _callFUT(self, timeout=None): - from gcloud.bigtable.happybase.connection import _get_cluster - return _get_cluster(timeout=timeout) - - def _helper(self, timeout=None, clusters=(), failed_zones=()): - from functools import partial - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - client_with_clusters = partial(_Client, clusters=clusters, - failed_zones=failed_zones) - with _Monkey(MUT, Client=client_with_clusters): - result = self._callFUT(timeout=timeout) - - # If we've reached this point, then _callFUT didn't fail, so we know - # there is exactly one cluster. - cluster, = clusters - self.assertEqual(result, cluster) - client = cluster.client - self.assertEqual(client.args, ()) - expected_kwargs = {'admin': True} - if timeout is not None: - expected_kwargs['timeout_seconds'] = timeout / 1000.0 - self.assertEqual(client.kwargs, expected_kwargs) - self.assertEqual(client.start_calls, 1) - self.assertEqual(client.stop_calls, 1) - - def test_default(self): - cluster = _Cluster() - self._helper(clusters=[cluster]) - - def test_with_timeout(self): - cluster = _Cluster() - self._helper(timeout=2103, clusters=[cluster]) - - def test_with_no_clusters(self): - with self.assertRaises(ValueError): - self._helper() - - def test_with_too_many_clusters(self): - clusters = [_Cluster(), _Cluster()] - with self.assertRaises(ValueError): - self._helper(clusters=clusters) - - def test_with_failed_zones(self): - cluster = _Cluster() - failed_zone = 'us-central1-c' - with self.assertRaises(ValueError): - self._helper(clusters=[cluster], - failed_zones=[failed_zone]) - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.happybase.connection import Connection - return Connection - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - cluster = _Cluster() # Avoid implicit environ check. - self.assertEqual(cluster._client.start_calls, 0) - connection = self._makeOne(cluster=cluster) - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) - - self.assertEqual(connection._cluster, cluster) - self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') - - def test_constructor_no_autoconnect(self): - cluster = _Cluster() # Avoid implicit environ check. 
- connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) - self.assertEqual(cluster._client.stop_calls, 0) - self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') - - def test_constructor_missing_cluster(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - cluster = _Cluster() - timeout = object() - get_cluster_called = [] - - def mock_get_cluster(timeout): - get_cluster_called.append(timeout) - return cluster - - with _Monkey(MUT, _get_cluster=mock_get_cluster): - connection = self._makeOne(autoconnect=False, cluster=None, - timeout=timeout) - self.assertEqual(connection.table_prefix, None) - self.assertEqual(connection.table_prefix_separator, '_') - self.assertEqual(connection._cluster, cluster) - - self.assertEqual(get_cluster_called, [timeout]) - - def test_constructor_explicit(self): - autoconnect = False - table_prefix = 'table-prefix' - table_prefix_separator = 'sep' - cluster_copy = _Cluster() - cluster = _Cluster(copies=[cluster_copy]) - - connection = self._makeOne( - autoconnect=autoconnect, - table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator, - cluster=cluster) - self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, - table_prefix_separator) - - def test_constructor_with_unknown_argument(self): - cluster = _Cluster() - with self.assertRaises(TypeError): - self._makeOne(cluster=cluster, unknown='foo') - - def test_constructor_with_legacy_args(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - cluster = _Cluster() - with _Monkey(MUT, _WARN=mock_warn): - self._makeOne(cluster=cluster, host=object(), - port=object(), compat=object(), - transport=object(), protocol=object()) - - self.assertEqual(len(warned), 1) - self.assertIn('host', warned[0]) - self.assertIn('port', warned[0]) - self.assertIn('compat', warned[0]) - self.assertIn('transport', warned[0]) - self.assertIn('protocol', warned[0]) - - def test_constructor_with_timeout_and_cluster(self): - cluster = _Cluster() - with self.assertRaises(ValueError): - self._makeOne(cluster=cluster, timeout=object()) - - def test_constructor_non_string_prefix(self): - table_prefix = object() - - with self.assertRaises(TypeError): - self._makeOne(autoconnect=False, - table_prefix=table_prefix) - - def test_constructor_non_string_prefix_separator(self): - table_prefix_separator = object() - - with self.assertRaises(TypeError): - self._makeOne(autoconnect=False, - table_prefix_separator=table_prefix_separator) - - def test_open(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.start_calls, 0) - connection.open() - self.assertEqual(cluster._client.start_calls, 1) - self.assertEqual(cluster._client.stop_calls, 0) - - def test_close(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) - connection.close() - self.assertEqual(cluster._client.stop_calls, 1) - self.assertEqual(cluster._client.start_calls, 0) - - def test___del__with_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. 
- connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) - connection.__del__() - self.assertEqual(cluster._client.stop_calls, 1) - - def test___del__no_cluster(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - self.assertEqual(cluster._client.stop_calls, 0) - del connection._cluster - connection.__del__() - self.assertEqual(cluster._client.stop_calls, 0) - - def test__table_name_with_prefix_set(self): - table_prefix = 'table-prefix' - table_prefix_separator = '<>' - cluster = _Cluster() - - connection = self._makeOne( - autoconnect=False, - table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator, - cluster=cluster) - - name = 'some-name' - prefixed = connection._table_name(name) - self.assertEqual(prefixed, - table_prefix + table_prefix_separator + name) - - def test__table_name_with_no_prefix_set(self): - cluster = _Cluster() - connection = self._makeOne(autoconnect=False, - cluster=cluster) - - name = 'some-name' - prefixed = connection._table_name(name) - self.assertEqual(prefixed, name) - - def test_table_factory(self): - from gcloud.bigtable.happybase.table import Table - - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - table = connection.table(name) - - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.name, name) - self.assertEqual(table.connection, connection) - - def _table_factory_prefix_helper(self, use_prefix=True): - from gcloud.bigtable.happybase.table import Table - - cluster = _Cluster() # Avoid implicit environ check. - table_prefix = 'table-prefix' - table_prefix_separator = '<>' - connection = self._makeOne( - autoconnect=False, table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator, - cluster=cluster) - - name = 'table-name' - table = connection.table(name, use_prefix=use_prefix) - - self.assertTrue(isinstance(table, Table)) - prefixed_name = table_prefix + table_prefix_separator + name - if use_prefix: - self.assertEqual(table.name, prefixed_name) - else: - self.assertEqual(table.name, name) - self.assertEqual(table.connection, connection) - - def test_table_factory_with_prefix(self): - self._table_factory_prefix_helper(use_prefix=True) - - def test_table_factory_with_ignored_prefix(self): - self._table_factory_prefix_helper(use_prefix=False) - - def test_tables(self): - from gcloud.bigtable.table import Table - - table_name1 = 'table-name1' - table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ - Table(table_name1, None), - Table(table_name2, None), - ]) - connection = self._makeOne(autoconnect=False, cluster=cluster) - result = connection.tables() - self.assertEqual(result, [table_name1, table_name2]) - - def test_tables_with_prefix(self): - from gcloud.bigtable.table import Table - - table_prefix = 'prefix' - table_prefix_separator = '<>' - unprefixed_table_name1 = 'table-name1' - - table_name1 = (table_prefix + table_prefix_separator + - unprefixed_table_name1) - table_name2 = 'table-name2' - cluster = _Cluster(list_tables_result=[ - Table(table_name1, None), - Table(table_name2, None), - ]) - connection = self._makeOne( - autoconnect=False, cluster=cluster, table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator) - result = connection.tables() - self.assertEqual(result, [unprefixed_table_name1]) - - def test_create_table(self): - import 
operator - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - mock_gc_rule = object() - called_options = [] - - def mock_parse_family_option(option): - called_options.append(option) - return mock_gc_rule - - name = 'table-name' - col_fam1 = 'cf1' - col_fam_option1 = object() - col_fam2 = u'cf2' - col_fam_option2 = object() - col_fam3 = b'cf3' - col_fam_option3 = object() - families = { - col_fam1: col_fam_option1, - # A trailing colon is also allowed. - col_fam2 + ':': col_fam_option2, - col_fam3 + b':': col_fam_option3, - } - - tables_created = [] - - def make_table(*args, **kwargs): - result = _MockLowLevelTable(*args, **kwargs) - tables_created.append(result) - return result - - with _Monkey(MUT, _LowLevelTable=make_table, - _parse_family_option=mock_parse_family_option): - connection.create_table(name, families) - - # Just one table would have been created. - table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) - self.assertEqual(table_instance.kwargs, {}) - self.assertEqual(table_instance.create_calls, 1) - - # Check if our mock was called twice, but we don't know the order. - self.assertEqual( - set(called_options), - set([col_fam_option1, col_fam_option2, col_fam_option3])) - - # We expect three column family instances created, but don't know the - # order due to non-deterministic dict.items(). - col_fam_created = table_instance.col_fam_created - self.assertEqual(len(col_fam_created), 3) - col_fam_created.sort(key=operator.attrgetter('column_family_id')) - self.assertEqual(col_fam_created[0].column_family_id, col_fam1) - self.assertEqual(col_fam_created[0].gc_rule, mock_gc_rule) - self.assertEqual(col_fam_created[0].create_calls, 1) - self.assertEqual(col_fam_created[1].column_family_id, col_fam2) - self.assertEqual(col_fam_created[1].gc_rule, mock_gc_rule) - self.assertEqual(col_fam_created[1].create_calls, 1) - self.assertEqual(col_fam_created[2].column_family_id, - col_fam3.decode('utf-8')) - self.assertEqual(col_fam_created[2].gc_rule, mock_gc_rule) - self.assertEqual(col_fam_created[2].create_calls, 1) - - def test_create_table_bad_type(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - families = None - with self.assertRaises(TypeError): - connection.create_table(name, families) - - def test_create_table_bad_value(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - families = {} - with self.assertRaises(ValueError): - connection.create_table(name, families) - - def _delete_table_helper(self, disable=False): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - tables_created = [] - - def make_table(*args, **kwargs): - result = _MockLowLevelTable(*args, **kwargs) - tables_created.append(result) - return result - - name = 'table-name' - with _Monkey(MUT, _LowLevelTable=make_table): - connection.delete_table(name, disable=disable) - - # Just one table would have been created. 
- table_instance, = tables_created - self.assertEqual(table_instance.args, (name, cluster)) - self.assertEqual(table_instance.kwargs, {}) - self.assertEqual(table_instance.delete_calls, 1) - - def test_delete_table(self): - self._delete_table_helper() - - def test_delete_table_disable(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - with _Monkey(MUT, _WARN=mock_warn): - self._delete_table_helper(disable=True) - - self.assertEqual(warned, [MUT._DISABLE_DELETE_MSG]) - - def test_enable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - with self.assertRaises(NotImplementedError): - connection.enable_table(name) - - def test_disable_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - with self.assertRaises(NotImplementedError): - connection.disable_table(name) - - def test_is_table_enabled(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - with self.assertRaises(NotImplementedError): - connection.is_table_enabled(name) - - def test_compact_table(self): - cluster = _Cluster() # Avoid implicit environ check. - connection = self._makeOne(autoconnect=False, cluster=cluster) - - name = 'table-name' - major = True - with self.assertRaises(NotImplementedError): - connection.compact_table(name, major=major) - - -class Test__parse_family_option(unittest2.TestCase): - - def _callFUT(self, option): - from gcloud.bigtable.happybase.connection import _parse_family_option - return _parse_family_option(option) - - def test_dictionary_no_keys(self): - option = {} - result = self._callFUT(option) - self.assertEqual(result, None) - - def test_null(self): - option = None - result = self._callFUT(option) - self.assertEqual(result, None) - - def test_dictionary_bad_key(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import connection as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - option = {'badkey': None} - with _Monkey(MUT, _WARN=mock_warn): - result = self._callFUT(option) - - self.assertEqual(result, None) - self.assertEqual(len(warned), 1) - self.assertIn('badkey', warned[0]) - - def test_dictionary_versions_key(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - - versions = 42 - option = {'max_versions': versions} - result = self._callFUT(option) - - gc_rule = MaxVersionsGCRule(versions) - self.assertEqual(result, gc_rule) - - def test_dictionary_ttl_key(self): - import datetime - from gcloud.bigtable.column_family import MaxAgeGCRule - - time_to_live = 24 * 60 * 60 - max_age = datetime.timedelta(days=1) - option = {'time_to_live': time_to_live} - result = self._callFUT(option) - - gc_rule = MaxAgeGCRule(max_age) - self.assertEqual(result, gc_rule) - - def test_dictionary_both_keys(self): - import datetime - from gcloud.bigtable.column_family import GCRuleIntersection - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - versions = 42 - time_to_live = 24 * 60 * 60 - option = { - 'max_versions': versions, - 'time_to_live': time_to_live, - } - result = self._callFUT(option) - - max_age = datetime.timedelta(days=1) - # NOTE: This relies on 
the order of the rules in the method we are - # calling matching this order here. - gc_rule1 = MaxAgeGCRule(max_age) - gc_rule2 = MaxVersionsGCRule(versions) - gc_rule = GCRuleIntersection(rules=[gc_rule1, gc_rule2]) - self.assertEqual(result, gc_rule) - - def test_non_dictionary(self): - option = object() - self.assertFalse(isinstance(option, dict)) - result = self._callFUT(option) - self.assertEqual(result, option) - - -class _Client(object): - - def __init__(self, *args, **kwargs): - self.clusters = kwargs.pop('clusters', []) - for cluster in self.clusters: - cluster.client = self - self.failed_zones = kwargs.pop('failed_zones', []) - self.args = args - self.kwargs = kwargs - self.start_calls = 0 - self.stop_calls = 0 - - def start(self): - self.start_calls += 1 - - def stop(self): - self.stop_calls += 1 - - def list_clusters(self): - return self.clusters, self.failed_zones - - -class _Cluster(object): - - def __init__(self, copies=(), list_tables_result=()): - self.copies = list(copies) - # Included to support Connection.__del__ - self._client = _Client() - self.list_tables_result = list_tables_result - - def copy(self): - if self.copies: - result = self.copies[0] - self.copies[:] = self.copies[1:] - return result - else: - return self - - def list_tables(self): - return self.list_tables_result - - -class _MockLowLevelColumnFamily(object): - - def __init__(self, column_family_id, gc_rule=None): - self.column_family_id = column_family_id - self.gc_rule = gc_rule - self.create_calls = 0 - - def create(self): - self.create_calls += 1 - - -class _MockLowLevelTable(object): - - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - self.delete_calls = 0 - self.create_calls = 0 - self.col_fam_created = [] - - def delete(self): - self.delete_calls += 1 - - def create(self): - self.create_calls += 1 - - def column_family(self, column_family_id, gc_rule=None): - result = _MockLowLevelColumnFamily(column_family_id, gc_rule=gc_rule) - self.col_fam_created.append(result) - return result diff --git a/gcloud/bigtable/happybase/test_pool.py b/gcloud/bigtable/happybase/test_pool.py deleted file mode 100644 index f61658ae35fc..000000000000 --- a/gcloud/bigtable/happybase/test_pool.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class TestConnectionPool(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.happybase.pool import ConnectionPool - return ConnectionPool - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - import six - import threading - from gcloud.bigtable.happybase.connection import Connection - - size = 11 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) # Avoid implicit environ check. 
- pool = self._makeOne(size, cluster=cluster) - - self.assertTrue(isinstance(pool._lock, type(threading.Lock()))) - self.assertTrue(isinstance(pool._thread_connections, threading.local)) - self.assertEqual(pool._thread_connections.__dict__, {}) - - queue = pool._queue - self.assertTrue(isinstance(queue, six.moves.queue.LifoQueue)) - self.assertTrue(queue.full()) - self.assertEqual(queue.maxsize, size) - for connection in queue.queue: - self.assertTrue(isinstance(connection, Connection)) - self.assertTrue(connection._cluster is cluster_copy) - - def test_constructor_passes_kwargs(self): - table_prefix = 'foo' - table_prefix_separator = '<>' - cluster = _Cluster() # Avoid implicit environ check. - - size = 1 - pool = self._makeOne(size, table_prefix=table_prefix, - table_prefix_separator=table_prefix_separator, - cluster=cluster) - - for connection in pool._queue.queue: - self.assertEqual(connection.table_prefix, table_prefix) - self.assertEqual(connection.table_prefix_separator, - table_prefix_separator) - - def test_constructor_ignores_autoconnect(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase.connection import Connection - from gcloud.bigtable.happybase import pool as MUT - - class ConnectionWithOpen(Connection): - - _open_called = False - - def open(self): - self._open_called = True - - # First make sure the custom Connection class does as expected. - cluster_copy1 = _Cluster() - cluster_copy2 = _Cluster() - cluster_copy3 = _Cluster() - cluster = _Cluster( - copies=[cluster_copy1, cluster_copy2, cluster_copy3]) - connection = ConnectionWithOpen(autoconnect=False, cluster=cluster) - self.assertFalse(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy1) - connection = ConnectionWithOpen(autoconnect=True, cluster=cluster) - self.assertTrue(connection._open_called) - self.assertTrue(connection._cluster is cluster_copy2) - - # Then make sure autoconnect=True is ignored in a pool. - size = 1 - with _Monkey(MUT, Connection=ConnectionWithOpen): - pool = self._makeOne(size, autoconnect=True, cluster=cluster) - - for connection in pool._queue.queue: - self.assertTrue(isinstance(connection, ConnectionWithOpen)) - self.assertTrue(connection._cluster is cluster_copy3) - self.assertFalse(connection._open_called) - - def test_constructor_infers_cluster(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase.connection import Connection - from gcloud.bigtable.happybase import pool as MUT - - size = 1 - cluster_copy = _Cluster() - all_copies = [cluster_copy] * size - cluster = _Cluster(copies=all_copies) - get_cluster_calls = [] - - def mock_get_cluster(timeout=None): - get_cluster_calls.append(timeout) - return cluster - - with _Monkey(MUT, _get_cluster=mock_get_cluster): - pool = self._makeOne(size) - - for connection in pool._queue.queue: - self.assertTrue(isinstance(connection, Connection)) - # We know that the Connection() constructor will - # call cluster.copy(). 
- self.assertTrue(connection._cluster is cluster_copy) - - self.assertEqual(get_cluster_calls, [None]) - - def test_constructor_non_integer_size(self): - size = None - with self.assertRaises(TypeError): - self._makeOne(size) - - def test_constructor_non_positive_size(self): - size = -10 - with self.assertRaises(ValueError): - self._makeOne(size) - size = 0 - with self.assertRaises(ValueError): - self._makeOne(size) - - def _makeOneWithMockQueue(self, queue_return): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import pool as MUT - - # We are going to use a fake queue, so we don't want any connections - # or clusters to be created in the constructor. - size = -1 - cluster = object() - with _Monkey(MUT, _MIN_POOL_SIZE=size): - pool = self._makeOne(size, cluster=cluster) - - pool._queue = _Queue(queue_return) - return pool - - def test__acquire_connection(self): - queue_return = object() - pool = self._makeOneWithMockQueue(queue_return) - - timeout = 432 - connection = pool._acquire_connection(timeout=timeout) - self.assertTrue(connection is queue_return) - self.assertEqual(pool._queue._get_calls, [(True, timeout)]) - self.assertEqual(pool._queue._put_calls, []) - - def test__acquire_connection_failure(self): - from gcloud.bigtable.happybase.pool import NoConnectionsAvailable - - pool = self._makeOneWithMockQueue(None) - timeout = 1027 - with self.assertRaises(NoConnectionsAvailable): - pool._acquire_connection(timeout=timeout) - self.assertEqual(pool._queue._get_calls, [(True, timeout)]) - self.assertEqual(pool._queue._put_calls, []) - - def test_connection_is_context_manager(self): - import contextlib - import six - - queue_return = _Connection() - pool = self._makeOneWithMockQueue(queue_return) - cnxn_context = pool.connection() - if six.PY3: # pragma: NO COVER - self.assertTrue(isinstance(cnxn_context, - contextlib._GeneratorContextManager)) - else: - self.assertTrue(isinstance(cnxn_context, - contextlib.GeneratorContextManager)) - - def test_connection_no_current_cnxn(self): - queue_return = _Connection() - pool = self._makeOneWithMockQueue(queue_return) - timeout = 55 - - self.assertFalse(hasattr(pool._thread_connections, 'current')) - with pool.connection(timeout=timeout) as connection: - self.assertEqual(pool._thread_connections.current, queue_return) - self.assertTrue(connection is queue_return) - self.assertFalse(hasattr(pool._thread_connections, 'current')) - - self.assertEqual(pool._queue._get_calls, [(True, timeout)]) - self.assertEqual(pool._queue._put_calls, - [(queue_return, None, None)]) - - def test_connection_with_current_cnxn(self): - current_cnxn = _Connection() - queue_return = _Connection() - pool = self._makeOneWithMockQueue(queue_return) - pool._thread_connections.current = current_cnxn - timeout = 8001 - - with pool.connection(timeout=timeout) as connection: - self.assertTrue(connection is current_cnxn) - - self.assertEqual(pool._queue._get_calls, []) - self.assertEqual(pool._queue._put_calls, []) - self.assertEqual(pool._thread_connections.current, current_cnxn) - - -class _Client(object): - - def __init__(self): - self.stop_calls = 0 - - def stop(self): - self.stop_calls += 1 - - -class _Connection(object): - - def open(self): - pass - - -class _Cluster(object): - - def __init__(self, copies=()): - self.copies = list(copies) - # Included to support Connection.__del__ - self._client = _Client() - - def copy(self): - if self.copies: - result = self.copies[0] - self.copies[:] = self.copies[1:] - return result - else: - return self - - -class 
_Queue(object): - - def __init__(self, result=None): - self.result = result - self._get_calls = [] - self._put_calls = [] - - def get(self, block=None, timeout=None): - self._get_calls.append((block, timeout)) - if self.result is None: - import six - raise six.moves.queue.Empty - else: - return self.result - - def put(self, item, block=None, timeout=None): - self._put_calls.append((item, block, timeout)) diff --git a/gcloud/bigtable/happybase/test_table.py b/gcloud/bigtable/happybase/test_table.py deleted file mode 100644 index be18ec1bc014..000000000000 --- a/gcloud/bigtable/happybase/test_table.py +++ /dev/null @@ -1,1505 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test_make_row(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import make_row - return make_row(*args, **kwargs) - - def test_it(self): - with self.assertRaises(NotImplementedError): - self._callFUT({}, False) - - -class Test_make_ordered_row(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import make_ordered_row - return make_ordered_row(*args, **kwargs) - - def test_it(self): - with self.assertRaises(NotImplementedError): - self._callFUT([], False) - - -class TestTable(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.happybase.table import Table - return Table - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - cluster = object() - connection = _Connection(cluster) - tables_constructed = [] - - def make_low_level_table(*args, **kwargs): - result = _MockLowLevelTable(*args, **kwargs) - tables_constructed.append(result) - return result - - with _Monkey(MUT, _LowLevelTable=make_low_level_table): - table = self._makeOne(name, connection) - self.assertEqual(table.name, name) - self.assertEqual(table.connection, connection) - - table_instance, = tables_constructed - self.assertEqual(table._low_level_table, table_instance) - self.assertEqual(table_instance.args, (name, cluster)) - self.assertEqual(table_instance.kwargs, {}) - - def test_constructor_null_connection(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - self.assertEqual(table.name, name) - self.assertEqual(table.connection, connection) - self.assertEqual(table._low_level_table, None) - - def test_families(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - - # Mock the column families to be returned. 
- col_fam_name = 'fam' - gc_rule = object() - col_fam = _MockLowLevelColumnFamily(col_fam_name, gc_rule=gc_rule) - col_fams = {col_fam_name: col_fam} - table._low_level_table.column_families = col_fams - - to_dict_result = object() - to_dict_calls = [] - - def mock_gc_rule_to_dict(gc_rule): - to_dict_calls.append(gc_rule) - return to_dict_result - - with _Monkey(MUT, _gc_rule_to_dict=mock_gc_rule_to_dict): - result = table.families() - - self.assertEqual(result, {col_fam_name: to_dict_result}) - self.assertEqual(table._low_level_table.list_column_families_calls, 1) - self.assertEqual(to_dict_calls, [gc_rule]) - - def test___repr__(self): - name = 'table-name' - table = self._makeOne(name, None) - self.assertEqual(repr(table), '') - - def test_regions(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - - with self.assertRaises(NotImplementedError): - table.regions() - - def test_row_empty_row(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - table._low_level_table.read_row_result = None - - # Set-up mocks. - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - row_key = 'row-key' - timestamp = object() - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper): - result = table.row(row_key, timestamp=timestamp) - - # read_row_result == None --> No results. - self.assertEqual(result, {}) - - read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - expected_kwargs = { - 'filters': [], - 'versions': 1, - 'timestamp': timestamp, - } - self.assertEqual(mock_filters, [expected_kwargs]) - - def test_row_with_columns(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - table._low_level_table.read_row_result = None - - # Set-up mocks. - fake_col_filter = object() - mock_columns = [] - - def mock_columns_filter_helper(*args): - mock_columns.append(args) - return fake_col_filter - - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - row_key = 'row-key' - columns = object() - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper, - _columns_filter_helper=mock_columns_filter_helper): - result = table.row(row_key, columns=columns) - - # read_row_result == None --> No results. 
- self.assertEqual(result, {}) - - read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - self.assertEqual(mock_columns, [(columns,)]) - expected_kwargs = { - 'filters': [fake_col_filter], - 'versions': 1, - 'timestamp': None, - } - self.assertEqual(mock_filters, [expected_kwargs]) - - def test_row_with_results(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - from gcloud.bigtable.row_data import PartialRowData - - row_key = 'row-key' - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - partial_row = PartialRowData(row_key) - table._low_level_table.read_row_result = partial_row - - # Set-up mocks. - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - fake_pair = object() - mock_cells = [] - - def mock_cells_to_pairs(*args, **kwargs): - mock_cells.append((args, kwargs)) - return [fake_pair] - - col_fam = u'cf1' - qual = b'qual' - fake_cells = object() - partial_row._cells = {col_fam: {qual: fake_cells}} - include_timestamp = object() - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper, - _cells_to_pairs=mock_cells_to_pairs): - result = table.row(row_key, include_timestamp=include_timestamp) - - # The results come from _cells_to_pairs. - expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} - self.assertEqual(result, expected_result) - - read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - expected_kwargs = { - 'filters': [], - 'versions': 1, - 'timestamp': None, - } - self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) - - def test_rows_empty_row(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - - result = table.rows([]) - self.assertEqual(result, []) - - def test_rows_with_columns(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - rr_result = _MockPartialRowsData() - table._low_level_table.read_rows_result = rr_result - self.assertEqual(rr_result.consume_all_calls, 0) - - # Set-up mocks. - fake_col_filter = object() - mock_cols = [] - - def mock_columns_filter_helper(*args): - mock_cols.append(args) - return fake_col_filter - - fake_rows_filter = object() - mock_rows = [] - - def mock_row_keys_filter_helper(*args): - mock_rows.append(args) - return fake_rows_filter - - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - rows = ['row-key'] - columns = object() - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper, - _row_keys_filter_helper=mock_row_keys_filter_helper, - _columns_filter_helper=mock_columns_filter_helper): - result = table.rows(rows, columns=columns) - - # read_rows_result == Empty PartialRowsData --> No results. 
- self.assertEqual(result, []) - - read_rows_args = () - read_rows_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) - self.assertEqual(rr_result.consume_all_calls, 1) - - self.assertEqual(mock_cols, [(columns,)]) - self.assertEqual(mock_rows, [(rows,)]) - expected_kwargs = { - 'filters': [fake_col_filter, fake_rows_filter], - 'versions': 1, - 'timestamp': None, - } - self.assertEqual(mock_filters, [expected_kwargs]) - - def test_rows_with_results(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - from gcloud.bigtable.row_data import PartialRowData - - row_key1 = 'row-key1' - row_key2 = 'row-key2' - rows = [row_key1, row_key2] - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - - row1 = PartialRowData(row_key1) - # Return row1 but not row2 - rr_result = _MockPartialRowsData(rows={row_key1: row1}) - table._low_level_table.read_rows_result = rr_result - self.assertEqual(rr_result.consume_all_calls, 0) - - # Set-up mocks. - fake_rows_filter = object() - mock_rows = [] - - def mock_row_keys_filter_helper(*args): - mock_rows.append(args) - return fake_rows_filter - - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - fake_pair = object() - mock_cells = [] - - def mock_cells_to_pairs(*args, **kwargs): - mock_cells.append((args, kwargs)) - return [fake_pair] - - col_fam = u'cf1' - qual = b'qual' - fake_cells = object() - row1._cells = {col_fam: {qual: fake_cells}} - include_timestamp = object() - with _Monkey(MUT, _row_keys_filter_helper=mock_row_keys_filter_helper, - _filter_chain_helper=mock_filter_chain_helper, - _cells_to_pairs=mock_cells_to_pairs): - result = table.rows(rows, include_timestamp=include_timestamp) - - # read_rows_result == PartialRowsData with row_key1 - expected_result = {col_fam.encode('ascii') + b':' + qual: fake_pair} - self.assertEqual(result, [(row_key1, expected_result)]) - - read_rows_args = () - read_rows_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) - self.assertEqual(rr_result.consume_all_calls, 1) - - self.assertEqual(mock_rows, [(rows,)]) - expected_kwargs = { - 'filters': [fake_rows_filter], - 'versions': 1, - 'timestamp': None, - } - self.assertEqual(mock_filters, [expected_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) - - def test_cells_empty_row(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - table._low_level_table.read_row_result = None - - # Set-up mocks. - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - row_key = 'row-key' - column = 'fam:col1' - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper): - result = table.cells(row_key, column) - - # read_row_result == None --> No results. 
- self.assertEqual(result, []) - - read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - expected_kwargs = { - 'column': column, - 'versions': None, - 'timestamp': None, - } - self.assertEqual(mock_filters, [expected_kwargs]) - - def test_cells_with_results(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - from gcloud.bigtable.row_data import PartialRowData - - row_key = 'row-key' - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - partial_row = PartialRowData(row_key) - table._low_level_table.read_row_result = partial_row - - # These are all passed to mocks. - versions = object() - timestamp = object() - include_timestamp = object() - - # Set-up mocks. - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - fake_result = object() - mock_cells = [] - - def mock_cells_to_pairs(*args, **kwargs): - mock_cells.append((args, kwargs)) - return fake_result - - col_fam = 'cf1' - qual = 'qual' - fake_cells = object() - partial_row._cells = {col_fam: {qual: fake_cells}} - column = col_fam + ':' + qual - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper, - _cells_to_pairs=mock_cells_to_pairs): - result = table.cells(row_key, column, versions=versions, - timestamp=timestamp, - include_timestamp=include_timestamp) - - self.assertEqual(result, fake_result) - - read_row_args = (row_key,) - read_row_kwargs = {'filter_': fake_filter} - self.assertEqual(table._low_level_table.read_row_calls, [ - (read_row_args, read_row_kwargs), - ]) - - filter_kwargs = { - 'column': column, - 'versions': versions, - 'timestamp': timestamp, - } - self.assertEqual(mock_filters, [filter_kwargs]) - to_pairs_kwargs = {'include_timestamp': include_timestamp} - self.assertEqual(mock_cells, - [((fake_cells,), to_pairs_kwargs)]) - - def test_scan_with_batch_size(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - # Use unknown to force a TypeError, so we don't need to - # stub out the rest of the method. - with self.assertRaises(TypeError): - with _Monkey(MUT, _WARN=mock_warn): - list(table.scan(batch_size=object(), unknown=None)) - - self.assertEqual(len(warned), 1) - self.assertIn('batch_size', warned[0]) - - def test_scan_with_scan_batching(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - # Use unknown to force a TypeError, so we don't need to - # stub out the rest of the method. 
- with self.assertRaises(TypeError): - with _Monkey(MUT, _WARN=mock_warn): - list(table.scan(scan_batching=object(), unknown=None)) - - self.assertEqual(len(warned), 1) - self.assertIn('scan_batching', warned[0]) - - def test_scan_with_sorted_columns(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - warned = [] - - def mock_warn(msg): - warned.append(msg) - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - # Use unknown to force a TypeError, so we don't need to - # stub out the rest of the method. - with self.assertRaises(TypeError): - with _Monkey(MUT, _WARN=mock_warn): - list(table.scan(sorted_columns=object(), unknown=None)) - - self.assertEqual(len(warned), 1) - self.assertIn('sorted_columns', warned[0]) - - def test_scan_with_invalid_limit(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - with self.assertRaises(ValueError): - list(table.scan(limit=-10)) - - def test_scan_with_row_prefix_and_row_start(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - with self.assertRaises(ValueError): - list(table.scan(row_prefix='a', row_stop='abc')) - - def test_scan_with_string_filter(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - with self.assertRaises(TypeError): - list(table.scan(filter='some-string')) - - def _scan_test_helper(self, row_limits=(None, None), row_prefix=None, - columns=None, filter_=None, timestamp=None, - include_timestamp=False, limit=None, rr_result=None, - expected_result=None): - import types - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - row_start, row_stop = row_limits - connection = None - table = self._makeOne(name, connection) - table._low_level_table = _MockLowLevelTable() - rr_result = rr_result or _MockPartialRowsData() - table._low_level_table.read_rows_result = rr_result - self.assertEqual(rr_result.consume_next_calls, 0) - - # Set-up mocks. - fake_col_filter = object() - mock_columns = [] - - def mock_columns_filter_helper(*args): - mock_columns.append(args) - return fake_col_filter - - fake_filter = object() - mock_filters = [] - - def mock_filter_chain_helper(**kwargs): - mock_filters.append(kwargs) - return fake_filter - - with _Monkey(MUT, _filter_chain_helper=mock_filter_chain_helper, - _columns_filter_helper=mock_columns_filter_helper): - result = table.scan(row_start=row_start, row_stop=row_stop, - row_prefix=row_prefix, columns=columns, - filter=filter_, timestamp=timestamp, - include_timestamp=include_timestamp, - limit=limit) - self.assertTrue(isinstance(result, types.GeneratorType)) - # Need to consume the result while the monkey patch is applied. - # read_rows_result == Empty PartialRowsData --> No results. 
- expected_result = expected_result or [] - self.assertEqual(list(result), expected_result) - - read_rows_args = () - if row_prefix: - row_start = row_prefix - row_stop = MUT._string_successor(row_prefix) - read_rows_kwargs = { - 'end_key': row_stop, - 'filter_': fake_filter, - 'limit': limit, - 'start_key': row_start, - } - self.assertEqual(table._low_level_table.read_rows_calls, [ - (read_rows_args, read_rows_kwargs), - ]) - self.assertEqual(rr_result.consume_next_calls, - rr_result.iterations + 1) - - if columns is not None: - self.assertEqual(mock_columns, [(columns,)]) - else: - self.assertEqual(mock_columns, []) - - filters = [] - if filter_ is not None: - filters.append(filter_) - if columns: - filters.append(fake_col_filter) - expected_kwargs = { - 'filters': filters, - 'versions': 1, - 'timestamp': timestamp, - } - self.assertEqual(mock_filters, [expected_kwargs]) - - def test_scan_with_columns(self): - columns = object() - self._scan_test_helper(columns=columns) - - def test_scan_with_row_start_and_stop(self): - row_start = 'bar' - row_stop = 'foo' - row_limits = (row_start, row_stop) - self._scan_test_helper(row_limits=row_limits) - - def test_scan_with_row_prefix(self): - row_prefix = 'row-prefi' - self._scan_test_helper(row_prefix=row_prefix) - - def test_scan_with_filter(self): - mock_filter = object() - self._scan_test_helper(filter_=mock_filter) - - def test_scan_with_no_results(self): - limit = 1337 - timestamp = object() - self._scan_test_helper(timestamp=timestamp, limit=limit) - - def test_scan_with_results(self): - from gcloud.bigtable.row_data import PartialRowData - - row_key1 = 'row-key1' - row1 = PartialRowData(row_key1) - rr_result = _MockPartialRowsData(rows={row_key1: row1}, iterations=1) - - include_timestamp = object() - expected_result = [(row_key1, {})] - self._scan_test_helper(include_timestamp=include_timestamp, - rr_result=rr_result, - expected_result=expected_result) - - def test_put(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - from gcloud.bigtable.happybase.table import _WAL_SENTINEL - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - batches_created = [] - - def make_batch(*args, **kwargs): - result = _MockBatch(*args, **kwargs) - batches_created.append(result) - return result - - row = 'row-key' - data = {'fam:col': 'foo'} - timestamp = None - with _Monkey(MUT, Batch=make_batch): - result = table.put(row, data, timestamp=timestamp) - - # There is no return value. - self.assertEqual(result, None) - - # Check how the batch was created and used. 
- batch, = batches_created - self.assertTrue(isinstance(batch, _MockBatch)) - self.assertEqual(batch.args, (table,)) - expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': None, - 'transaction': False, - 'wal': _WAL_SENTINEL, - } - self.assertEqual(batch.kwargs, expected_kwargs) - # Make sure it was a successful context manager - self.assertEqual(batch.exit_vals, [(None, None, None)]) - self.assertEqual(batch.put_args, [(row, data)]) - self.assertEqual(batch.delete_args, []) - - def test_delete(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - from gcloud.bigtable.happybase.table import _WAL_SENTINEL - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - batches_created = [] - - def make_batch(*args, **kwargs): - result = _MockBatch(*args, **kwargs) - batches_created.append(result) - return result - - row = 'row-key' - columns = ['fam:col1', 'fam:col2'] - timestamp = None - with _Monkey(MUT, Batch=make_batch): - result = table.delete(row, columns=columns, timestamp=timestamp) - - # There is no return value. - self.assertEqual(result, None) - - # Check how the batch was created and used. - batch, = batches_created - self.assertTrue(isinstance(batch, _MockBatch)) - self.assertEqual(batch.args, (table,)) - expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': None, - 'transaction': False, - 'wal': _WAL_SENTINEL, - } - self.assertEqual(batch.kwargs, expected_kwargs) - # Make sure it was a successful context manager - self.assertEqual(batch.exit_vals, [(None, None, None)]) - self.assertEqual(batch.put_args, []) - self.assertEqual(batch.delete_args, [(row, columns)]) - - def test_batch(self): - from gcloud._testing import _Monkey - from gcloud.bigtable.happybase import table as MUT - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - - timestamp = object() - batch_size = 42 - transaction = False # Must be False when batch_size is non-null - wal = object() - - with _Monkey(MUT, Batch=_MockBatch): - result = table.batch(timestamp=timestamp, batch_size=batch_size, - transaction=transaction, wal=wal) - - self.assertTrue(isinstance(result, _MockBatch)) - self.assertEqual(result.args, (table,)) - expected_kwargs = { - 'timestamp': timestamp, - 'batch_size': batch_size, - 'transaction': transaction, - 'wal': wal, - } - self.assertEqual(result.kwargs, expected_kwargs) - - def test_counter_get(self): - klass = self._getTargetClass() - counter_value = 1337 - - class TableWithInc(klass): - - incremented = [] - value = counter_value - - def counter_inc(self, row, column, value=1): - self.incremented.append((row, column, value)) - self.value += value - return self.value - - name = 'table-name' - connection = None - table = TableWithInc(name, connection) - - row = 'row-key' - column = 'fam:col1' - self.assertEqual(TableWithInc.incremented, []) - result = table.counter_get(row, column) - self.assertEqual(result, counter_value) - self.assertEqual(TableWithInc.incremented, [(row, column, 0)]) - - def test_counter_dec(self): - klass = self._getTargetClass() - counter_value = 42 - - class TableWithInc(klass): - - incremented = [] - value = counter_value - - def counter_inc(self, row, column, value=1): - self.incremented.append((row, column, value)) - self.value += value - return self.value - - name = 'table-name' - connection = None - table = TableWithInc(name, connection) - - row = 'row-key' - column = 'fam:col1' - dec_value = 987 - self.assertEqual(TableWithInc.incremented, []) - 
result = table.counter_dec(row, column, value=dec_value) - self.assertEqual(result, counter_value - dec_value) - self.assertEqual(TableWithInc.incremented, [(row, column, -dec_value)]) - - def _counter_inc_helper(self, row, column, value, commit_result): - import six - - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - # Mock the return values. - table._low_level_table = _MockLowLevelTable() - table._low_level_table.row_values[row] = row_obj = _MockLowLevelRow( - row, commit_result=commit_result) - - self.assertFalse(row_obj._append) - result = table.counter_inc(row, column, value=value) - self.assertTrue(row_obj._append) - - incremented_value = value + _MockLowLevelRow.COUNTER_DEFAULT - self.assertEqual(result, incremented_value) - - # Check the row values returned. - row_obj = table._low_level_table.row_values[row] - if isinstance(column, six.binary_type): - column = column.decode('utf-8') - self.assertEqual(row_obj.counts, - {tuple(column.split(':')): incremented_value}) - - def test_counter_set(self): - name = 'table-name' - connection = None - table = self._makeOne(name, connection) - - row = 'row-key' - column = 'fam:col1' - value = 42 - with self.assertRaises(NotImplementedError): - table.counter_set(row, column, value=value) - - def test_counter_inc(self): - import struct - - row = 'row-key' - col_fam = u'fam' - col_qual = u'col1' - column = col_fam + u':' + col_qual - value = 42 - packed_value = struct.pack('>q', value) - fake_timestamp = None - commit_result = { - col_fam: { - col_qual: [(packed_value, fake_timestamp)], - } - } - self._counter_inc_helper(row, column, value, commit_result) - - def test_counter_inc_column_bytes(self): - import struct - - row = 'row-key' - col_fam = b'fam' - col_qual = b'col1' - column = col_fam + b':' + col_qual - value = 42 - packed_value = struct.pack('>q', value) - fake_timestamp = None - commit_result = { - col_fam.decode('utf-8'): { - col_qual.decode('utf-8'): [(packed_value, fake_timestamp)], - } - } - self._counter_inc_helper(row, column, value, commit_result) - - def test_counter_inc_bad_result(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual - value = 42 - commit_result = None - with self.assertRaises(TypeError): - self._counter_inc_helper(row, column, value, commit_result) - - def test_counter_inc_result_key_error(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual - value = 42 - commit_result = {} - with self.assertRaises(KeyError): - self._counter_inc_helper(row, column, value, commit_result) - - def test_counter_inc_result_nested_key_error(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual - value = 42 - commit_result = {col_fam: {}} - with self.assertRaises(KeyError): - self._counter_inc_helper(row, column, value, commit_result) - - def test_counter_inc_result_non_unique_cell(self): - row = 'row-key' - col_fam = 'fam' - col_qual = 'col1' - column = col_fam + ':' + col_qual - value = 42 - fake_timestamp = None - packed_value = None - commit_result = { - col_fam: { - col_qual: [ - (packed_value, fake_timestamp), - (packed_value, fake_timestamp), - ], - } - } - with self.assertRaises(ValueError): - self._counter_inc_helper(row, column, value, commit_result) - - -class Test__gc_rule_to_dict(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _gc_rule_to_dict - return _gc_rule_to_dict(*args, **kwargs) - - 
def test_with_null(self): - gc_rule = None - result = self._callFUT(gc_rule) - self.assertEqual(result, {}) - - def test_with_max_versions(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - - max_versions = 2 - gc_rule = MaxVersionsGCRule(max_versions) - result = self._callFUT(gc_rule) - expected_result = {'max_versions': max_versions} - self.assertEqual(result, expected_result) - - def test_with_max_age(self): - import datetime - from gcloud.bigtable.column_family import MaxAgeGCRule - - time_to_live = 101 - max_age = datetime.timedelta(seconds=time_to_live) - gc_rule = MaxAgeGCRule(max_age) - result = self._callFUT(gc_rule) - expected_result = {'time_to_live': time_to_live} - self.assertEqual(result, expected_result) - - def test_with_non_gc_rule(self): - gc_rule = object() - result = self._callFUT(gc_rule) - self.assertTrue(result is gc_rule) - - def test_with_gc_rule_union(self): - from gcloud.bigtable.column_family import GCRuleUnion - - gc_rule = GCRuleUnion(rules=[]) - result = self._callFUT(gc_rule) - self.assertTrue(result is gc_rule) - - def test_with_intersection_other_than_two(self): - from gcloud.bigtable.column_family import GCRuleIntersection - - gc_rule = GCRuleIntersection(rules=[]) - result = self._callFUT(gc_rule) - self.assertTrue(result is gc_rule) - - def test_with_intersection_two_max_num_versions(self): - from gcloud.bigtable.column_family import GCRuleIntersection - from gcloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxVersionsGCRule(2) - gc_rule = GCRuleIntersection(rules=[rule1, rule2]) - result = self._callFUT(gc_rule) - self.assertTrue(result is gc_rule) - - def test_with_intersection_two_rules(self): - import datetime - from gcloud.bigtable.column_family import GCRuleIntersection - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - time_to_live = 101 - max_age = datetime.timedelta(seconds=time_to_live) - rule1 = MaxAgeGCRule(max_age) - max_versions = 2 - rule2 = MaxVersionsGCRule(max_versions) - gc_rule = GCRuleIntersection(rules=[rule1, rule2]) - result = self._callFUT(gc_rule) - expected_result = { - 'max_versions': max_versions, - 'time_to_live': time_to_live, - } - self.assertEqual(result, expected_result) - - def test_with_intersection_two_nested_rules(self): - from gcloud.bigtable.column_family import GCRuleIntersection - - rule1 = GCRuleIntersection(rules=[]) - rule2 = GCRuleIntersection(rules=[]) - gc_rule = GCRuleIntersection(rules=[rule1, rule2]) - result = self._callFUT(gc_rule) - self.assertTrue(result is gc_rule) - - -class Test__string_successor(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _string_successor - return _string_successor(*args, **kwargs) - - def test_with_alphanumeric(self): - self.assertEqual(self._callFUT(b'boa'), b'bob') - self.assertEqual(self._callFUT(b'abc1'), b'abc2') - - def test_with_last_byte(self): - self.assertEqual(self._callFUT(b'boa\xff'), b'bob') - - def test_with_empty_string(self): - self.assertEqual(self._callFUT(b''), b'') - - def test_with_all_last_bytes(self): - self.assertEqual(self._callFUT(b'\xff\xff\xff'), b'') - - def test_with_unicode_input(self): - self.assertEqual(self._callFUT(u'boa'), b'bob') - - -class Test__convert_to_time_range(unittest2.TestCase): - - def _callFUT(self, timestamp=None): - from gcloud.bigtable.happybase.table import _convert_to_time_range - return 
_convert_to_time_range(timestamp=timestamp) - - def test_null(self): - timestamp = None - result = self._callFUT(timestamp=timestamp) - self.assertEqual(result, None) - - def test_invalid_type(self): - timestamp = object() - with self.assertRaises(TypeError): - self._callFUT(timestamp=timestamp) - - def test_success(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable.row_filters import TimestampRange - - timestamp = 1441928298571 - ts_dt = _datetime_from_microseconds(1000 * timestamp) - result = self._callFUT(timestamp=timestamp) - self.assertTrue(isinstance(result, TimestampRange)) - self.assertEqual(result.start, None) - self.assertEqual(result.end, ts_dt) - - -class Test__cells_to_pairs(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _cells_to_pairs - return _cells_to_pairs(*args, **kwargs) - - def test_without_timestamp(self): - from gcloud.bigtable.row_data import Cell - - value1 = 'foo' - cell1 = Cell(value=value1, timestamp=None) - value2 = 'bar' - cell2 = Cell(value=value2, timestamp=None) - - result = self._callFUT([cell1, cell2]) - self.assertEqual(result, [value1, value2]) - - def test_with_timestamp(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable.row_data import Cell - - value1 = 'foo' - ts1_millis = 1221934570148 - ts1 = _datetime_from_microseconds(ts1_millis * 1000) - cell1 = Cell(value=value1, timestamp=ts1) - - value2 = 'bar' - ts2_millis = 1221955575548 - ts2 = _datetime_from_microseconds(ts2_millis * 1000) - cell2 = Cell(value=value2, timestamp=ts2) - - result = self._callFUT([cell1, cell2], include_timestamp=True) - self.assertEqual(result, - [(value1, ts1_millis), (value2, ts2_millis)]) - - -class Test__partial_row_to_dict(unittest2.TestCase): - - def _callFUT(self, partial_row_data, include_timestamp=False): - from gcloud.bigtable.happybase.table import _partial_row_to_dict - return _partial_row_to_dict(partial_row_data, - include_timestamp=include_timestamp) - - def test_without_timestamp(self): - from gcloud.bigtable.row_data import Cell - from gcloud.bigtable.row_data import PartialRowData - - row_data = PartialRowData(b'row-key') - val1 = b'hi-im-bytes' - val2 = b'bi-im-hytes' - row_data._cells[u'fam1'] = { - b'col1': [Cell(val1, None)], - b'col2': [Cell(val2, None)], - } - result = self._callFUT(row_data) - expected_result = { - b'fam1:col1': val1, - b'fam1:col2': val2, - } - self.assertEqual(result, expected_result) - - def test_with_timestamp(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable.row_data import Cell - from gcloud.bigtable.row_data import PartialRowData - - row_data = PartialRowData(b'row-key') - val1 = b'hi-im-bytes' - ts1_millis = 1221934570148 - ts1 = _datetime_from_microseconds(ts1_millis * 1000) - val2 = b'bi-im-hytes' - ts2_millis = 1331934880000 - ts2 = _datetime_from_microseconds(ts2_millis * 1000) - row_data._cells[u'fam1'] = { - b'col1': [Cell(val1, ts1)], - b'col2': [Cell(val2, ts2)], - } - result = self._callFUT(row_data, include_timestamp=True) - expected_result = { - b'fam1:col1': (val1, ts1_millis), - b'fam1:col2': (val2, ts2_millis), - } - self.assertEqual(result, expected_result) - - -class Test__filter_chain_helper(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _filter_chain_helper - return _filter_chain_helper(*args, **kwargs) - - def test_no_filters(self): - with self.assertRaises(ValueError): - 
self._callFUT() - - def test_single_filter(self): - from gcloud.bigtable.row_filters import CellsColumnLimitFilter - - versions = 1337 - result = self._callFUT(versions=versions) - self.assertTrue(isinstance(result, CellsColumnLimitFilter)) - # Relies on the fact that RowFilter instances can - # only have one value set. - self.assertEqual(result.num_cells, versions) - - def test_existing_filters(self): - from gcloud.bigtable.row_filters import CellsColumnLimitFilter - - filters = [] - versions = 1337 - result = self._callFUT(versions=versions, filters=filters) - # Make sure filters has grown. - self.assertEqual(filters, [result]) - - self.assertTrue(isinstance(result, CellsColumnLimitFilter)) - # Relies on the fact that RowFilter instances can - # only have one value set. - self.assertEqual(result.num_cells, versions) - - def _column_helper(self, num_filters, versions=None, timestamp=None, - column=None, col_fam=None, qual=None): - from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter - from gcloud.bigtable.row_filters import FamilyNameRegexFilter - from gcloud.bigtable.row_filters import RowFilterChain - - if col_fam is None: - col_fam = 'cf1' - if qual is None: - qual = 'qual' - if column is None: - column = col_fam + ':' + qual - result = self._callFUT(column, versions=versions, timestamp=timestamp) - self.assertTrue(isinstance(result, RowFilterChain)) - - self.assertEqual(len(result.filters), num_filters) - fam_filter = result.filters[0] - qual_filter = result.filters[1] - self.assertTrue(isinstance(fam_filter, FamilyNameRegexFilter)) - self.assertTrue(isinstance(qual_filter, ColumnQualifierRegexFilter)) - - # Relies on the fact that RowFilter instances can - # only have one value set. - self.assertEqual(fam_filter.regex, col_fam.encode('utf-8')) - self.assertEqual(qual_filter.regex, qual.encode('utf-8')) - - return result - - def test_column_only(self): - self._column_helper(num_filters=2) - - def test_column_bytes(self): - self._column_helper(num_filters=2, column=b'cfB:qualY', - col_fam=u'cfB', qual=u'qualY') - - def test_column_unicode(self): - self._column_helper(num_filters=2, column=u'cfU:qualN', - col_fam=u'cfU', qual=u'qualN') - - def test_with_versions(self): - from gcloud.bigtable.row_filters import CellsColumnLimitFilter - - versions = 11 - result = self._column_helper(num_filters=3, versions=versions) - - version_filter = result.filters[2] - self.assertTrue(isinstance(version_filter, CellsColumnLimitFilter)) - # Relies on the fact that RowFilter instances can - # only have one value set. - self.assertEqual(version_filter.num_cells, versions) - - def test_with_timestamp(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable.row_filters import TimestampRange - from gcloud.bigtable.row_filters import TimestampRangeFilter - - timestamp = 1441928298571 - result = self._column_helper(num_filters=3, timestamp=timestamp) - - range_filter = result.filters[2] - self.assertTrue(isinstance(range_filter, TimestampRangeFilter)) - # Relies on the fact that RowFilter instances can - # only have one value set. 
- time_range = range_filter.range_ - self.assertTrue(isinstance(time_range, TimestampRange)) - self.assertEqual(time_range.start, None) - ts_dt = _datetime_from_microseconds(1000 * timestamp) - self.assertEqual(time_range.end, ts_dt) - - def test_with_all_options(self): - versions = 11 - timestamp = 1441928298571 - self._column_helper(num_filters=4, versions=versions, - timestamp=timestamp) - - -class Test__columns_filter_helper(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _columns_filter_helper - return _columns_filter_helper(*args, **kwargs) - - def test_no_columns(self): - columns = [] - with self.assertRaises(ValueError): - self._callFUT(columns) - - def test_single_column(self): - from gcloud.bigtable.row_filters import FamilyNameRegexFilter - - col_fam = 'cf1' - columns = [col_fam] - result = self._callFUT(columns) - expected_result = FamilyNameRegexFilter(col_fam) - self.assertEqual(result, expected_result) - - def test_column_and_column_families(self): - from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter - from gcloud.bigtable.row_filters import FamilyNameRegexFilter - from gcloud.bigtable.row_filters import RowFilterChain - from gcloud.bigtable.row_filters import RowFilterUnion - - col_fam1 = 'cf1' - col_fam2 = 'cf2' - col_qual2 = 'qual2' - columns = [col_fam1, col_fam2 + ':' + col_qual2] - result = self._callFUT(columns) - - self.assertTrue(isinstance(result, RowFilterUnion)) - self.assertEqual(len(result.filters), 2) - filter1 = result.filters[0] - filter2 = result.filters[1] - - self.assertTrue(isinstance(filter1, FamilyNameRegexFilter)) - self.assertEqual(filter1.regex, col_fam1.encode('utf-8')) - - self.assertTrue(isinstance(filter2, RowFilterChain)) - filter2a, filter2b = filter2.filters - self.assertTrue(isinstance(filter2a, FamilyNameRegexFilter)) - self.assertEqual(filter2a.regex, col_fam2.encode('utf-8')) - self.assertTrue(isinstance(filter2b, ColumnQualifierRegexFilter)) - self.assertEqual(filter2b.regex, col_qual2.encode('utf-8')) - - -class Test__row_keys_filter_helper(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.happybase.table import _row_keys_filter_helper - return _row_keys_filter_helper(*args, **kwargs) - - def test_no_rows(self): - row_keys = [] - with self.assertRaises(ValueError): - self._callFUT(row_keys) - - def test_single_row(self): - from gcloud.bigtable.row_filters import RowKeyRegexFilter - - row_key = b'row-key' - row_keys = [row_key] - result = self._callFUT(row_keys) - expected_result = RowKeyRegexFilter(row_key) - self.assertEqual(result, expected_result) - - def test_many_rows(self): - from gcloud.bigtable.row_filters import RowFilterUnion - from gcloud.bigtable.row_filters import RowKeyRegexFilter - - row_key1 = b'row-key1' - row_key2 = b'row-key2' - row_key3 = b'row-key3' - row_keys = [row_key1, row_key2, row_key3] - result = self._callFUT(row_keys) - - filter1 = RowKeyRegexFilter(row_key1) - filter2 = RowKeyRegexFilter(row_key2) - filter3 = RowKeyRegexFilter(row_key3) - expected_result = RowFilterUnion(filters=[filter1, filter2, filter3]) - self.assertEqual(result, expected_result) - - -class _Connection(object): - - def __init__(self, cluster): - self._cluster = cluster - - -class _MockLowLevelColumnFamily(object): - - def __init__(self, column_family_id, gc_rule=None): - self.column_family_id = column_family_id - self.gc_rule = gc_rule - - -class _MockLowLevelTable(object): - - def __init__(self, *args, **kwargs): - self.args 
= args - self.kwargs = kwargs - self.list_column_families_calls = 0 - self.column_families = {} - self.row_values = {} - self.read_row_calls = [] - self.read_row_result = None - self.read_rows_calls = [] - self.read_rows_result = None - - def list_column_families(self): - self.list_column_families_calls += 1 - return self.column_families - - def row(self, row_key, append=None): - result = self.row_values[row_key] - result._append = append - return result - - def read_row(self, *args, **kwargs): - self.read_row_calls.append((args, kwargs)) - return self.read_row_result - - def read_rows(self, *args, **kwargs): - self.read_rows_calls.append((args, kwargs)) - return self.read_rows_result - - -class _MockLowLevelRow(object): - - COUNTER_DEFAULT = 0 - - def __init__(self, row_key, commit_result=None): - self.row_key = row_key - self._append = False - self.counts = {} - self.commit_result = commit_result - - def increment_cell_value(self, column_family_id, column, int_value): - count = self.counts.setdefault((column_family_id, column), - self.COUNTER_DEFAULT) - self.counts[(column_family_id, column)] = count + int_value - - def commit(self): - return self.commit_result - - -class _MockBatch(object): - - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - self.exit_vals = [] - self.put_args = [] - self.delete_args = [] - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.exit_vals.append((exc_type, exc_value, traceback)) - - def put(self, *args): - self.put_args.append(args) - - def delete(self, *args): - self.delete_args.append(args) - - -class _MockPartialRowsData(object): - - def __init__(self, rows=None, iterations=0): - self.rows = rows or {} - self.consume_all_calls = 0 - self.consume_next_calls = 0 - self.iterations = iterations - - def consume_all(self): - self.consume_all_calls += 1 - - def consume_next(self): - self.consume_next_calls += 1 - if self.consume_next_calls > self.iterations: - raise StopIteration diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py deleted file mode 100644 index cb9ce2e67e3d..000000000000 --- a/gcloud/bigtable/row.py +++ /dev/null @@ -1,886 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""User friendly container for Google Cloud Bigtable Row.""" - - -import struct - -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud._helpers import _microseconds_from_datetime -from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - -_PACK_I64 = struct.Struct('>q').pack - -MAX_MUTATIONS = 100000 -"""The maximum number of mutations that a row can accumulate.""" - - -class Row(object): - """Base representation of a Google Cloud Bigtable Row. 
- - This class has three subclasses corresponding to the three - RPC methods for sending row mutations: - - * :class:`DirectRow` for ``MutateRow`` - * :class:`ConditionalRow` for ``CheckAndMutateRow`` - * :class:`AppendRow` for ``ReadModifyWriteRow`` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - def __init__(self, row_key, table): - self._row_key = _to_bytes(row_key) - self._table = table - - -class _SetDeleteRow(Row): - """Row helper for setting or deleting cell values. - - Implements helper methods to add mutations to set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - ALL_COLUMNS = object() - """Sentinel value used to indicate all columns in a column family.""" - - def _get_mutations(self, state): - """Gets the list of mutations for a given state. - - This method intended to be implemented by subclasses. - - ``state`` may not need to be used by all subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :raises: :class:`NotImplementedError ` - always. - """ - raise NotImplementedError - - def _set_cell(self, column_family_id, column, value, timestamp=None, - state=None): - """Helper for :meth:`set_cell` - - Adds a mutation to set the value in a specific cell. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - column = _to_bytes(column) - if isinstance(value, six.integer_types): - value = _PACK_I64(value) - value = _to_bytes(value) - if timestamp is None: - # Use -1 for current Bigtable server time. - timestamp_micros = -1 - else: - timestamp_micros = _microseconds_from_datetime(timestamp) - # Truncate to millisecond granularity. - timestamp_micros -= (timestamp_micros % 1000) - - mutation_val = data_pb2.Mutation.SetCell( - family_name=column_family_id, - column_qualifier=column, - timestamp_micros=timestamp_micros, - value=value, - ) - mutation_pb = data_pb2.Mutation(set_cell=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete(self, state=None): - """Helper for :meth:`delete` - - Adds a delete mutation (for the entire row) to the accumulated - mutations. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. 
- """ - mutation_val = data_pb2.Mutation.DeleteFromRow() - mutation_pb = data_pb2.Mutation(delete_from_row=mutation_val) - self._get_mutations(state).append(mutation_pb) - - def _delete_cells(self, column_family_id, columns, time_range=None, - state=None): - """Helper for :meth:`delete_cell` and :meth:`delete_cells`. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that is passed along to - :meth:`_get_mutations`. - """ - mutations_list = self._get_mutations(state) - if columns is self.ALL_COLUMNS: - mutation_val = data_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id, - ) - mutation_pb = data_pb2.Mutation(delete_from_family=mutation_val) - mutations_list.append(mutation_pb) - else: - delete_kwargs = {} - if time_range is not None: - delete_kwargs['time_range'] = time_range.to_pb() - - to_append = [] - for column in columns: - column = _to_bytes(column) - # time_range will never change if present, but the rest of - # delete_kwargs will - delete_kwargs.update( - family_name=column_family_id, - column_qualifier=column, - ) - mutation_val = data_pb2.Mutation.DeleteFromColumn( - **delete_kwargs) - mutation_pb = data_pb2.Mutation( - delete_from_column=mutation_val) - to_append.append(mutation_pb) - - # We don't add the mutations until all columns have been - # processed without error. - mutations_list.extend(to_append) - - -class DirectRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending "direct" mutations. - - These mutations directly set or delete cell contents: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - These methods can be used directly:: - - >>> row = table.row(b'row-key1') - >>> row.set_cell(u'fam', b'col1', b'cell-val') - >>> row.delete_cell(u'fam', b'col2') - - .. note:: - - A :class:`DirectRow` accumulates mutations locally via the - :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and - :meth:`delete_cells` methods. To actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - def __init__(self, row_key, table): - super(DirectRow, self).__init__(row_key, table) - self._pb_mutations = [] - - def _get_mutations(self, state): # pylint: disable=unused-argument - """Gets the list of mutations for a given state. - - ``state`` is unused by :class:`DirectRow` but is used by - subclasses. - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - return self._pb_mutations - - def set_cell(self, column_family_id, column, value, timestamp=None): - """Sets a value in this row. 
- - The cell is determined by the ``row_key`` of this :class:`DirectRow` - and the ``column``. The ``column`` must be in an existing - :class:`.ColumnFamily` (as determined by ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, - state=None) - - def delete(self): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - """ - self._delete(state=None) - - def delete_cell(self, column_family_id, column, time_range=None): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells(column_family_id, [column], time_range=time_range, - state=None) - - def delete_cells(self, column_family_id, columns, time_range=None): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then - the entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - """ - self._delete_cells(column_family_id, columns, time_range=time_range, - state=None) - - def commit(self): - """Makes a ``MutateRow`` API request. - - If no mutations have been created in the row, no request is made. 
- - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations to an empty list. - - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - mutations_list = self._get_mutations(None) - num_mutations = len(mutations_list) - if num_mutations == 0: - return - if num_mutations > MAX_MUTATIONS: - raise ValueError('%d total mutations exceed the maximum allowable ' - '%d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.MutateRowRequest( - table_name=self._table.name, - row_key=self._row_key, - mutations=mutations_list, - ) - # We expect a `google.protobuf.empty_pb2.Empty` - client = self._table._cluster._client - client._data_stub.MutateRow(request_pb, client.timeout_seconds) - self.clear() - - def clear(self): - """Removes all currently accumulated mutations on the current row.""" - del self._pb_mutations[:] - - -class ConditionalRow(_SetDeleteRow): - """Google Cloud Bigtable Row for sending mutations conditionally. - - Each mutation has an associated state: :data:`True` or :data:`False`. - When :meth:`commit`-ed, the mutations for the :data:`True` - state will be applied if the filter matches any cells in - the row, otherwise the :data:`False` state will be applied. - - A :class:`ConditionalRow` accumulates mutations in the same way a - :class:`DirectRow` does: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - with the only change the extra ``state`` parameter:: - - >>> row_cond = table.row(b'row-key2', filter_=row_filter) - >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) - >>> row_cond.delete_cell(u'fam', b'col', state=False) - - .. note:: - - As with :class:`DirectRow`, to actually send these mutations to the - Google Cloud Bigtable API, you must call :meth:`commit`. - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - - :type filter_: :class:`.RowFilter` - :param filter_: Filter to be used for conditional mutations. - """ - def __init__(self, row_key, table, filter_): - super(ConditionalRow, self).__init__(row_key, table) - self._filter = filter_ - self._true_pb_mutations = [] - self._false_pb_mutations = [] - - def _get_mutations(self, state): - """Gets the list of mutations for a given state. - - Over-ridden so that the state can be used in: - - * :meth:`set_cell` - * :meth:`delete` - * :meth:`delete_cell` - * :meth:`delete_cells` - - :type state: bool - :param state: The state that the mutation should be - applied in. - - :rtype: list - :returns: The list to add new mutations to (for the current state). - """ - if state: - return self._true_pb_mutations - else: - return self._false_pb_mutations - - def commit(self): - """Makes a ``CheckAndMutateRow`` API request. - - If no mutations have been created in the row, no request is made. - - The mutations will be applied conditionally, based on whether the - filter matches any cells in the :class:`ConditionalRow` or not. (Each - method which adds a mutation has a ``state`` parameter for this - purpose.) - - Mutations are applied atomically and in order, meaning that earlier - mutations can be masked / negated by later ones. 
Cells already present - in the row are left unchanged unless explicitly changed by a mutation. - - After committing the accumulated mutations, resets the local - mutations. - - :rtype: bool - :returns: Flag indicating if the filter was matched (which also - indicates which set of mutations were applied by the server). - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - true_mutations = self._get_mutations(state=True) - false_mutations = self._get_mutations(state=False) - num_true_mutations = len(true_mutations) - num_false_mutations = len(false_mutations) - if num_true_mutations == 0 and num_false_mutations == 0: - return - if (num_true_mutations > MAX_MUTATIONS or - num_false_mutations > MAX_MUTATIONS): - raise ValueError( - 'Exceed the maximum allowable mutations (%d). Had %s true ' - 'mutations and %d false mutations.' % ( - MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - - request_pb = messages_pb2.CheckAndMutateRowRequest( - table_name=self._table.name, - row_key=self._row_key, - predicate_filter=self._filter.to_pb(), - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - # We expect a `.messages_pb2.CheckAndMutateRowResponse` - client = self._table._cluster._client - resp = client._data_stub.CheckAndMutateRow( - request_pb, client.timeout_seconds) - self.clear() - return resp.predicate_matched - - # pylint: disable=arguments-differ - def set_cell(self, column_family_id, column, value, timestamp=None, - state=True): - """Sets a value in this row. - - The cell is determined by the ``row_key`` of this - :class:`ConditionalRow` and the ``column``. The ``column`` must be in - an existing :class:`.ColumnFamily` (as determined by - ``column_family_id``). - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes or :class:`int` - :param value: The value to set in the cell. If an integer is used, - will be interpreted as a 64-bit big-endian signed - integer (8 bytes). - - :type timestamp: :class:`datetime.datetime` - :param timestamp: (Optional) The timestamp of the operation. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, - state=state) - - def delete(self, state=True): - """Deletes this row from the table. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete(state=state) - - def delete_cell(self, column_family_id, column, time_range=None, - state=True): - """Deletes cell in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. 
To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family that will have a - cell deleted. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells(column_family_id, [column], time_range=time_range, - state=state) - - def delete_cells(self, column_family_id, columns, time_range=None, - state=True): - """Deletes cells in this row. - - .. note:: - - This method adds a mutation to the accumulated mutations on this - row, but does not make an API request. To actually - send an API request (with the mutations) to the Google Cloud - Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column - or columns with cells being deleted. Must be - of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type columns: :class:`list` of :class:`str` / - :func:`unicode `, or :class:`object` - :param columns: The columns within the column family that will have - cells deleted. If :attr:`ALL_COLUMNS` is used then the - entire column family will be deleted from the row. - - :type time_range: :class:`TimestampRange` - :param time_range: (Optional) The range of time within which cells - should be deleted. - - :type state: bool - :param state: (Optional) The state that the mutation should be - applied in. Defaults to :data:`True`. - """ - self._delete_cells(column_family_id, columns, time_range=time_range, - state=state) - # pylint: enable=arguments-differ - - def clear(self): - """Removes all currently accumulated mutations on the current row.""" - del self._true_pb_mutations[:] - del self._false_pb_mutations[:] - - -class AppendRow(Row): - """Google Cloud Bigtable Row for sending append mutations. - - These mutations are intended to augment the value of an existing cell - and uses the methods: - - * :meth:`append_cell_value` - * :meth:`increment_cell_value` - - The first works by appending bytes and the second by incrementing an - integer (stored in the cell as 8 bytes). In either case, if the - cell is empty, assumes the default empty value (empty string for - bytes or and 0 for integer). - - :type row_key: bytes - :param row_key: The key for the current row. - - :type table: :class:`Table ` - :param table: The table that owns the row. - """ - - def __init__(self, row_key, table): - super(AppendRow, self).__init__(row_key, table) - self._rule_pb_list = [] - - def clear(self): - """Removes all currently accumulated modifications on current row.""" - del self._rule_pb_list[:] - - def append_cell_value(self, column_family_id, column, value): - """Appends a value to an existing cell. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
- - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type value: bytes - :param value: The value to append to the existing value in the cell. If - the targeted cell is unset, it will be treated as - containing the empty string. - """ - column = _to_bytes(column) - value = _to_bytes(value) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - append_value=value) - self._rule_pb_list.append(rule_pb) - - def increment_cell_value(self, column_family_id, column, int_value): - """Increments a value in an existing cell. - - Assumes the value in the cell is stored as a 64 bit integer - serialized to bytes. - - .. note:: - - This method adds a read-modify rule protobuf to the accumulated - read-modify rules on this row, but does not make an API - request. To actually send an API request (with the rules) to the - Google Cloud Bigtable API, call :meth:`commit`. - - :type column_family_id: str - :param column_family_id: The column family that contains the column. - Must be of the form - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type column: bytes - :param column: The column within the column family where the cell - is located. - - :type int_value: int - :param int_value: The value to increment the existing value in the cell - by. If the targeted cell is unset, it will be treated - as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit - big-endian signed integer), or the entire request - will fail. - """ - column = _to_bytes(column) - rule_pb = data_pb2.ReadModifyWriteRule(family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value) - self._rule_pb_list.append(rule_pb) - - def commit(self): - """Makes a ``ReadModifyWriteRow`` API request. - - This commits modifications made by :meth:`append_cell_value` and - :meth:`increment_cell_value`. If no modifications were made, makes - no API request and just returns ``{}``. - - Modifies a row atomically, reading the latest existing - timestamp / value from the specified columns and writing a new value by - appending / incrementing. The new cell created uses either the current - server time or the highest timestamp of a cell in that column (if it - exceeds the server time). - - After committing the accumulated mutations, resets the local mutations. - - .. code:: python - - >>> append_row.commit() - { - u'col-fam-id': { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - }, - u'col-fam-id2': { - b'col-name3-but-other-fam': [ - (b'foo', datetime.datetime(...)), - ], - }, - } - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. - """ - num_mutations = len(self._rule_pb_list) - if num_mutations == 0: - return {} - if num_mutations > MAX_MUTATIONS: - raise ValueError('%d total append mutations exceed the maximum ' - 'allowable %d.' 
% (num_mutations, MAX_MUTATIONS)) - request_pb = messages_pb2.ReadModifyWriteRowRequest( - table_name=self._table.name, - row_key=self._row_key, - rules=self._rule_pb_list, - ) - # We expect a `.data_pb2.Row` - client = self._table._cluster._client - row_response = client._data_stub.ReadModifyWriteRow( - request_pb, client.timeout_seconds) - - # Reset modifications after commit-ing request. - self.clear() - - # NOTE: We expect row_response.key == self._row_key but don't check. - return _parse_rmw_row_response(row_response) - - -def _parse_rmw_row_response(row_response): - """Parses the response to a ``ReadModifyWriteRow`` request. - - :type row_response: :class:`.data_pb2.Row` - :param row_response: The response row (with only modified cells) from a - ``ReadModifyWriteRow`` request. - - :rtype: dict - :returns: The new contents of all modified cells. Returned as a - dictionary of column families, each of which holds a - dictionary of columns. Each column contains a list of cells - modified. Each cell is represented with a two-tuple with the - value (in bytes) and the timestamp for the cell. For example: - - .. code:: python - - { - u'col-fam-id': { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - }, - u'col-fam-id2': { - b'col-name3-but-other-fam': [ - (b'foo', datetime.datetime(...)), - ], - }, - } - """ - result = {} - for column_family in row_response.families: - column_family_id, curr_family = _parse_family_pb(column_family) - result[column_family_id] = curr_family - return result - - -def _parse_family_pb(family_pb): - """Parses a Family protobuf into a dictionary. - - :type family_pb: :class:`._generated.bigtable_data_pb2.Family` - :param family_pb: A protobuf - - :rtype: tuple - :returns: A string and dictionary. The string is the name of the - column family and the dictionary has column names (within the - family) as keys and cell lists as values. Each cell is - represented with a two-tuple with the value (in bytes) and the - timestamp for the cell. For example: - - .. code:: python - - { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - } - """ - result = {} - for column in family_pb.columns: - result[column.qualifier] = cells = [] - for cell in column.cells: - val_pair = ( - cell.value, - _datetime_from_microseconds(cell.timestamp_micros), - ) - cells.append(val_pair) - - return family_pb.name, result diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py deleted file mode 100644 index e64a242f8507..000000000000 --- a/gcloud/bigtable/row_data.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
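# Editor's note: a minimal usage sketch (not part of the deleted module) of
# the classes removed below. ``PartialRowsData`` wraps a streamed ``ReadRows``
# response iterator and accumulates ``PartialRowData`` objects keyed by row
# key; ``response_iterator`` here is a stand-in for the gRPC stream returned
# by the data client:
#
#     rows_data = PartialRowsData(response_iterator)
#     rows_data.consume_all()
#     for row_key, row in rows_data.rows.items():
#         print(row_key, row.to_dict())
#
# ``to_dict()`` flattens each row into a ``{b'family:qualifier': [cells]}``
# mapping, which is what the HappyBase compatibility layer builds on.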
- -"""Container for Google Cloud Bigtable Cells and Streaming Row Contents.""" - - -import copy -import six - -from gcloud._helpers import _datetime_from_microseconds -from gcloud._helpers import _to_bytes - - -class Cell(object): - """Representation of a Google Cloud Bigtable Cell. - - :type value: bytes - :param value: The value stored in the cell. - - :type timestamp: :class:`datetime.datetime` - :param timestamp: The timestamp when the cell was stored. - - :type labels: list - :param labels: (Optional) List of strings. Labels applied to the cell. - """ - - def __init__(self, value, timestamp, labels=()): - self.value = value - self.timestamp = timestamp - self.labels = list(labels) - - @classmethod - def from_pb(cls, cell_pb): - """Create a new cell from a Cell protobuf. - - :type cell_pb: :class:`._generated.bigtable_data_pb2.Cell` - :param cell_pb: The protobuf to convert. - - :rtype: :class:`Cell` - :returns: The cell corresponding to the protobuf. - """ - timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros) - if cell_pb.labels: - return cls(cell_pb.value, timestamp, labels=cell_pb.labels) - else: - return cls(cell_pb.value, timestamp) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.value == self.value and - other.timestamp == self.timestamp and - other.labels == self.labels) - - def __ne__(self, other): - return not self.__eq__(other) - - -class PartialRowData(object): - """Representation of partial row in a Google Cloud Bigtable Table. - - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) data. - """ - - def __init__(self, row_key): - self._row_key = row_key - self._cells = {} - self._committed = False - self._chunks_encountered = False - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other._row_key == self._row_key and - other._committed == self._committed and - other._chunks_encountered == self._chunks_encountered and - other._cells == self._cells) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - """Convert the cells to a dictionary. - - This is intended to be used with HappyBase, so the column family and - column qualiers are combined (with ``:``). - - :rtype: dict - :returns: Dictionary containing all the data in the cells of this row. - """ - result = {} - for column_family_id, columns in six.iteritems(self._cells): - for column_qual, cells in six.iteritems(columns): - key = (_to_bytes(column_family_id) + b':' + - _to_bytes(column_qual)) - result[key] = cells - return result - - @property - def cells(self): - """Property returning all the cells accumulated on this partial row. - - :rtype: dict - :returns: Dictionary of the :class:`Cell` objects accumulated. This - dictionary has two-levels of keys (first for column families - and second for column names/qualifiers within a family). For - a given column, a list of :class:`Cell` objects is stored. - """ - return copy.deepcopy(self._cells) - - @property - def row_key(self): - """Getter for the current (partial) row's key. - - :rtype: bytes - :returns: The current (partial) row's key. - """ - return self._row_key - - @property - def committed(self): - """Getter for the committed status of the (partial) row. - - :rtype: bool - :returns: The committed status of the (partial) row. 
- """ - return self._committed - - def clear(self): - """Clears all cells that have been added.""" - self._committed = False - self._chunks_encountered = False - self._cells.clear() - - def _handle_commit_row(self, chunk, index, last_chunk_index): - """Handles a ``commit_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :type index: int - :param index: The current index of the chunk. - - :type last_chunk_index: int - :param last_chunk_index: The index of the last chunk. - - :raises: :class:`ValueError ` if the value of - ``commit_row`` is :data:`False` or if the chunk passed is not - the last chunk in a response. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``commit_row``. - if not chunk.commit_row: - raise ValueError('Received commit_row that was False.') - - if index != last_chunk_index: - raise ValueError('Commit row chunk was not the last chunk') - else: - self._committed = True - - def _handle_reset_row(self, chunk): - """Handles a ``reset_row`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - - :raises: :class:`ValueError ` if the value of - ``reset_row`` is :data:`False` - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``reset_row``. - if not chunk.reset_row: - raise ValueError('Received reset_row that was False.') - - self.clear() - - def _handle_row_contents(self, chunk): - """Handles a ``row_contents`` chunk. - - :type chunk: ``ReadRowsResponse.Chunk`` - :param chunk: The chunk being handled. - """ - # NOTE: We assume the caller has checked that the ``ONEOF`` property - # for ``chunk`` is ``row_contents``. - - # chunk.row_contents is ._generated.bigtable_data_pb2.Family - column_family_id = chunk.row_contents.name - column_family_dict = self._cells.setdefault(column_family_id, {}) - for column in chunk.row_contents.columns: - cells = [Cell.from_pb(cell) for cell in column.cells] - - column_name = column.qualifier - column_cells = column_family_dict.setdefault(column_name, []) - column_cells.extend(cells) - - def update_from_read_rows(self, read_rows_response_pb): - """Updates the current row from a ``ReadRows`` response. - - :type read_rows_response_pb: - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - :param read_rows_response_pb: A response streamed back as part of a - ``ReadRows`` request. - - :raises: :class:`ValueError ` if the current - partial row has already been committed, if the row key on the - response doesn't match the current one or if there is a chunk - encountered with an unexpected ``ONEOF`` protobuf property. - """ - if self._committed: - raise ValueError('The row has been committed') - - if read_rows_response_pb.row_key != self.row_key: - raise ValueError('Response row key (%r) does not match current ' - 'one (%r).' 
% (read_rows_response_pb.row_key, - self.row_key)) - - last_chunk_index = len(read_rows_response_pb.chunks) - 1 - for index, chunk in enumerate(read_rows_response_pb.chunks): - chunk_property = chunk.WhichOneof('chunk') - if chunk_property == 'row_contents': - self._handle_row_contents(chunk) - elif chunk_property == 'reset_row': - self._handle_reset_row(chunk) - elif chunk_property == 'commit_row': - self._handle_commit_row(chunk, index, last_chunk_index) - else: - # NOTE: This includes chunk_property == None since we always - # want a value to be set - raise ValueError('Unexpected chunk property: %s' % ( - chunk_property,)) - - self._chunks_encountered = True - - -class PartialRowsData(object): - """Convenience wrapper for consuming a ``ReadRows`` streaming response. - - :type response_iterator: - :class:`grpc.framework.alpha._reexport._CancellableIterator` - :param response_iterator: A streaming iterator returned from a - ``ReadRows`` request. - """ - - def __init__(self, response_iterator): - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` - self._response_iterator = response_iterator - self._rows = {} - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other._response_iterator == self._response_iterator - - def __ne__(self, other): - return not self.__eq__(other) - - @property - def rows(self): - """Property returning all rows accumulated from the stream. - - :rtype: dict - :returns: Dictionary of :class:`PartialRowData`. - """ - # NOTE: To avoid duplicating large objects, this is just the - # mutable private data. - return self._rows - - def cancel(self): - """Cancels the iterator, closing the stream.""" - self._response_iterator.cancel() - - def consume_next(self): - """Consumes the next ``ReadRowsResponse`` from the stream. - - Parses the response and stores it as a :class:`PartialRowData` - in a dictionary owned by this object. - - :raises: :class:`StopIteration ` if the - response iterator has no more responses to stream. - """ - read_rows_response = self._response_iterator.next() - row_key = read_rows_response.row_key - partial_row = self._rows.get(row_key) - if partial_row is None: - partial_row = self._rows[row_key] = PartialRowData(row_key) - # NOTE: This is not atomic in the case of failures. - partial_row.update_from_read_rows(read_rows_response) - - def consume_all(self, max_loops=None): - """Consume the streamed responses until there are no more. - - This simply calls :meth:`consume_next` until there are no - more to consume. - - :type max_loops: int - :param max_loops: (Optional) Maximum number of times to try to consume - an additional ``ReadRowsResponse``. You can use this - to avoid long wait times. - """ - curr_loop = 0 - if max_loops is None: - max_loops = float('inf') - while curr_loop < max_loops: - curr_loop += 1 - try: - self.consume_next() - except StopIteration: - break diff --git a/gcloud/bigtable/row_filters.py b/gcloud/bigtable/row_filters.py deleted file mode 100644 index b7a1388b3a09..000000000000 --- a/gcloud/bigtable/row_filters.py +++ /dev/null @@ -1,764 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Filters for Google Cloud Bigtable Row classes.""" - - -from gcloud._helpers import _microseconds_from_datetime -from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - -class RowFilter(object): - """Basic filter to apply to cells in a row. - - These values can be combined via :class:`RowFilterChain`, - :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. - - .. note:: - - This class is a do-nothing base class for all row filters. - """ - - def __ne__(self, other): - return not self.__eq__(other) - - -class _BoolFilter(RowFilter): - """Row filter that uses a boolean flag. - - :type flag: bool - :param flag: An indicator if a setting is turned on or off. - """ - - def __init__(self, flag): - self.flag = flag - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.flag == self.flag - - -class SinkFilter(_BoolFilter): - """Advanced row filter to skip parent filters. - - :type flag: bool - :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. - Outputs all cells directly to the output of the read rather - than to any parent filter. Cannot be used within the - ``predicate_filter``, ``true_filter``, or ``false_filter`` - of a :class:`ConditionalRowFilter`. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(sink=self.flag) - - -class PassAllFilter(_BoolFilter): - """Row filter equivalent to not filtering at all. - - :type flag: bool - :param flag: Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(pass_all_filter=self.flag) - - -class BlockAllFilter(_BoolFilter): - """Row filter that doesn't match any cells. - - :type flag: bool - :param flag: Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(block_all_filter=self.flag) - - -class _RegexFilter(RowFilter): - """Row filter that uses a regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - :type regex: bytes or str - :param regex: A regular expression (RE2) for some row filter. - """ - - def __init__(self, regex): - self.regex = _to_bytes(regex) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.regex == self.regex - - -class RowKeyRegexFilter(_RegexFilter): - """Row filter for a row key regular expression. - - The ``regex`` must be valid RE2 patterns. 
See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells from rows with row - keys that satisfy this regex. For a - ``CheckAndMutateRowRequest``, this filter is unnecessary - since the row key is already specified. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(row_key_regex_filter=self.regex) - - -class RowSampleFilter(RowFilter): - """Matches all cells from a row with probability p. - - :type sample: float - :param sample: The probability of matching a cell (must be in the - interval ``[0, 1]``). - """ - - def __init__(self, sample): - self.sample = sample - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.sample == self.sample - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(row_sample_filter=self.sample) - - -class FamilyNameRegexFilter(_RegexFilter): - """Row filter for a family name regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - :type regex: str - :param regex: A regular expression (RE2) to match cells from columns in a - given column family. For technical reasons, the regex must - not contain the ``':'`` character, even if it is not being - used as a literal. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(family_name_regex_filter=self.regex) - - -class ColumnQualifierRegexFilter(_RegexFilter): - """Row filter for a column qualifier regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells from column that - match this regex (irrespective of column family). - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(column_qualifier_regex_filter=self.regex) - - -class TimestampRange(object): - """Range of time with inclusive lower and exclusive upper bounds. - - :type start: :class:`datetime.datetime` - :param start: (Optional) The (inclusive) lower bound of the timestamp - range. If omitted, defaults to Unix epoch. 
- - :type end: :class:`datetime.datetime` - :param end: (Optional) The (exclusive) upper bound of the timestamp - range. If omitted, no upper bound is used. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.start == self.start and - other.end == self.end) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_pb(self): - """Converts the :class:`TimestampRange` to a protobuf. - - :rtype: :class:`.data_pb2.TimestampRange` - :returns: The converted current object. - """ - timestamp_range_kwargs = {} - if self.start is not None: - timestamp_range_kwargs['start_timestamp_micros'] = ( - _microseconds_from_datetime(self.start)) - if self.end is not None: - timestamp_range_kwargs['end_timestamp_micros'] = ( - _microseconds_from_datetime(self.end)) - return data_pb2.TimestampRange(**timestamp_range_kwargs) - - -class TimestampRangeFilter(RowFilter): - """Row filter that limits cells to a range of time. - - :type range_: :class:`TimestampRange` - :param range_: Range of time that cells should match against. - """ - - def __init__(self, range_): - self.range_ = range_ - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.range_ == self.range_ - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts the ``range_`` on the current object to a protobuf and - then uses it in the ``timestamp_range_filter`` field. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) - - -class ColumnRangeFilter(RowFilter): - """A row filter to restrict to a range of columns. - - Both the start and end column can be included or excluded in the range. - By default, we include them both, but this can be changed with optional - flags. - - :type column_family_id: str - :param column_family_id: The column family that contains the columns. Must - be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type start_column: bytes - :param start_column: The start of the range of columns. If no value is - used, the backend applies no upper bound to the - values. - - :type end_column: bytes - :param end_column: The end of the range of columns. If no value is used, - the backend applies no upper bound to the values. - - :type inclusive_start: bool - :param inclusive_start: Boolean indicating if the start column should be - included in the range (or excluded). Defaults - to :data:`True` if ``start_column`` is passed and - no ``inclusive_start`` was given. - - :type inclusive_end: bool - :param inclusive_end: Boolean indicating if the end column should be - included in the range (or excluded). Defaults - to :data:`True` if ``end_column`` is passed and - no ``inclusive_end`` was given. 
- - :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_column`` is given or if ``inclusive_end`` - is set but no ``end_column`` is given - """ - - def __init__(self, column_family_id, start_column=None, end_column=None, - inclusive_start=None, inclusive_end=None): - self.column_family_id = column_family_id - - if inclusive_start is None: - inclusive_start = True - elif start_column is None: - raise ValueError('Inclusive start was specified but no ' - 'start column was given.') - self.start_column = start_column - self.inclusive_start = inclusive_start - - if inclusive_end is None: - inclusive_end = True - elif end_column is None: - raise ValueError('Inclusive end was specified but no ' - 'end column was given.') - self.end_column = end_column - self.inclusive_end = inclusive_end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.column_family_id == self.column_family_id and - other.start_column == self.start_column and - other.end_column == self.end_column and - other.inclusive_start == self.inclusive_start and - other.inclusive_end == self.inclusive_end) - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts to a :class:`.data_pb2.ColumnRange` and then uses it - in the ``column_range_filter`` field. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - column_range_kwargs = {'family_name': self.column_family_id} - if self.start_column is not None: - if self.inclusive_start: - key = 'start_qualifier_inclusive' - else: - key = 'start_qualifier_exclusive' - column_range_kwargs[key] = _to_bytes(self.start_column) - if self.end_column is not None: - if self.inclusive_end: - key = 'end_qualifier_inclusive' - else: - key = 'end_qualifier_exclusive' - column_range_kwargs[key] = _to_bytes(self.end_column) - - column_range = data_pb2.ColumnRange(**column_range_kwargs) - return data_pb2.RowFilter(column_range_filter=column_range) - - -class ValueRegexFilter(_RegexFilter): - """Row filter for a value regular expression. - - The ``regex`` must be valid RE2 patterns. See Google's - `RE2 reference`_ for the accepted syntax. - - .. _RE2 reference: https://github.com/google/re2/wiki/Syntax - - .. note:: - - Special care need be used with the expression used. Since - each of these properties can contain arbitrary bytes, the ``\\C`` - escape sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\\n``, which may be - present in a binary value. - - :type regex: bytes - :param regex: A regular expression (RE2) to match cells with values that - match this regex. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(value_regex_filter=self.regex) - - -class ValueRangeFilter(RowFilter): - """A range of values to restrict to in a row filter. - - Will only match cells that have values in this range. - - Both the start and end value can be included or excluded in the range. - By default, we include them both, but this can be changed with optional - flags. - - :type start_value: bytes - :param start_value: The start of the range of values. If no value is used, - the backend applies no lower bound to the values. - - :type end_value: bytes - :param end_value: The end of the range of values. If no value is used, - the backend applies no upper bound to the values. 
- - :type inclusive_start: bool - :param inclusive_start: Boolean indicating if the start value should be - included in the range (or excluded). Defaults - to :data:`True` if ``start_value`` is passed and - no ``inclusive_start`` was given. - - :type inclusive_end: bool - :param inclusive_end: Boolean indicating if the end value should be - included in the range (or excluded). Defaults - to :data:`True` if ``end_value`` is passed and - no ``inclusive_end`` was given. - - :raises: :class:`ValueError ` if ``inclusive_start`` - is set but no ``start_value`` is given or if ``inclusive_end`` - is set but no ``end_value`` is given - """ - - def __init__(self, start_value=None, end_value=None, - inclusive_start=None, inclusive_end=None): - if inclusive_start is None: - inclusive_start = True - elif start_value is None: - raise ValueError('Inclusive start was specified but no ' - 'start value was given.') - self.start_value = start_value - self.inclusive_start = inclusive_start - - if inclusive_end is None: - inclusive_end = True - elif end_value is None: - raise ValueError('Inclusive end was specified but no ' - 'end value was given.') - self.end_value = end_value - self.inclusive_end = inclusive_end - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.start_value == self.start_value and - other.end_value == self.end_value and - other.inclusive_start == self.inclusive_start and - other.inclusive_end == self.inclusive_end) - - def to_pb(self): - """Converts the row filter to a protobuf. - - First converts to a :class:`.data_pb2.ValueRange` and then uses - it to create a row filter protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - value_range_kwargs = {} - if self.start_value is not None: - if self.inclusive_start: - key = 'start_value_inclusive' - else: - key = 'start_value_exclusive' - value_range_kwargs[key] = _to_bytes(self.start_value) - if self.end_value is not None: - if self.inclusive_end: - key = 'end_value_inclusive' - else: - key = 'end_value_exclusive' - value_range_kwargs[key] = _to_bytes(self.end_value) - - value_range = data_pb2.ValueRange(**value_range_kwargs) - return data_pb2.RowFilter(value_range_filter=value_range) - - -class _CellCountFilter(RowFilter): - """Row filter that uses an integer count of cells. - - The cell count is used as an offset or a limit for the number - of results returned. - - :type num_cells: int - :param num_cells: An integer count / offset / limit. - """ - - def __init__(self, num_cells): - self.num_cells = num_cells - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.num_cells == self.num_cells - - -class CellsRowOffsetFilter(_CellCountFilter): - """Row filter to skip cells in a row. - - :type num_cells: int - :param num_cells: Skips the first N cells of the row. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) - - -class CellsRowLimitFilter(_CellCountFilter): - """Row filter to limit cells in a row. - - :type num_cells: int - :param num_cells: Matches only the first N cells of the row. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. 
- """ - return data_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) - - -class CellsColumnLimitFilter(_CellCountFilter): - """Row filter to limit cells in a column. - - :type num_cells: int - :param num_cells: Matches only the most recent N cells within each column. - This filters a (family name, column) pair, based on - timestamps of each cell. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) - - -class StripValueTransformerFilter(_BoolFilter): - """Row filter that transforms cells into empty string (0 bytes). - - :type flag: bool - :param flag: If :data:`True`, replaces each cell's value with the empty - string. As the name indicates, this is more useful as a - transformer than a generic query / filter. - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(strip_value_transformer=self.flag) - - -class ApplyLabelFilter(RowFilter): - """Filter to apply labels to cells. - - Intended to be used as an intermediate filter on a pre-existing filtered - result set. This way if two sets are combined, the label can tell where - the cell(s) originated.This allows the client to determine which results - were produced from which part of the filter. - - .. note:: - - Due to a technical limitation of the backend, it is not currently - possible to apply multiple labels to a cell. - - :type label: str - :param label: Label to apply to cells in the output row. Values must be - at most 15 characters long, and match the pattern - ``[a-z0-9\\-]+``. - """ - - def __init__(self, label): - self.label = label - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.label == self.label - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - return data_pb2.RowFilter(apply_label_transformer=self.label) - - -class _FilterCombination(RowFilter): - """Chain of row filters. - - Sends rows through several filters in sequence. The filters are "chained" - together to process a row. After the first filter is applied, the second - is applied to the filtered output and so on for subsequent filters. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def __init__(self, filters=None): - if filters is None: - filters = [] - self.filters = filters - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return other.filters == self.filters - - -class RowFilterChain(_FilterCombination): - """Chain of row filters. - - Sends rows through several filters in sequence. The filters are "chained" - together to process a row. After the first filter is applied, the second - is applied to the filtered output and so on for subsequent filters. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - chain = data_pb2.RowFilter.Chain( - filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(chain=chain) - - -class RowFilterUnion(_FilterCombination): - """Union of row filters. 
- - Sends rows through several filters simultaneously, then - merges / interleaves all the filtered results together. - - If multiple cells are produced with the same column and timestamp, - they will all appear in the output row in an unspecified mutual order. - - :type filters: list - :param filters: List of :class:`RowFilter` - """ - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - interleave = data_pb2.RowFilter.Interleave( - filters=[row_filter.to_pb() for row_filter in self.filters]) - return data_pb2.RowFilter(interleave=interleave) - - -class ConditionalRowFilter(RowFilter): - """Conditional row filter which exhibits ternary behavior. - - Executes one of two filters based on another filter. If the ``base_filter`` - returns any cells in the row, then ``true_filter`` is executed. If not, - then ``false_filter`` is executed. - - .. note:: - - The ``base_filter`` does not execute atomically with the true and false - filters, which may lead to inconsistent or unexpected results. - - Additionally, executing a :class:`ConditionalRowFilter` has poor - performance on the server, especially when ``false_filter`` is set. - - :type base_filter: :class:`RowFilter` - :param base_filter: The filter to condition on before executing the - true/false filters. - - :type true_filter: :class:`RowFilter` - :param true_filter: (Optional) The filter to execute if there are any cells - matching ``base_filter``. If not provided, no results - will be returned in the true case. - - :type false_filter: :class:`RowFilter` - :param false_filter: (Optional) The filter to execute if there are no cells - matching ``base_filter``. If not provided, no results - will be returned in the false case. - """ - - def __init__(self, base_filter, true_filter=None, false_filter=None): - self.base_filter = base_filter - self.true_filter = true_filter - self.false_filter = false_filter - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.base_filter == self.base_filter and - other.true_filter == self.true_filter and - other.false_filter == self.false_filter) - - def to_pb(self): - """Converts the row filter to a protobuf. - - :rtype: :class:`.data_pb2.RowFilter` - :returns: The converted current object. - """ - condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} - if self.true_filter is not None: - condition_kwargs['true_filter'] = self.true_filter.to_pb() - if self.false_filter is not None: - condition_kwargs['false_filter'] = self.false_filter.to_pb() - condition = data_pb2.RowFilter.Condition(**condition_kwargs) - return data_pb2.RowFilter(condition=condition) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py deleted file mode 100644 index 5815086d7c00..000000000000 --- a/gcloud/bigtable/table.py +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
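A minimal sketch of how the filter classes from ``row_filters.py`` above compose. The class names, constructor parameters, and ``to_pb()`` calls follow the module shown; the family name ``'stats'`` and the particular combination are illustrative placeholders only:

    from gcloud.bigtable.row_filters import CellsColumnLimitFilter
    from gcloud.bigtable.row_filters import ConditionalRowFilter
    from gcloud.bigtable.row_filters import FamilyNameRegexFilter
    from gcloud.bigtable.row_filters import RowFilterChain
    from gcloud.bigtable.row_filters import StripValueTransformerFilter

    # Keep only the newest cell per column, restricted to one family
    # (the family name 'stats' is a made-up example).
    newest_in_stats = RowFilterChain(filters=[
        FamilyNameRegexFilter('stats'),
        CellsColumnLimitFilter(1),
    ])

    # Ternary behavior: if any 'stats' cells match, return them with their
    # values stripped; with no false_filter, non-matching rows yield nothing.
    conditional = ConditionalRowFilter(
        base_filter=newest_in_stats,
        true_filter=RowFilterChain(filters=[
            newest_in_stats,
            StripValueTransformerFilter(True),
        ]),
    )

    # Every filter (including nested ones) serializes itself to the
    # RowFilter protobuf consumed by ReadRows requests.
    filter_pb = conditional.to_pb()

The chain/condition classes only hold references to their sub-filters; the conversion to a single protobuf happens lazily in ``to_pb()``, which is why the same ``newest_in_stats`` instance can be reused in both places above.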
- -"""User friendly container for Google Cloud Bigtable Table.""" - - -from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 -from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) -from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as data_messages_pb2) -from gcloud.bigtable.column_family import _gc_rule_from_pb -from gcloud.bigtable.column_family import ColumnFamily -from gcloud.bigtable.row import AppendRow -from gcloud.bigtable.row import ConditionalRow -from gcloud.bigtable.row import DirectRow -from gcloud.bigtable.row_data import PartialRowData -from gcloud.bigtable.row_data import PartialRowsData - - -class Table(object): - """Representation of a Google Cloud Bigtable Table. - - .. note:: - - We don't define any properties on a table other than the name. As - the proto says, in a request: - - The ``name`` field of the Table and all of its ColumnFamilies must - be left blank, and will be populated in the response. - - This leaves only the ``current_operation`` and ``granularity`` - fields. The ``current_operation`` is only used for responses while - ``granularity`` is an enum with only one value. - - We can use a :class:`Table` to: - - * :meth:`create` the table - * :meth:`rename` the table - * :meth:`delete` the table - * :meth:`list_column_families` in the table - - :type table_id: str - :param table_id: The ID of the table. - - :type cluster: :class:`Cluster <.cluster.Cluster>` - :param cluster: The cluster that owns the table. - """ - - def __init__(self, table_id, cluster): - self.table_id = table_id - self._cluster = cluster - - @property - def name(self): - """Table name used in requests. - - .. note:: - - This property will not change if ``table_id`` does not, but the - return value is not cached. - - The table name is of the form - - ``"projects/../zones/../clusters/../tables/{table_id}"`` - - :rtype: str - :returns: The table name. - """ - return self._cluster.name + '/tables/' + self.table_id - - def column_family(self, column_family_id, gc_rule=None): - """Factory to create a column family associated with this table. - - :type column_family_id: str - :param column_family_id: The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - - :type gc_rule: :class:`.GarbageCollectionRule` - :param gc_rule: (Optional) The garbage collection settings for this - column family. - - :rtype: :class:`.ColumnFamily` - :returns: A column family owned by this table. - """ - return ColumnFamily(column_family_id, self, gc_rule=gc_rule) - - def row(self, row_key, filter_=None, append=False): - """Factory to create a row associated with this table. - - .. warning:: - - At most one of ``filter_`` and ``append`` can be used in a - :class:`Row`. - - :type row_key: bytes - :param row_key: The key for the row being created. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) Filter to be used for conditional mutations. - See :class:`.DirectRow` for more details. - - :type append: bool - :param append: (Optional) Flag to determine if the row should be used - for append mutations. - - :rtype: :class:`.DirectRow` - :returns: A row owned by this table. - :raises: :class:`ValueError ` if both - ``filter_`` and ``append`` are used. 
- """ - if append and filter_ is not None: - raise ValueError('At most one of filter_ and append can be set') - if append: - return AppendRow(row_key, self) - elif filter_ is not None: - return ConditionalRow(row_key, self, filter_=filter_) - else: - return DirectRow(row_key, self) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - return (other.table_id == self.table_id and - other._cluster == self._cluster) - - def __ne__(self, other): - return not self.__eq__(other) - - def create(self, initial_split_keys=None): - """Creates this table. - - .. note:: - - Though a :class:`._generated.bigtable_table_data_pb2.Table` is also - allowed (as the ``table`` property) in a create table request, we - do not support it in this method. As mentioned in the - :class:`Table` docstring, the name is the only useful property in - the table proto. - - .. note:: - - A create request returns a - :class:`._generated.bigtable_table_data_pb2.Table` but we don't use - this response. The proto definition allows for the inclusion of a - ``current_operation`` in the response, but it does not appear that - the Cloud Bigtable API returns any operation. - - :type initial_split_keys: list - :param initial_split_keys: (Optional) List of row keys that will be - used to initially split the table into - several tablets (Tablets are similar to - HBase regions). Given two split keys, - ``"s1"`` and ``"s2"``, three tablets will be - created, spanning the key ranges: - ``[, s1)``, ``[s1, s2)``, ``[s2, )``. - """ - request_pb = messages_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys or [], - name=self._cluster.name, - table_id=self.table_id, - ) - client = self._cluster._client - # We expect a `._generated.bigtable_table_data_pb2.Table` - client._table_stub.CreateTable(request_pb, client.timeout_seconds) - - def rename(self, new_table_id): - """Rename this table. - - .. note:: - - This cannot be used to move tables between clusters, - zones, or projects. - - .. note:: - - The Bigtable Table Admin API currently (``v1``) returns - - ``BigtableTableService.RenameTable is not yet implemented`` - - when this method is used. It's unclear when this method will - actually be supported by the API. - - :type new_table_id: str - :param new_table_id: The new name table ID. - """ - request_pb = messages_pb2.RenameTableRequest( - name=self.name, - new_id=new_table_id, - ) - client = self._cluster._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.RenameTable(request_pb, client.timeout_seconds) - - self.table_id = new_table_id - - def delete(self): - """Delete this table.""" - request_pb = messages_pb2.DeleteTableRequest(name=self.name) - client = self._cluster._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.DeleteTable(request_pb, client.timeout_seconds) - - def list_column_families(self): - """List the column families owned by this table. - - :rtype: dict - :returns: Dictionary of column families attached to this table. Keys - are strings (column family names) and values are - :class:`.ColumnFamily` instances. - :raises: :class:`ValueError ` if the column - family name from the response does not agree with the computed - name from the column family ID. 
- """ - request_pb = messages_pb2.GetTableRequest(name=self.name) - client = self._cluster._client - # We expect a `._generated.bigtable_table_data_pb2.Table` - table_pb = client._table_stub.GetTable(request_pb, - client.timeout_seconds) - - result = {} - for column_family_id, value_pb in table_pb.column_families.items(): - gc_rule = _gc_rule_from_pb(value_pb.gc_rule) - column_family = self.column_family(column_family_id, - gc_rule=gc_rule) - if column_family.name != value_pb.name: - raise ValueError('Column family name %s does not agree with ' - 'name from request: %s.' % ( - column_family.name, value_pb.name)) - result[column_family_id] = column_family - return result - - def read_row(self, row_key, filter_=None): - """Read a single row from this table. - - :type row_key: bytes - :param row_key: The key of the row to read from. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - row. If unset, returns the entire row. - - :rtype: :class:`.PartialRowData`, :data:`NoneType ` - :returns: The contents of the row if any chunks were returned in - the response, otherwise :data:`None`. - :raises: :class:`ValueError ` if a commit row - chunk is never encountered. - """ - request_pb = _create_row_request(self.name, row_key=row_key, - filter_=filter_) - client = self._cluster._client - response_iterator = client._data_stub.ReadRows(request_pb, - client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` - result = PartialRowData(row_key) - for read_rows_response in response_iterator: - result.update_from_read_rows(read_rows_response) - - # Make sure the result actually contains data. - if not result._chunks_encountered: - return None - # Make sure the result was committed by the back-end. - if not result.committed: - raise ValueError('The row remains partial / is not committed.') - return result - - def read_rows(self, start_key=None, end_key=None, - allow_row_interleaving=None, limit=None, filter_=None): - """Read rows from this table. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which - are guaranteed to arrive in increasing - row order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads every column in - each row. 
- - :rtype: :class:`.PartialRowsData` - :returns: A :class:`.PartialRowsData` convenience wrapper for consuming - the streamed results. - """ - request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, filter_=filter_, - allow_row_interleaving=allow_row_interleaving, limit=limit) - client = self._cluster._client - response_iterator = client._data_stub.ReadRows(request_pb, - client.timeout_seconds) - # We expect an iterator of `data_messages_pb2.ReadRowsResponse` - return PartialRowsData(response_iterator) - - def sample_row_keys(self): - """Read a sample of row keys in the table. - - The returned row keys will delimit contiguous sections of the table of - approximately equal size, which can be used to break up the data for - distributed tasks like mapreduces. - - The elements in the iterator are a SampleRowKeys response and they have - the properties ``offset_bytes`` and ``row_key``. They occur in sorted - order. The table might have contents before the first row key in the - list and after the last one, but a key containing the empty string - indicates "end of table" and will be the last response given, if - present. - - .. note:: - - Row keys in this list may not have ever been written to or read - from, and users should therefore not make any assumptions about the - row key structure that are specific to their use case. - - The ``offset_bytes`` field on a response indicates the approximate - total storage space used by all rows in the table which precede - ``row_key``. Buffering the contents of all rows between two subsequent - samples would require space roughly equal to the difference in their - ``offset_bytes`` fields. - - :rtype: :class:`grpc.framework.alpha._reexport._CancellableIterator` - :returns: A cancel-able iterator. Can be consumed by calling ``next()`` - or by casting to a :class:`list` and can be cancelled by - calling ``cancel()``. - """ - request_pb = data_messages_pb2.SampleRowKeysRequest( - table_name=self.name) - client = self._cluster._client - response_iterator = client._data_stub.SampleRowKeys( - request_pb, client.timeout_seconds) - return response_iterator - - -def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type allow_row_interleaving: bool - :param allow_row_interleaving: (Optional) By default, rows are read - sequentially, producing results which are - guaranteed to arrive in increasing row - order. Setting - ``allow_row_interleaving`` to - :data:`True` allows multiple rows to be - interleaved in the response stream, - which increases throughput but breaks - this guarantee, and may force the - client to use more memory to buffer - partially-received rows. 
- - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. Note that if ``allow_row_interleaving`` is - set to :data:`True`, partial results may be returned for - more than N rows. However, only N ``commit_row`` chunks - will be sent. - - :rtype: :class:`data_messages_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. - :raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set - """ - request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - if row_key is not None: - request_kwargs['row_key'] = _to_bytes(row_key) - if start_key is not None or end_key is not None: - range_kwargs = {} - if start_key is not None: - range_kwargs['start_key'] = _to_bytes(start_key) - if end_key is not None: - range_kwargs['end_key'] = _to_bytes(end_key) - row_range = data_pb2.RowRange(**range_kwargs) - request_kwargs['row_range'] = row_range - if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() - if allow_row_interleaving is not None: - request_kwargs['allow_row_interleaving'] = allow_row_interleaving - if limit is not None: - request_kwargs['num_rows_limit'] = limit - - return data_messages_pb2.ReadRowsRequest(**request_kwargs) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py deleted file mode 100644 index 1516297b5cba..000000000000 --- a/gcloud/bigtable/test_client.py +++ /dev/null @@ -1,770 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
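For orientation before the test modules, a minimal sketch of how the ``Table`` API from ``table.py`` above was typically driven. The ``Client``, ``Cluster``, ``Table``, and row-filter calls follow the source and tests shown here; the project, zone, cluster, table, and row-key values are placeholders, and credentials are assumed to be discoverable from the environment:

    from gcloud.bigtable.client import Client
    from gcloud.bigtable.row_filters import CellsColumnLimitFilter
    from gcloud.bigtable.table import Table

    # Credentials are picked up implicitly; project/zone/cluster names
    # below are placeholders.
    client = Client(project='my-project')
    client.start()  # opens the gRPC data channel

    cluster = client.cluster('us-central1-c', 'my-cluster')
    table = Table('my-table', cluster)

    # Read one row, keeping only the most recent cell in each column.
    row = table.read_row(b'row-key-1', filter_=CellsColumnLimitFilter(1))
    # ``row`` is a PartialRowData, or None if no chunks were returned.

    # Stream a key range and accumulate the partial rows locally.
    rows = table.read_rows(start_key=b'row-key-1', end_key=b'row-key-9')
    rows.consume_all()
    print(sorted(rows.rows.keys()))

    client.stop()  # closes the channel(s)

Note that ``read_rows`` only returns the streaming wrapper; nothing is fetched until ``consume_next()`` / ``consume_all()`` pulls responses off the iterator, which is the behavior exercised by the tests that follow.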
- - -import unittest2 - - -class TestClient(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.client import Client - return Client - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _constructor_test_helper(self, expected_scopes, creds, - read_only=False, admin=False, - user_agent=None, timeout_seconds=None, - expected_creds=None): - from gcloud.bigtable import client as MUT - - user_agent = user_agent or MUT.DEFAULT_USER_AGENT - timeout_seconds = timeout_seconds or MUT.DEFAULT_TIMEOUT_SECONDS - PROJECT = 'PROJECT' - client = self._makeOne(project=PROJECT, credentials=creds, - read_only=read_only, admin=admin, - user_agent=user_agent, - timeout_seconds=timeout_seconds) - - expected_creds = expected_creds or creds - self.assertTrue(client._credentials is expected_creds) - if expected_scopes is not None: - self.assertEqual(client._credentials.scopes, expected_scopes) - - self.assertEqual(client.project, PROJECT) - self.assertEqual(client.timeout_seconds, timeout_seconds) - self.assertEqual(client.user_agent, user_agent) - # Check stubs are set (but null) - self.assertEqual(client._data_stub_internal, None) - self.assertEqual(client._cluster_stub_internal, None) - self.assertEqual(client._operations_stub_internal, None) - self.assertEqual(client._table_stub_internal, None) - - def test_constructor_default_scopes(self): - from gcloud.bigtable import client as MUT - - expected_scopes = [MUT.DATA_SCOPE] - creds = _Credentials() - self._constructor_test_helper(expected_scopes, creds) - - def test_constructor_custom_user_agent_and_timeout(self): - from gcloud.bigtable import client as MUT - - timeout_seconds = 1337 - user_agent = 'custom-application' - expected_scopes = [MUT.DATA_SCOPE] - creds = _Credentials() - self._constructor_test_helper(expected_scopes, creds, - user_agent=user_agent, - timeout_seconds=timeout_seconds) - - def test_constructor_with_admin(self): - from gcloud.bigtable import client as MUT - - expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE] - creds = _Credentials() - self._constructor_test_helper(expected_scopes, creds, admin=True) - - def test_constructor_with_read_only(self): - from gcloud.bigtable import client as MUT - - expected_scopes = [MUT.READ_ONLY_SCOPE] - creds = _Credentials() - self._constructor_test_helper(expected_scopes, creds, read_only=True) - - def test_constructor_both_admin_and_read_only(self): - creds = _Credentials() - with self.assertRaises(ValueError): - self._constructor_test_helper([], creds, admin=True, - read_only=True) - - def test_constructor_implicit_credentials(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - - creds = _Credentials() - expected_scopes = [MUT.DATA_SCOPE] - - def mock_get_credentials(): - return creds - - with _Monkey(MUT, get_credentials=mock_get_credentials): - self._constructor_test_helper(expected_scopes, None, - expected_creds=creds) - - def test_constructor_credentials_wo_create_scoped(self): - creds = object() - expected_scopes = None - self._constructor_test_helper(expected_scopes, creds) - - def _copy_test_helper(self, read_only=False, admin=False): - credentials = _Credentials('value') - project = 'PROJECT' - timeout_seconds = 123 - user_agent = 'you-sir-age-int' - client = self._makeOne(project=project, credentials=credentials, - read_only=read_only, admin=admin, - timeout_seconds=timeout_seconds, - user_agent=user_agent) - # Put some fake stubs in place so that we can verify they - # don't get copied. 
- client._data_stub_internal = object() - client._cluster_stub_internal = object() - client._operations_stub_internal = object() - client._table_stub_internal = object() - - new_client = client.copy() - self.assertEqual(new_client._admin, client._admin) - self.assertEqual(new_client._credentials, client._credentials) - self.assertEqual(new_client.project, client.project) - self.assertEqual(new_client.user_agent, client.user_agent) - self.assertEqual(new_client.timeout_seconds, client.timeout_seconds) - # Make sure stubs are not preserved. - self.assertEqual(new_client._data_stub_internal, None) - self.assertEqual(new_client._cluster_stub_internal, None) - self.assertEqual(new_client._operations_stub_internal, None) - self.assertEqual(new_client._table_stub_internal, None) - - def test_copy(self): - self._copy_test_helper() - - def test_copy_admin(self): - self._copy_test_helper(admin=True) - - def test_copy_read_only(self): - self._copy_test_helper(read_only=True) - - def test_credentials_getter(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - self.assertTrue(client.credentials is credentials) - - def test_project_name_property(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - project_name = 'projects/' + project - self.assertEqual(client.project_name, project_name) - - def test_data_stub_getter(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - client._data_stub_internal = object() - self.assertTrue(client._data_stub is client._data_stub_internal) - - def test_data_stub_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - with self.assertRaises(ValueError): - getattr(client, '_data_stub') - - def test_cluster_stub_getter(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - client._cluster_stub_internal = object() - self.assertTrue(client._cluster_stub is client._cluster_stub_internal) - - def test_cluster_stub_non_admin_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=False) - with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') - - def test_cluster_stub_unset_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - with self.assertRaises(ValueError): - getattr(client, '_cluster_stub') - - def test_operations_stub_getter(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - client._operations_stub_internal = object() - self.assertTrue(client._operations_stub is - client._operations_stub_internal) - - def test_operations_stub_non_admin_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=False) - with self.assertRaises(ValueError): - getattr(client, '_operations_stub') - - def test_operations_stub_unset_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - with self.assertRaises(ValueError): - getattr(client, 
'_operations_stub') - - def test_table_stub_getter(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - client._table_stub_internal = object() - self.assertTrue(client._table_stub is client._table_stub_internal) - - def test_table_stub_non_admin_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=False) - with self.assertRaises(ValueError): - getattr(client, '_table_stub') - - def test_table_stub_unset_failure(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=True) - with self.assertRaises(ValueError): - getattr(client, '_table_stub') - - def test__make_data_stub(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import DATA_API_HOST - from gcloud.bigtable.client import DATA_API_PORT - from gcloud.bigtable.client import DATA_STUB_FACTORY - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - fake_stub = object() - make_stub_args = [] - - def mock_make_stub(*args): - make_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_data_stub() - - self.assertTrue(result is fake_stub) - self.assertEqual(make_stub_args, [ - ( - client, - DATA_STUB_FACTORY, - DATA_API_HOST, - DATA_API_PORT, - ), - ]) - - def test__make_cluster_stub(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import CLUSTER_STUB_FACTORY - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - fake_stub = object() - make_stub_args = [] - - def mock_make_stub(*args): - make_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_cluster_stub() - - self.assertTrue(result is fake_stub) - self.assertEqual(make_stub_args, [ - ( - client, - CLUSTER_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, - ), - ]) - - def test__make_operations_stub(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import CLUSTER_ADMIN_HOST - from gcloud.bigtable.client import CLUSTER_ADMIN_PORT - from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - fake_stub = object() - make_stub_args = [] - - def mock_make_stub(*args): - make_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_operations_stub() - - self.assertTrue(result is fake_stub) - self.assertEqual(make_stub_args, [ - ( - client, - OPERATIONS_STUB_FACTORY, - CLUSTER_ADMIN_HOST, - CLUSTER_ADMIN_PORT, - ), - ]) - - def test__make_table_stub(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - from gcloud.bigtable.client import TABLE_ADMIN_HOST - from gcloud.bigtable.client import TABLE_ADMIN_PORT - from gcloud.bigtable.client import TABLE_STUB_FACTORY - - credentials = _Credentials() - project = 'PROJECT' - client = 
self._makeOne(project=project, credentials=credentials) - - fake_stub = object() - make_stub_args = [] - - def mock_make_stub(*args): - make_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, _make_stub=mock_make_stub): - result = client._make_table_stub() - - self.assertTrue(result is fake_stub) - self.assertEqual(make_stub_args, [ - ( - client, - TABLE_STUB_FACTORY, - TABLE_ADMIN_HOST, - TABLE_ADMIN_PORT, - ), - ]) - - def test_is_started(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - self.assertFalse(client.is_started()) - client._data_stub_internal = object() - self.assertTrue(client.is_started()) - client._data_stub_internal = None - self.assertFalse(client.is_started()) - - def _start_method_helper(self, admin): - from gcloud._testing import _Monkey - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import client as MUT - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=admin) - - stub = _FakeStub() - make_stub_args = [] - - def mock_make_stub(*args): - make_stub_args.append(args) - return stub - - with _Monkey(MUT, _make_stub=mock_make_stub): - client.start() - - self.assertTrue(client._data_stub_internal is stub) - if admin: - self.assertTrue(client._cluster_stub_internal is stub) - self.assertTrue(client._operations_stub_internal is stub) - self.assertTrue(client._table_stub_internal is stub) - self.assertEqual(stub._entered, 4) - self.assertEqual(len(make_stub_args), 4) - else: - self.assertTrue(client._cluster_stub_internal is None) - self.assertTrue(client._operations_stub_internal is None) - self.assertTrue(client._table_stub_internal is None) - self.assertEqual(stub._entered, 1) - self.assertEqual(len(make_stub_args), 1) - self.assertEqual(stub._exited, []) - - def test_start_non_admin(self): - self._start_method_helper(admin=False) - - def test_start_with_admin(self): - self._start_method_helper(admin=True) - - def test_start_while_started(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - client._data_stub_internal = data_stub = object() - self.assertTrue(client.is_started()) - client.start() - - # Make sure the stub did not change. 
- self.assertEqual(client._data_stub_internal, data_stub) - - def _stop_method_helper(self, admin): - from gcloud.bigtable._testing import _FakeStub - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials, - admin=admin) - - stub1 = _FakeStub() - stub2 = _FakeStub() - client._data_stub_internal = stub1 - client._cluster_stub_internal = stub2 - client._operations_stub_internal = stub2 - client._table_stub_internal = stub2 - client.stop() - self.assertTrue(client._data_stub_internal is None) - self.assertTrue(client._cluster_stub_internal is None) - self.assertTrue(client._operations_stub_internal is None) - self.assertTrue(client._table_stub_internal is None) - self.assertEqual(stub1._entered, 0) - self.assertEqual(stub2._entered, 0) - exc_none_triple = (None, None, None) - self.assertEqual(stub1._exited, [exc_none_triple]) - if admin: - self.assertEqual(stub2._exited, [exc_none_triple] * 3) - else: - self.assertEqual(stub2._exited, []) - - def test_stop_non_admin(self): - self._stop_method_helper(admin=False) - - def test_stop_with_admin(self): - self._stop_method_helper(admin=True) - - def test_stop_while_stopped(self): - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - self.assertFalse(client.is_started()) - - # This is a bit hacky. We set the cluster stub protected value - # since it isn't used in is_started() and make sure that stop - # doesn't reset this value to None. - client._cluster_stub_internal = cluster_stub = object() - client.stop() - # Make sure the cluster stub did not change. - self.assertEqual(client._cluster_stub_internal, cluster_stub) - - def test_cluster_factory(self): - from gcloud.bigtable.cluster import Cluster - - credentials = _Credentials() - project = 'PROJECT' - client = self._makeOne(project=project, credentials=credentials) - - zone = 'zone' - cluster_id = 'cluster-id' - display_name = 'display-name' - serve_nodes = 42 - cluster = client.cluster(zone, cluster_id, display_name=display_name, - serve_nodes=serve_nodes) - self.assertTrue(isinstance(cluster, Cluster)) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertTrue(cluster._client is client) - - def _list_zones_helper(self, zone_status): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 281330 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_pb2.ListZonesRequest( - name='projects/' + project, - ) - - # Create response_pb - zone1 = 'foo' - zone2 = 'bar' - response_pb = messages_pb2.ListZonesResponse( - zones=[ - data_pb2.Zone(display_name=zone1, status=zone_status), - data_pb2.Zone(display_name=zone2, status=zone_status), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = [zone1, zone2] - - # Perform the method and check the result. 
- result = client.list_zones() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListZones', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_zones(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - self._list_zones_helper(data_pb2.Zone.OK) - - def test_list_zones_failure(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - with self.assertRaises(ValueError): - self._list_zones_helper(data_pb2.Zone.EMERGENCY_MAINENANCE) - - def test_list_clusters(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - credentials = _Credentials() - project = 'PROJECT' - timeout_seconds = 8004 - client = self._makeOne(project=project, credentials=credentials, - admin=True, timeout_seconds=timeout_seconds) - - # Create request_pb - request_pb = messages_pb2.ListClustersRequest( - name='projects/' + project, - ) - - # Create response_pb - zone = 'foo' - failed_zone = 'bar' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_name1 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id1) - cluster_name2 = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id2) - response_pb = messages_pb2.ListClustersResponse( - failed_zones=[ - data_pb2.Zone(display_name=failed_zone), - ], - clusters=[ - data_pb2.Cluster( - name=cluster_name1, - display_name=cluster_name1, - serve_nodes=3, - ), - data_pb2.Cluster( - name=cluster_name2, - display_name=cluster_name2, - serve_nodes=3, - ), - ], - ) - - # Patch the stub used by the API method. - client._cluster_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - failed_zones = [failed_zone] - clusters = [ - client.cluster(zone, cluster_id1), - client.cluster(zone, cluster_id2), - ] - expected_result = (clusters, failed_zones) - - # Perform the method and check the result. 
- result = client.list_clusters() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListClusters', - (request_pb, timeout_seconds), - {}, - )]) - - -class Test_MetadataPlugin(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.client import _MetadataPlugin - return _MetadataPlugin - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - from gcloud.bigtable.client import Client - from gcloud.bigtable.client import DATA_SCOPE - - credentials = _Credentials() - project = 'PROJECT' - user_agent = 'USER_AGENT' - client = Client(project=project, credentials=credentials, - user_agent=user_agent) - transformer = self._makeOne(client) - self.assertTrue(transformer._credentials is credentials) - self.assertEqual(transformer._user_agent, user_agent) - self.assertEqual(credentials.scopes, [DATA_SCOPE]) - - def test___call__(self): - from gcloud.bigtable.client import Client - from gcloud.bigtable.client import DATA_SCOPE - from gcloud.bigtable.client import DEFAULT_USER_AGENT - - access_token_expected = 'FOOBARBAZ' - credentials = _Credentials(access_token=access_token_expected) - project = 'PROJECT' - client = Client(project=project, credentials=credentials) - callback_args = [] - - def callback(*args): - callback_args.append(args) - - transformer = self._makeOne(client) - result = transformer(None, callback) - cb_headers = [ - ('Authorization', 'Bearer ' + access_token_expected), - ('User-agent', DEFAULT_USER_AGENT), - ] - self.assertEqual(result, None) - self.assertEqual(callback_args, [(cb_headers, None)]) - self.assertEqual(credentials.scopes, [DATA_SCOPE]) - self.assertEqual(len(credentials._tokens), 1) - - -class Test__make_stub(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.client import _make_stub - return _make_stub(*args, **kwargs) - - def test_it(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import client as MUT - - mock_result = object() - stub_inputs = [] - - SSL_CREDS = object() - METADATA_CREDS = object() - COMPOSITE_CREDS = object() - CHANNEL = object() - - class _ImplementationsModule(object): - - def __init__(self): - self.ssl_channel_credentials_args = None - self.metadata_call_credentials_args = None - self.composite_channel_credentials_args = None - self.secure_channel_args = None - - def ssl_channel_credentials(self, *args): - self.ssl_channel_credentials_args = args - return SSL_CREDS - - def metadata_call_credentials(self, *args, **kwargs): - self.metadata_call_credentials_args = (args, kwargs) - return METADATA_CREDS - - def composite_channel_credentials(self, *args): - self.composite_channel_credentials_args = args - return COMPOSITE_CREDS - - def secure_channel(self, *args): - self.secure_channel_args = args - return CHANNEL - - implementations_mod = _ImplementationsModule() - - def mock_stub_factory(channel): - stub_inputs.append(channel) - return mock_result - - metadata_plugin = object() - clients = [] - - def mock_plugin(client): - clients.append(client) - return metadata_plugin - - host = 'HOST' - port = 1025 - client = object() - with _Monkey(MUT, implementations=implementations_mod, - _MetadataPlugin=mock_plugin): - result = self._callFUT(client, mock_stub_factory, host, port) - - self.assertTrue(result is mock_result) - self.assertEqual(stub_inputs, [CHANNEL]) - self.assertEqual(clients, [client]) - self.assertEqual(implementations_mod.ssl_channel_credentials_args, - (None, None, 
None)) - self.assertEqual(implementations_mod.metadata_call_credentials_args, - ((metadata_plugin,), {'name': 'google_creds'})) - self.assertEqual( - implementations_mod.composite_channel_credentials_args, - (SSL_CREDS, METADATA_CREDS)) - self.assertEqual(implementations_mod.secure_channel_args, - (host, port, COMPOSITE_CREDS)) - - -class _Credentials(object): - - scopes = None - - def __init__(self, access_token=None): - self._access_token = access_token - self._tokens = [] - - def get_access_token(self): - from oauth2client.client import AccessTokenInfo - token = AccessTokenInfo(access_token=self._access_token, - expires_in=None) - self._tokens.append(token) - return token - - def create_scoped(self, scope): - self.scopes = scope - return self - - def __eq__(self, other): - return self._access_token == other._access_token diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py deleted file mode 100644 index 427a4ec9126b..000000000000 --- a/gcloud/bigtable/test_cluster.py +++ /dev/null @@ -1,922 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class TestOperation(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.cluster import Operation - return Operation - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _constructor_test_helper(self, cluster=None): - import datetime - op_type = 'fake-op' - op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - operation = self._makeOne(op_type, op_id, begin, cluster=cluster) - - self.assertEqual(operation.op_type, op_type) - self.assertEqual(operation.op_id, op_id) - self.assertEqual(operation.begin, begin) - self.assertEqual(operation._cluster, cluster) - self.assertFalse(operation._complete) - - def test_constructor_defaults(self): - self._constructor_test_helper() - - def test_constructor_explicit_cluster(self): - cluster = object() - self._constructor_test_helper(cluster=cluster) - - def test___eq__(self): - import datetime - op_type = 'fake-op' - op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) - self.assertEqual(operation1, operation2) - - def test___eq__type_differ(self): - operation1 = self._makeOne('foo', 123, None) - operation2 = object() - self.assertNotEqual(operation1, operation2) - - def test___ne__same_value(self): - import datetime - op_type = 'fake-op' - op_id = 8915 - begin = datetime.datetime(2015, 10, 22, 1, 1) - cluster = object() - operation1 = self._makeOne(op_type, op_id, begin, cluster=cluster) - operation2 = self._makeOne(op_type, op_id, begin, cluster=cluster) - comparison_val = (operation1 != operation2) - self.assertFalse(comparison_val) - - def test___ne__(self): - operation1 = self._makeOne('foo', 123, None) - operation2 = self._makeOne('bar', 456, None) - 
        self.assertNotEqual(operation1, operation2)
-
-    def test_finished_without_operation(self):
-        operation = self._makeOne(None, None, None)
-        operation._complete = True
-        with self.assertRaises(ValueError):
-            operation.finished()
-
-    def _finished_helper(self, done):
-        import datetime
-        from google.longrunning import operations_pb2
-        from gcloud.bigtable._testing import _FakeStub
-        from gcloud.bigtable.cluster import Cluster
-
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        op_type = 'fake-op'
-        op_id = 789
-        begin = datetime.datetime(2015, 10, 22, 1, 1)
-        timeout_seconds = 1
-
-        client = _Client(project, timeout_seconds=timeout_seconds)
-        cluster = Cluster(zone, cluster_id, client)
-        operation = self._makeOne(op_type, op_id, begin, cluster=cluster)
-
-        # Create request_pb
-        op_name = ('operations/projects/' + project + '/zones/' +
-                   zone + '/clusters/' + cluster_id +
-                   '/operations/%d' % (op_id,))
-        request_pb = operations_pb2.GetOperationRequest(name=op_name)
-
-        # Create response_pb
-        response_pb = operations_pb2.Operation(done=done)
-
-        # Patch the stub used by the API method.
-        client._operations_stub = stub = _FakeStub(response_pb)
-
-        # Create expected_result.
-        expected_result = done
-
-        # Perform the method and check the result.
-        result = operation.finished()
-
-        self.assertEqual(result, expected_result)
-        self.assertEqual(stub.method_calls, [(
-            'GetOperation',
-            (request_pb, timeout_seconds),
-            {},
-        )])
-
-        if done:
-            self.assertTrue(operation._complete)
-        else:
-            self.assertFalse(operation._complete)
-
-    def test_finished(self):
-        self._finished_helper(done=True)
-
-    def test_finished_not_done(self):
-        self._finished_helper(done=False)
-
-
-class TestCluster(unittest2.TestCase):
-
-    def _getTargetClass(self):
-        from gcloud.bigtable.cluster import Cluster
-        return Cluster
-
-    def _makeOne(self, *args, **kwargs):
-        return self._getTargetClass()(*args, **kwargs)
-
-    def test_constructor_defaults(self):
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        client = object()
-
-        cluster = self._makeOne(zone, cluster_id, client)
-        self.assertEqual(cluster.zone, zone)
-        self.assertEqual(cluster.cluster_id, cluster_id)
-        self.assertEqual(cluster.display_name, cluster_id)
-        self.assertEqual(cluster.serve_nodes, 3)
-        self.assertTrue(cluster._client is client)
-
-    def test_constructor_non_default(self):
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        display_name = 'display_name'
-        serve_nodes = 8
-        client = object()
-
-        cluster = self._makeOne(zone, cluster_id, client,
-                                display_name=display_name,
-                                serve_nodes=serve_nodes)
-        self.assertEqual(cluster.zone, zone)
-        self.assertEqual(cluster.cluster_id, cluster_id)
-        self.assertEqual(cluster.display_name, display_name)
-        self.assertEqual(cluster.serve_nodes, serve_nodes)
-        self.assertTrue(cluster._client is client)
-
-    def test_copy(self):
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        display_name = 'display_name'
-        serve_nodes = 8
-
-        client = _Client(project)
-        cluster = self._makeOne(zone, cluster_id, client,
-                                display_name=display_name,
-                                serve_nodes=serve_nodes)
-        new_cluster = cluster.copy()
-
-        # Make sure the client copy succeeded.
-        self.assertFalse(new_cluster._client is client)
-        self.assertEqual(new_cluster._client, client)
-        # Make sure the cluster got copied to a new instance.
- self.assertFalse(cluster is new_cluster) - self.assertEqual(cluster, new_cluster) - - def test_table_factory(self): - from gcloud.bigtable.table import Table - - zone = 'zone' - cluster_id = 'cluster-id' - cluster = self._makeOne(zone, cluster_id, None) - - table_id = 'table_id' - table = cluster.table(table_id) - self.assertTrue(isinstance(table, Table)) - self.assertEqual(table.table_id, table_id) - self.assertEqual(table._cluster, cluster) - - def test__update_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - display_name = 'display_name' - serve_nodes = 8 - cluster_pb = data_pb2.Cluster( - display_name=display_name, - serve_nodes=serve_nodes, - ) - - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, display_name) - self.assertEqual(cluster.serve_nodes, serve_nodes) - - def test__update_from_pb_no_display_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = data_pb2.Cluster(serve_nodes=331) - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - - def test__update_from_pb_no_serve_nodes(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = data_pb2.Cluster(display_name='name') - cluster = self._makeOne(None, None, None) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.display_name, None) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - - def test_from_pb_success(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - client = _Client(project=project) - - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster_pb = data_pb2.Cluster( - name=cluster_name, - display_name=cluster_id, - serve_nodes=331, - ) - - klass = self._getTargetClass() - cluster = klass.from_pb(cluster_pb, client) - self.assertTrue(isinstance(cluster, klass)) - self.assertEqual(cluster._client, client) - self.assertEqual(cluster.zone, zone) - self.assertEqual(cluster.cluster_id, cluster_id) - - def test_from_pb_bad_cluster_name(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - - cluster_name = 'INCORRECT_FORMAT' - cluster_pb = data_pb2.Cluster(name=cluster_name) - - klass = self._getTargetClass() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, None) - - def test_from_pb_project_mistmatch(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - alt_project = 'ALT_PROJECT' - client = _Client(project=alt_project) - - self.assertNotEqual(project, 
alt_project)
-
-        cluster_name = ('projects/' + project + '/zones/' + zone +
-                        '/clusters/' + cluster_id)
-        cluster_pb = data_pb2.Cluster(name=cluster_name)
-
-        klass = self._getTargetClass()
-        with self.assertRaises(ValueError):
-            klass.from_pb(cluster_pb, client)
-
-    def test_name_property(self):
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        client = _Client(project=project)
-
-        cluster = self._makeOne(zone, cluster_id, client)
-        cluster_name = ('projects/' + project + '/zones/' + zone +
-                        '/clusters/' + cluster_id)
-        self.assertEqual(cluster.name, cluster_name)
-
-    def test___eq__(self):
-        zone = 'zone'
-        cluster_id = 'cluster_id'
-        client = object()
-        cluster1 = self._makeOne(zone, cluster_id, client)
-        cluster2 = self._makeOne(zone, cluster_id, client)
-        self.assertEqual(cluster1, cluster2)
-
-    def test___eq__type_differ(self):
-        cluster1 = self._makeOne('zone', 'cluster_id', 'client')
-        cluster2 = object()
-        self.assertNotEqual(cluster1, cluster2)
-
-    def test___ne__same_value(self):
-        zone = 'zone'
-        cluster_id = 'cluster_id'
-        client = object()
-        cluster1 = self._makeOne(zone, cluster_id, client)
-        cluster2 = self._makeOne(zone, cluster_id, client)
-        comparison_val = (cluster1 != cluster2)
-        self.assertFalse(comparison_val)
-
-    def test___ne__(self):
-        cluster1 = self._makeOne('zone1', 'cluster_id1', 'client1')
-        cluster2 = self._makeOne('zone2', 'cluster_id2', 'client2')
-        self.assertNotEqual(cluster1, cluster2)
-
-    def test_reload(self):
-        from gcloud.bigtable._generated import (
-            bigtable_cluster_data_pb2 as data_pb2)
-        from gcloud.bigtable._generated import (
-            bigtable_cluster_service_messages_pb2 as messages_pb2)
-        from gcloud.bigtable._testing import _FakeStub
-        from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
-
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        timeout_seconds = 123
-
-        client = _Client(project, timeout_seconds=timeout_seconds)
-        cluster = self._makeOne(zone, cluster_id, client)
-
-        # Create request_pb
-        cluster_name = ('projects/' + project + '/zones/' + zone +
-                        '/clusters/' + cluster_id)
-        request_pb = messages_pb2.GetClusterRequest(name=cluster_name)
-
-        # Create response_pb
-        serve_nodes = 31
-        display_name = u'hey-hi-hello'
-        response_pb = data_pb2.Cluster(
-            display_name=display_name,
-            serve_nodes=serve_nodes,
-        )
-
-        # Patch the stub used by the API method.
-        client._cluster_stub = stub = _FakeStub(response_pb)
-
-        # Create expected_result.
-        expected_result = None  # reload() has no return value.
-
-        # Check Cluster optional config values before.
-        self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
-        self.assertEqual(cluster.display_name, cluster_id)
-
-        # Perform the method and check the result.
-        result = cluster.reload()
-        self.assertEqual(result, expected_result)
-        self.assertEqual(stub.method_calls, [(
-            'GetCluster',
-            (request_pb, timeout_seconds),
-            {},
-        )])
-
-        # Check Cluster optional config values after.
- self.assertEqual(cluster.serve_nodes, serve_nodes) - self.assertEqual(cluster.display_name, display_name) - - def test_create(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 578 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb. Just a mock since we monkey patch - # _prepare_create_request - request_pb = object() - - # Create response_pb - op_id = 5678 - op_begin = object() - op_name = ('operations/projects/%s/zones/%s/clusters/%s/' - 'operations/%d' % (project, zone, cluster_id, op_id)) - current_op = operations_pb2.Operation(name=op_name) - response_pb = data_pb2.Cluster(current_operation=current_op) - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = MUT.Operation('create', op_id, op_begin, - cluster=cluster) - - # Create the mocks. - prep_create_called = [] - - def mock_prep_create_req(cluster): - prep_create_called.append(cluster) - return request_pb - - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. - with _Monkey(MUT, _prepare_create_request=mock_prep_create_req, - _process_operation=mock_process_operation): - result = cluster.create() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CreateCluster', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(prep_create_called, [cluster]) - self.assertEqual(process_operation_called, [current_op]) - - def test_update(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - serve_nodes = 81 - display_name = 'display_name' - timeout_seconds = 9 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client, - display_name=display_name, - serve_nodes=serve_nodes) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = data_pb2.Cluster( - name=cluster_name, - display_name=display_name, - serve_nodes=serve_nodes, - ) - - # Create response_pb - current_op = operations_pb2.Operation() - response_pb = data_pb2.Cluster(current_operation=current_op) - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('update', op_id, op_begin, - cluster=cluster) - - # Create mocks - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. 
- with _Monkey(MUT, _process_operation=mock_process_operation): - result = cluster.update() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UpdateCluster', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(process_operation_called, [current_op]) - - def test_delete(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 57 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_pb2.DeleteClusterRequest(name=cluster_name) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. - result = cluster.delete() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - - def test_undelete(self): - from google.longrunning import operations_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import cluster as MUT - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 78 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_pb - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = messages_pb2.UndeleteClusterRequest(name=cluster_name) - - # Create response_pb - response_pb = operations_pb2.Operation() - - # Patch the stub used by the API method. - client._cluster_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - op_id = 5678 - op_begin = object() - expected_result = MUT.Operation('undelete', op_id, op_begin, - cluster=cluster) - - # Create the mocks. - process_operation_called = [] - - def mock_process_operation(operation_pb): - process_operation_called.append(operation_pb) - return op_id, op_begin - - # Perform the method and check the result. 
- with _Monkey(MUT, _process_operation=mock_process_operation): - result = cluster.undelete() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UndeleteCluster', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(process_operation_called, [response_pb]) - - def _list_tables_helper(self, table_id, table_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as table_data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as table_messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - timeout_seconds = 45 - - client = _Client(project, timeout_seconds=timeout_seconds) - cluster = self._makeOne(zone, cluster_id, client) - - # Create request_ - cluster_name = ('projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id) - request_pb = table_messages_pb2.ListTablesRequest(name=cluster_name) - - # Create response_pb - table_name = table_name or (cluster_name + '/tables/' + table_id) - response_pb = table_messages_pb2.ListTablesResponse( - tables=[ - table_data_pb2.Table(name=table_name), - ], - ) - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_table = cluster.table(table_id) - expected_result = [expected_table] - - # Perform the method and check the result. - result = cluster.list_tables() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListTables', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_tables(self): - table_id = 'table_id' - self._list_tables_helper(table_id) - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(None, table_name='wrong-format') - - def test_list_tables_failure_name_bad_before(self): - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - - table_id = 'table_id' - bad_table_name = ('nonempty-section-before' + - 'projects/' + project + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - with self.assertRaises(ValueError): - self._list_tables_helper(table_id, table_name=bad_table_name) - - -class Test__prepare_create_request(unittest2.TestCase): - - def _callFUT(self, cluster): - from gcloud.bigtable.cluster import _prepare_create_request - return _prepare_create_request(cluster) - - def test_it(self): - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.cluster import Cluster - - project = 'PROJECT' - zone = 'zone' - cluster_id = 'cluster-id' - display_name = u'DISPLAY_NAME' - serve_nodes = 8 - client = _Client(project) - - cluster = Cluster(zone, cluster_id, client, - display_name=display_name, serve_nodes=serve_nodes) - request_pb = self._callFUT(cluster) - self.assertTrue(isinstance(request_pb, - messages_pb2.CreateClusterRequest)) - self.assertEqual(request_pb.cluster_id, cluster_id) - self.assertEqual(request_pb.name, - 'projects/' + project + '/zones/' + zone) - self.assertTrue(isinstance(request_pb.cluster, data_pb2.Cluster)) - self.assertEqual(request_pb.cluster.display_name, display_name) - self.assertEqual(request_pb.cluster.serve_nodes, serve_nodes) - - -class Test__parse_pb_any_to_native(unittest2.TestCase): - - def _callFUT(self, any_val, expected_type=None): - from 
gcloud.bigtable.cluster import _parse_pb_any_to_native - return _parse_pb_any_to_native(any_val, expected_type=expected_type) - - def test_with_known_type_url(self): - from google.protobuf import any_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable import cluster as MUT - - type_url = 'type.googleapis.com/' + data_pb2._CELL.full_name - fake_type_url_map = {type_url: data_pb2.Cell} - - cell = data_pb2.Cell( - timestamp_micros=0, - value=b'foobar', - ) - any_val = any_pb2.Any( - type_url=type_url, - value=cell.SerializeToString(), - ) - with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): - result = self._callFUT(any_val) - - self.assertEqual(result, cell) - - def test_with_create_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._CREATECLUSTERMETADATA.full_name) - metadata = messages_pb2.CreateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - original_request=messages_pb2.CreateClusterRequest( - name='foo', - cluster_id='bar', - cluster=data_pb2.Cluster( - display_name='quux', - serve_nodes=1337, - ), - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_update_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._UPDATECLUSTERMETADATA.full_name) - metadata = messages_pb2.UpdateClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - cancel_time=Timestamp(seconds=100, nanos=76543), - original_request=data_pb2.Cluster( - display_name='the-end', - serve_nodes=42, - ), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_with_undelete_cluster_metadata(self): - from google.protobuf import any_pb2 - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated import ( - bigtable_cluster_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_cluster_service_messages_pb2 as messages_pb2) - - type_url = ('type.googleapis.com/' + - messages_pb2._UNDELETECLUSTERMETADATA.full_name) - metadata = messages_pb2.UndeleteClusterMetadata( - request_time=Timestamp(seconds=1, nanos=1234), - finish_time=Timestamp(seconds=10, nanos=891011), - ) - - any_val = any_pb2.Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - result = self._callFUT(any_val) - self.assertEqual(result, metadata) - - def test_unknown_type_url(self): - from google.protobuf import any_pb2 - from gcloud._testing import _Monkey - from gcloud.bigtable import cluster as MUT - - fake_type_url_map = {} - any_val = any_pb2.Any() - with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map): - with self.assertRaises(KeyError): - 
                self._callFUT(any_val)
-
-    def test_disagreeing_type_url(self):
-        from google.protobuf import any_pb2
-        from gcloud._testing import _Monkey
-        from gcloud.bigtable import cluster as MUT
-
-        type_url1 = 'foo'
-        type_url2 = 'bar'
-        fake_type_url_map = {type_url1: None}
-        any_val = any_pb2.Any(type_url=type_url2)
-        with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
-            with self.assertRaises(ValueError):
-                self._callFUT(any_val, expected_type=type_url1)
-
-
-class Test__process_operation(unittest2.TestCase):
-
-    def _callFUT(self, operation_pb):
-        from gcloud.bigtable.cluster import _process_operation
-        return _process_operation(operation_pb)
-
-    def test_it(self):
-        from google.longrunning import operations_pb2
-        from gcloud._testing import _Monkey
-        from gcloud.bigtable._generated import (
-            bigtable_cluster_service_messages_pb2 as messages_pb2)
-        from gcloud.bigtable import cluster as MUT
-
-        project = 'PROJECT'
-        zone = 'zone'
-        cluster_id = 'cluster-id'
-        expected_operation_id = 234
-        operation_name = ('operations/projects/%s/zones/%s/clusters/%s/'
-                          'operations/%d' % (project, zone, cluster_id,
-                                             expected_operation_id))
-
-        current_op = operations_pb2.Operation(name=operation_name)
-
-        # Create mocks.
-        request_metadata = messages_pb2.CreateClusterMetadata()
-        parse_pb_any_called = []
-
-        def mock_parse_pb_any_to_native(any_val, expected_type=None):
-            parse_pb_any_called.append((any_val, expected_type))
-            return request_metadata
-
-        expected_operation_begin = object()
-        ts_to_dt_called = []
-
-        def mock_pb_timestamp_to_datetime(timestamp):
-            ts_to_dt_called.append(timestamp)
-            return expected_operation_begin
-
-        # Execute method with mocks in place.
-        with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native,
-                     _pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime):
-            operation_id, operation_begin = self._callFUT(current_op)
-
-        # Check outputs.
-        self.assertEqual(operation_id, expected_operation_id)
-        self.assertTrue(operation_begin is expected_operation_begin)
-
-        # Check mocks were used correctly.
-        self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)])
-        self.assertEqual(ts_to_dt_called, [request_metadata.request_time])
-
-    def test_op_name_parsing_failure(self):
-        from google.longrunning import operations_pb2
-        from gcloud.bigtable._generated import (
-            bigtable_cluster_data_pb2 as data_pb2)
-
-        current_op = operations_pb2.Operation(name='invalid')
-        cluster = data_pb2.Cluster(current_operation=current_op)
-        with self.assertRaises(ValueError):
-            self._callFUT(cluster)
-
-
-class _Client(object):
-
-    def __init__(self, project, timeout_seconds=None):
-        self.project = project
-        self.project_name = 'projects/' + self.project
-        self.timeout_seconds = timeout_seconds
-
-    def copy(self):
-        from copy import deepcopy
-        return deepcopy(self)
-
-    def __eq__(self, other):
-        return (other.project == self.project and
-                other.project_name == self.project_name and
-                other.timeout_seconds == self.timeout_seconds)
diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py
deleted file mode 100644
index 139a959e0a7b..000000000000
--- a/gcloud/bigtable/test_column_family.py
+++ /dev/null
@@ -1,649 +0,0 @@
-# Copyright 2015 Google Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class Test__timedelta_to_duration_pb(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.column_family import _timedelta_to_duration_pb - return _timedelta_to_duration_pb(*args, **kwargs) - - def test_it(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = microseconds = 1 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._callFUT(timedelta_val) - self.assertTrue(isinstance(result, duration_pb2.Duration)) - self.assertEqual(result.seconds, seconds) - self.assertEqual(result.nanos, 1000 * microseconds) - - def test_with_negative_microseconds(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = 1 - microseconds = -5 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._callFUT(timedelta_val) - self.assertTrue(isinstance(result, duration_pb2.Duration)) - self.assertEqual(result.seconds, seconds - 1) - self.assertEqual(result.nanos, 10**9 + 1000 * microseconds) - - def test_with_negative_seconds(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = -1 - microseconds = 5 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._callFUT(timedelta_val) - self.assertTrue(isinstance(result, duration_pb2.Duration)) - self.assertEqual(result.seconds, seconds + 1) - self.assertEqual(result.nanos, -(10**9 - 1000 * microseconds)) - - -class Test__duration_pb_to_timedelta(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.column_family import _duration_pb_to_timedelta - return _duration_pb_to_timedelta(*args, **kwargs) - - def test_it(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = microseconds = 1 - duration_pb = duration_pb2.Duration(seconds=seconds, - nanos=1000 * microseconds) - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._callFUT(duration_pb) - self.assertTrue(isinstance(result, datetime.timedelta)) - self.assertEqual(result, timedelta_val) - - -class TestMaxVersionsGCRule(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - return MaxVersionsGCRule - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test___eq__type_differ(self): - gc_rule1 = self._makeOne(10) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___eq__same_value(self): - gc_rule1 = self._makeOne(2) - gc_rule2 = self._makeOne(2) - self.assertEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - gc_rule1 = self._makeOne(99) - gc_rule2 = self._makeOne(99) - comparison_val = (gc_rule1 != gc_rule2) - self.assertFalse(comparison_val) - - def test_to_pb(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - max_num_versions = 1337 - gc_rule = self._makeOne(max_num_versions=max_num_versions) - pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, - 
data_pb2.GcRule(max_num_versions=max_num_versions)) - - -class TestMaxAgeGCRule(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.column_family import MaxAgeGCRule - return MaxAgeGCRule - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test___eq__type_differ(self): - max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___eq__same_value(self): - max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) - gc_rule2 = self._makeOne(max_age=max_age) - self.assertEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) - gc_rule2 = self._makeOne(max_age=max_age) - comparison_val = (gc_rule1 != gc_rule2) - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - - max_age = datetime.timedelta(seconds=1) - duration = duration_pb2.Duration(seconds=1) - gc_rule = self._makeOne(max_age=max_age) - pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, data_pb2.GcRule(max_age=duration)) - - -class TestGCRuleUnion(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.column_family import GCRuleUnion - return GCRuleUnion - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_union = self._makeOne(rules) - self.assertTrue(rule_union.rules is rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) - comparison_val = (gc_rule1 != gc_rule2) - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._makeOne(rules=[rule1, 
rule2]) - pb_rule3 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule1, pb_rule2])) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) - - rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - union=data_pb2.GcRule.Union(rules=[pb_rule3, pb_rule4])) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestGCRuleIntersection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.column_family import GCRuleIntersection - return GCRuleIntersection - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_intersection = self._makeOne(rules) - self.assertTrue(rule_intersection.rules is rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) - comparison_val = (gc_rule1 != gc_rule2) - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( - rules=[pb_rule1, pb_rule2])) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = data_pb2.GcRule(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = data_pb2.GcRule(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._makeOne(rules=[rule1, rule2]) - pb_rule3 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( - rules=[pb_rule1, pb_rule2])) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = data_pb2.GcRule(max_num_versions=max_num_versions2) - - rule5 = self._makeOne(rules=[rule3, rule4]) - pb_rule5 = data_pb2.GcRule( - intersection=data_pb2.GcRule.Intersection( - rules=[pb_rule3, pb_rule4])) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestColumnFamily(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.column_family import ColumnFamily - return ColumnFamily - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - 
column_family_id = u'column-family-id' - table = object() - gc_rule = object() - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) - - self.assertEqual(column_family.column_family_id, column_family_id) - self.assertTrue(column_family._table is table) - self.assertTrue(column_family.gc_rule is gc_rule) - - def test_name_property(self): - column_family_id = u'column-family-id' - table_name = 'table_name' - table = _Table(table_name) - column_family = self._makeOne(column_family_id, table) - - expected_name = table_name + '/columnFamilies/' + column_family_id - self.assertEqual(column_family.name, expected_name) - - def test___eq__(self): - column_family_id = 'column_family_id' - table = object() - gc_rule = object() - column_family1 = self._makeOne(column_family_id, table, - gc_rule=gc_rule) - column_family2 = self._makeOne(column_family_id, table, - gc_rule=gc_rule) - self.assertEqual(column_family1, column_family2) - - def test___eq__type_differ(self): - column_family1 = self._makeOne('column_family_id', None) - column_family2 = object() - self.assertNotEqual(column_family1, column_family2) - - def test___ne__same_value(self): - column_family_id = 'column_family_id' - table = object() - gc_rule = object() - column_family1 = self._makeOne(column_family_id, table, - gc_rule=gc_rule) - column_family2 = self._makeOne(column_family_id, table, - gc_rule=gc_rule) - comparison_val = (column_family1 != column_family2) - self.assertFalse(comparison_val) - - def test___ne__(self): - column_family1 = self._makeOne('column_family_id1', None) - column_family2 = self._makeOne('column_family_id2', None) - self.assertNotEqual(column_family1, column_family2) - - def _create_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - timeout_seconds = 4 - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - column_family_pb = data_pb2.ColumnFamily() - else: - column_family_pb = data_pb2.ColumnFamily(gc_rule=gc_rule.to_pb()) - request_pb = messages_pb2.CreateColumnFamilyRequest( - name=table_name, - column_family_id=column_family_id, - column_family=column_family_pb, - ) - - # Create response_pb - response_pb = data_pb2.ColumnFamily() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # create() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.create() - self.assertEqual(stub.results, ()) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CreateColumnFamily', - (request_pb, timeout_seconds), - {}, - )]) - - def test_create(self): - self._create_test_helper(gc_rule=None) - - def test_create_with_gc_rule(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - gc_rule = MaxVersionsGCRule(1337) - self._create_test_helper(gc_rule=gc_rule) - - def _update_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - timeout_seconds = 28 - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id - - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - request_pb = data_pb2.ColumnFamily(name=column_family_name) - else: - request_pb = data_pb2.ColumnFamily( - name=column_family_name, - gc_rule=gc_rule.to_pb(), - ) - - # Create response_pb - response_pb = data_pb2.ColumnFamily() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # update() has no return value. - - # Perform the method and check the result. - self.assertEqual(stub.results, (response_pb,)) - result = column_family.update() - self.assertEqual(stub.results, ()) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UpdateColumnFamily', - (request_pb, timeout_seconds), - {}, - )]) - - def test_update(self): - self._update_test_helper(gc_rule=None) - - def test_update_with_gc_rule(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - gc_rule = MaxVersionsGCRule(1337) - self._update_test_helper(gc_rule=gc_rule) - - def test_delete(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - timeout_seconds = 7 - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) - column_family_name = table_name + '/columnFamilies/' + column_family_id - - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table) - - # Create request_pb - request_pb = messages_pb2.DeleteColumnFamilyRequest( - name=column_family_name) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.delete() - self.assertEqual(stub.results, ()) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteColumnFamily', - (request_pb, timeout_seconds), - {}, - )]) - - -class Test__gc_rule_from_pb(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.bigtable.column_family import _gc_rule_from_pb - return _gc_rule_from_pb(*args, **kwargs) - - def test_empty(self): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - - gc_rule_pb = data_pb2.GcRule() - self.assertEqual(self._callFUT(gc_rule_pb), None) - - def test_max_num_versions(self): - from gcloud.bigtable.column_family import MaxVersionsGCRule - - orig_rule = MaxVersionsGCRule(1) - gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) - self.assertTrue(isinstance(result, MaxVersionsGCRule)) - self.assertEqual(result, orig_rule) - - def test_max_age(self): - import datetime - from gcloud.bigtable.column_family import MaxAgeGCRule - - orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) - gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) - self.assertTrue(isinstance(result, MaxAgeGCRule)) - self.assertEqual(result, orig_rule) - - def test_union(self): - import datetime - from gcloud.bigtable.column_family import GCRuleUnion - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleUnion([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) - self.assertTrue(isinstance(result, GCRuleUnion)) - self.assertEqual(result, orig_rule) - - def test_intersection(self): - import datetime - from gcloud.bigtable.column_family import GCRuleIntersection - from gcloud.bigtable.column_family import MaxAgeGCRule - from gcloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleIntersection([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) - self.assertTrue(isinstance(result, GCRuleIntersection)) - self.assertEqual(result, orig_rule) - - def test_unknown_field_name(self): - class MockProto(object): - - names = [] - - @classmethod - def WhichOneof(cls, name): - cls.names.append(name) - return 'unknown' - - self.assertEqual(MockProto.names, []) - self.assertRaises(ValueError, self._callFUT, MockProto) - self.assertEqual(MockProto.names, ['rule']) - - -class _Cluster(object): - - def __init__(self, client=None): - self._client = client - - -class _Client(object): - - def __init__(self, timeout_seconds=None): - self.timeout_seconds = timeout_seconds - - -class _Table(object): - - def __init__(self, name, client=None): - self.name = name - self._cluster = _Cluster(client) diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py deleted file mode 100644 index 9e6da708e6b6..000000000000 --- a/gcloud/bigtable/test_row.py +++ /dev/null @@ -1,851 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class Test_SetDeleteRow(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row import _SetDeleteRow - return _SetDeleteRow - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test__get_mutations_virtual(self): - row = self._makeOne(b'row-key', None) - with self.assertRaises(NotImplementedError): - row._get_mutations(None) - - -class TestDirectRow(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row import DirectRow - return DirectRow - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - row_key = b'row_key' - table = object() - - row = self._makeOne(row_key, table) - self.assertEqual(row._row_key, row_key) - self.assertTrue(row._table is table) - self.assertEqual(row._pb_mutations, []) - - def test_constructor_with_unicode(self): - row_key = u'row_key' - row_key_bytes = b'row_key' - table = object() - - row = self._makeOne(row_key, table) - self.assertEqual(row._row_key, row_key_bytes) - self.assertTrue(row._table is table) - - def test_constructor_with_non_bytes(self): - row_key = object() - with self.assertRaises(TypeError): - self._makeOne(row_key, None) - - def test__get_mutations(self): - row_key = b'row_key' - row = self._makeOne(row_key, None) - - row._pb_mutations = mutations = object() - self.assertTrue(mutations is row._get_mutations(None)) - - def _set_cell_helper(self, column=None, column_bytes=None, - value=b'foobar', timestamp=None, - timestamp_micros=-1): - import six - import struct - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_key = b'row_key' - column_family_id = u'column_family_id' - if column is None: - column = b'column' - table = object() - row = self._makeOne(row_key, table) - self.assertEqual(row._pb_mutations, []) - row.set_cell(column_family_id, column, - value, timestamp=timestamp) - - if isinstance(value, six.integer_types): - value = struct.pack('>q', value) - expected_pb = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( - family_name=column_family_id, - column_qualifier=column_bytes or column, - timestamp_micros=timestamp_micros, - value=value, - ), - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_set_cell(self): - self._set_cell_helper() - - def test_set_cell_with_string_column(self): - column_bytes = b'column' - column_non_bytes = u'column' - self._set_cell_helper(column=column_non_bytes, - column_bytes=column_bytes) - - def test_set_cell_with_integer_value(self): - value = 1337 - self._set_cell_helper(value=value) - - def test_set_cell_with_non_bytes_value(self): - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - value = object() # Not bytes - with self.assertRaises(TypeError): - row.set_cell(column_family_id, column, value) - - def test_set_cell_with_non_null_timestamp(self): - import datetime - from gcloud._helpers import _EPOCH - - microseconds = 898294371 - millis_granularity = microseconds - 
(microseconds % 1000) - timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) - self._set_cell_helper(timestamp=timestamp, - timestamp_micros=millis_granularity) - - def test_delete(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_key = b'row_key' - row = self._makeOne(row_key, object()) - self.assertEqual(row._pb_mutations, []) - row.delete() - - expected_pb = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cell(self): - klass = self._getTargetClass() - - class MockRow(klass): - - def __init__(self, *args, **kwargs): - super(MockRow, self).__init__(*args, **kwargs) - self._args = [] - self._kwargs = [] - - # Replace the called method with one that logs arguments. - def _delete_cells(self, *args, **kwargs): - self._args.append(args) - self._kwargs.append(kwargs) - - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' - table = object() - - mock_row = MockRow(row_key, table) - # Make sure no values are set before calling the method. - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, []) - self.assertEqual(mock_row._kwargs, []) - - # Actually make the request against the mock class. - time_range = object() - mock_row.delete_cell(column_family_id, column, time_range=time_range) - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, [(column_family_id, [column])]) - self.assertEqual(mock_row._kwargs, [{ - 'state': None, - 'time_range': time_range, - }]) - - def test_delete_cells_non_iterable(self): - row_key = b'row_key' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - columns = object() # Not iterable - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - - def test_delete_cells_all_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_key = b'row_key' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - klass = self._getTargetClass() - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, klass.ALL_COLUMNS) - - expected_pb = data_pb2.Mutation( - delete_from_family=data_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id, - ), - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cells_no_columns(self): - row_key = b'row_key' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - columns = [] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - - def _delete_cells_helper(self, time_range=None): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - columns = [column] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns, time_range=time_range) - - expected_pb = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( - family_name=column_family_id, - column_qualifier=column, - ), - ) - if time_range is not None: - expected_pb.delete_from_column.time_range.CopyFrom( - time_range.to_pb()) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cells_no_time_range(self): - 
self._delete_cells_helper() - - def test_delete_cells_with_time_range(self): - import datetime - from gcloud._helpers import _EPOCH - from gcloud.bigtable.row_filters import TimestampRange - - microseconds = 30871000 # Makes sure already milliseconds granularity - start = _EPOCH + datetime.timedelta(microseconds=microseconds) - time_range = TimestampRange(start=start) - self._delete_cells_helper(time_range=time_range) - - def test_delete_cells_with_bad_column(self): - # This makes sure a failure on one of the columns doesn't leave - # the row's mutations in a bad state. - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' - table = object() - - row = self._makeOne(row_key, table) - columns = [column, object()] - self.assertEqual(row._pb_mutations, []) - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - - def test_delete_cells_with_string_columns(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_key = b'row_key' - column_family_id = u'column_family_id' - column1 = u'column1' - column1_bytes = b'column1' - column2 = u'column2' - column2_bytes = b'column2' - table = object() - - row = self._makeOne(row_key, table) - columns = [column1, column2] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) - - expected_pb1 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( - family_name=column_family_id, - column_qualifier=column1_bytes, - ), - ) - expected_pb2 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( - family_name=column_family_id, - column_qualifier=column2_bytes, - ), - ) - self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) - - def test_commit(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id = u'column_family_id' - column = b'column' - timeout_seconds = 711 - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - row = self._makeOne(row_key, table) - - # Create request_pb - value = b'bytes-value' - mutation = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( - family_name=column_family_id, - column_qualifier=column, - timestamp_micros=-1, # Default value. - value=value, - ), - ) - request_pb = messages_pb2.MutateRowRequest( - table_name=table_name, - row_key=row_key, - mutations=[mutation], - ) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # commit() has no return value when no filter. - - # Perform the method and check the result. 
- row.set_cell(column_family_id, column, value) - result = row.commit() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'MutateRow', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(row._pb_mutations, []) - - def test_commit_too_many_mutations(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import row as MUT - - row_key = b'row_key' - table = object() - row = self._makeOne(row_key, table) - row._pb_mutations = [1, 2, 3] - num_mutations = len(row._pb_mutations) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - def test_commit_no_mutations(self): - from gcloud.bigtable._testing import _FakeStub - - row_key = b'row_key' - client = _Client() - table = _Table(None, client=client) - row = self._makeOne(row_key, table) - self.assertEqual(row._pb_mutations, []) - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertEqual(result, None) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) - - -class TestConditionalRow(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row import ConditionalRow - return ConditionalRow - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - row_key = b'row_key' - table = object() - filter_ = object() - - row = self._makeOne(row_key, table, filter_=filter_) - self.assertEqual(row._row_key, row_key) - self.assertTrue(row._table is table) - self.assertTrue(row._filter is filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test__get_mutations(self): - row_key = b'row_key' - filter_ = object() - row = self._makeOne(row_key, None, filter_=filter_) - - row._true_pb_mutations = true_mutations = object() - row._false_pb_mutations = false_mutations = object() - self.assertTrue(true_mutations is row._get_mutations(True)) - self.assertTrue(false_mutations is row._get_mutations(False)) - self.assertTrue(false_mutations is row._get_mutations(None)) - - def test_commit(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable.row_filters import RowSampleFilter - - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id1 = u'column_family_id1' - column_family_id2 = u'column_family_id2' - column_family_id3 = u'column_family_id3' - column1 = b'column1' - column2 = b'column2' - timeout_seconds = 262 - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - row_filter = RowSampleFilter(0.33) - row = self._makeOne(row_key, table, filter_=row_filter) - - # Create request_pb - value1 = b'bytes-value' - mutation1 = data_pb2.Mutation( - set_cell=data_pb2.Mutation.SetCell( - family_name=column_family_id1, - column_qualifier=column1, - timestamp_micros=-1, # Default value. 
- value=value1, - ), - ) - mutation2 = data_pb2.Mutation( - delete_from_row=data_pb2.Mutation.DeleteFromRow(), - ) - mutation3 = data_pb2.Mutation( - delete_from_column=data_pb2.Mutation.DeleteFromColumn( - family_name=column_family_id2, - column_qualifier=column2, - ), - ) - mutation4 = data_pb2.Mutation( - delete_from_family=data_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id3, - ), - ) - request_pb = messages_pb2.CheckAndMutateRowRequest( - table_name=table_name, - row_key=row_key, - predicate_filter=row_filter.to_pb(), - true_mutations=[mutation1, mutation3, mutation4], - false_mutations=[mutation2], - ) - - # Create response_pb - predicate_matched = True - response_pb = messages_pb2.CheckAndMutateRowResponse( - predicate_matched=predicate_matched) - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = predicate_matched - - # Perform the method and check the result. - row.set_cell(column_family_id1, column1, value1, state=True) - row.delete(state=False) - row.delete_cell(column_family_id2, column2, state=True) - row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) - result = row.commit() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CheckAndMutateRow', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test_commit_too_many_mutations(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import row as MUT - - row_key = b'row_key' - table = object() - filter_ = object() - row = self._makeOne(row_key, table, filter_=filter_) - row._true_pb_mutations = [1, 2, 3] - num_mutations = len(row._true_pb_mutations) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - def test_commit_no_mutations(self): - from gcloud.bigtable._testing import _FakeStub - - row_key = b'row_key' - client = _Client() - table = _Table(None, client=client) - filter_ = object() - row = self._makeOne(row_key, table, filter_=filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertEqual(result, None) - # Make sure no request was sent. 
- self.assertEqual(stub.method_calls, []) - - -class TestAppendRow(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row import AppendRow - return AppendRow - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - row_key = b'row_key' - table = object() - - row = self._makeOne(row_key, table) - self.assertEqual(row._row_key, row_key) - self.assertTrue(row._table is table) - self.assertEqual(row._rule_pb_list, []) - - def test_clear(self): - row_key = b'row_key' - table = object() - row = self._makeOne(row_key, table) - row._rule_pb_list = [1, 2, 3] - row.clear() - self.assertEqual(row._rule_pb_list, []) - - def test_append_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - table = object() - row_key = b'row_key' - row = self._makeOne(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - column = b'column' - column_family_id = u'column_family_id' - value = b'bytes-val' - row.append_cell_value(column_family_id, column, value) - expected_pb = data_pb2.ReadModifyWriteRule( - family_name=column_family_id, column_qualifier=column, - append_value=value) - self.assertEqual(row._rule_pb_list, [expected_pb]) - - def test_increment_cell_value(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - table = object() - row_key = b'row_key' - row = self._makeOne(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - column = b'column' - column_family_id = u'column_family_id' - int_value = 281330 - row.increment_cell_value(column_family_id, column, int_value) - expected_pb = data_pb2.ReadModifyWriteRule( - family_name=column_family_id, column_qualifier=column, - increment_amount=int_value) - self.assertEqual(row._rule_pb_list, [expected_pb]) - - def test_commit(self): - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable import row as MUT - - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id = u'column_family_id' - column = b'column' - timeout_seconds = 87 - client = _Client(timeout_seconds=timeout_seconds) - table = _Table(table_name, client=client) - row = self._makeOne(row_key, table) - - # Create request_pb - value = b'bytes-value' - # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). - request_pb = messages_pb2.ReadModifyWriteRowRequest( - table_name=table_name, - row_key=row_key, - rules=[ - data_pb2.ReadModifyWriteRule( - family_name=column_family_id, - column_qualifier=column, - append_value=value, - ), - ], - ) - - # Create response_pb - response_pb = object() - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - row_responses = [] - expected_result = object() - - def mock_parse_rmw_row_response(row_response): - row_responses.append(row_response) - return expected_result - - # Perform the method and check the result. 
- with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): - row.append_cell_value(column_family_id, column, value) - result = row.commit() - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadModifyWriteRow', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(row_responses, [response_pb]) - self.assertEqual(row._rule_pb_list, []) - - def test_commit_no_rules(self): - from gcloud.bigtable._testing import _FakeStub - - row_key = b'row_key' - client = _Client() - table = _Table(None, client=client) - row = self._makeOne(row_key, table) - self.assertEqual(row._rule_pb_list, []) - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertEqual(result, {}) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) - - def test_commit_too_many_mutations(self): - from gcloud._testing import _Monkey - from gcloud.bigtable import row as MUT - - row_key = b'row_key' - table = object() - row = self._makeOne(row_key, table) - row._rule_pb_list = [1, 2, 3] - num_mutations = len(row._rule_pb_list) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - -class Test__parse_rmw_row_response(unittest2.TestCase): - - def _callFUT(self, row_response): - from gcloud.bigtable.row import _parse_rmw_row_response - return _parse_rmw_row_response(row_response) - - def test_it(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - col_fam1 = u'col-fam-id' - col_fam2 = u'col-fam-id2' - col_name1 = b'col-name1' - col_name2 = b'col-name2' - col_name3 = b'col-name3-but-other-fam' - cell_val1 = b'cell-val' - cell_val2 = b'cell-val-newer' - cell_val3 = b'altcol-cell-val' - cell_val4 = b'foo' - - microseconds = 1000871 - timestamp = _datetime_from_microseconds(microseconds) - expected_output = { - col_fam1: { - col_name1: [ - (cell_val1, timestamp), - (cell_val2, timestamp), - ], - col_name2: [ - (cell_val3, timestamp), - ], - }, - col_fam2: { - col_name3: [ - (cell_val4, timestamp), - ], - }, - } - sample_input = data_pb2.Row( - families=[ - data_pb2.Family( - name=col_fam1, - columns=[ - data_pb2.Column( - qualifier=col_name1, - cells=[ - data_pb2.Cell( - value=cell_val1, - timestamp_micros=microseconds, - ), - data_pb2.Cell( - value=cell_val2, - timestamp_micros=microseconds, - ), - ], - ), - data_pb2.Column( - qualifier=col_name2, - cells=[ - data_pb2.Cell( - value=cell_val3, - timestamp_micros=microseconds, - ), - ], - ), - ], - ), - data_pb2.Family( - name=col_fam2, - columns=[ - data_pb2.Column( - qualifier=col_name3, - cells=[ - data_pb2.Cell( - value=cell_val4, - timestamp_micros=microseconds, - ), - ], - ), - ], - ), - ], - ) - self.assertEqual(expected_output, self._callFUT(sample_input)) - - -class Test__parse_family_pb(unittest2.TestCase): - - def _callFUT(self, family_pb): - from gcloud.bigtable.row import _parse_family_pb - return _parse_family_pb(family_pb) - - def test_it(self): - from gcloud._helpers import _datetime_from_microseconds - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - col_fam1 = u'col-fam-id' - col_name1 = b'col-name1' - col_name2 = b'col-name2' - cell_val1 = b'cell-val' - cell_val2 = b'cell-val-newer' - cell_val3 = b'altcol-cell-val' - - microseconds = 5554441037 - timestamp = _datetime_from_microseconds(microseconds) - expected_dict 
= { - col_name1: [ - (cell_val1, timestamp), - (cell_val2, timestamp), - ], - col_name2: [ - (cell_val3, timestamp), - ], - } - expected_output = (col_fam1, expected_dict) - sample_input = data_pb2.Family( - name=col_fam1, - columns=[ - data_pb2.Column( - qualifier=col_name1, - cells=[ - data_pb2.Cell( - value=cell_val1, - timestamp_micros=microseconds, - ), - data_pb2.Cell( - value=cell_val2, - timestamp_micros=microseconds, - ), - ], - ), - data_pb2.Column( - qualifier=col_name2, - cells=[ - data_pb2.Cell( - value=cell_val3, - timestamp_micros=microseconds, - ), - ], - ), - ], - ) - self.assertEqual(expected_output, self._callFUT(sample_input)) - - -class _Client(object): - - data_stub = None - - def __init__(self, timeout_seconds=None): - self.timeout_seconds = timeout_seconds - - -class _Cluster(object): - - def __init__(self, client=None): - self._client = client - - -class _Table(object): - - def __init__(self, name, client=None): - self.name = name - self._cluster = _Cluster(client) diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py deleted file mode 100644 index 56b1c15f0655..000000000000 --- a/gcloud/bigtable/test_row_data.py +++ /dev/null @@ -1,524 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import unittest2 - - -class TestCell(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_data import Cell - return Cell - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def _from_pb_test_helper(self, labels=None): - import datetime - from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - timestamp_micros = 18738724000 # Make sure millis granularity - timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) - value = b'value-bytes' - - if labels is None: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros) - cell_expected = self._makeOne(value, timestamp) - else: - cell_pb = data_pb2.Cell(value=value, - timestamp_micros=timestamp_micros, - labels=labels) - cell_expected = self._makeOne(value, timestamp, labels=labels) - - klass = self._getTargetClass() - result = klass.from_pb(cell_pb) - self.assertEqual(result, cell_expected) - - def test_from_pb(self): - self._from_pb_test_helper() - - def test_from_pb_with_labels(self): - labels = [u'label1', u'label2'] - self._from_pb_test_helper(labels) - - def test_constructor(self): - value = object() - timestamp = object() - cell = self._makeOne(value, timestamp) - self.assertEqual(cell.value, value) - self.assertEqual(cell.timestamp, timestamp) - - def test___eq__(self): - value = object() - timestamp = object() - cell1 = self._makeOne(value, timestamp) - cell2 = self._makeOne(value, timestamp) - self.assertEqual(cell1, cell2) - - def test___eq__type_differ(self): - cell1 = self._makeOne(None, None) - cell2 = object() - self.assertNotEqual(cell1, cell2) - - def test___ne__same_value(self): - value = object() - timestamp = object() - cell1 = self._makeOne(value, timestamp) - cell2 = self._makeOne(value, timestamp) - comparison_val = (cell1 != cell2) - self.assertFalse(comparison_val) - - def test___ne__(self): - value1 = 'value1' - value2 = 'value2' - timestamp = object() - cell1 = self._makeOne(value1, timestamp) - cell2 = self._makeOne(value2, timestamp) - self.assertNotEqual(cell1, cell2) - - -class TestPartialRowData(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_data import PartialRowData - return PartialRowData - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - row_key = object() - partial_row_data = self._makeOne(row_key) - self.assertTrue(partial_row_data._row_key is row_key) - self.assertEqual(partial_row_data._cells, {}) - self.assertFalse(partial_row_data._committed) - self.assertFalse(partial_row_data._chunks_encountered) - - def test___eq__(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data2 = self._makeOne(row_key) - self.assertEqual(partial_row_data1, partial_row_data2) - - def test___eq__type_differ(self): - partial_row_data1 = self._makeOne(None) - partial_row_data2 = object() - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__same_value(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data2 = self._makeOne(row_key) - comparison_val = (partial_row_data1 != partial_row_data2) - self.assertFalse(comparison_val) - - def test___ne__(self): - row_key1 = object() - partial_row_data1 = self._makeOne(row_key1) - row_key2 = object() - partial_row_data2 = self._makeOne(row_key2) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def 
test___ne__committed(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data1._committed = object() - partial_row_data2 = self._makeOne(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__cells(self): - row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data1._cells = object() - partial_row_data2 = self._makeOne(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test_to_dict(self): - cell1 = object() - cell2 = object() - cell3 = object() - - family_name1 = u'name1' - family_name2 = u'name2' - qual1 = b'col1' - qual2 = b'col2' - qual3 = b'col3' - - partial_row_data = self._makeOne(None) - partial_row_data._cells = { - family_name1: { - qual1: cell1, - qual2: cell2, - }, - family_name2: { - qual3: cell3, - }, - } - - result = partial_row_data.to_dict() - expected_result = { - b'name1:col1': cell1, - b'name1:col2': cell2, - b'name2:col3': cell3, - } - self.assertEqual(result, expected_result) - - def test_cells_property(self): - partial_row_data = self._makeOne(None) - cells = {1: 2} - partial_row_data._cells = cells - # Make sure we get a copy, not the original. - self.assertFalse(partial_row_data.cells is cells) - self.assertEqual(partial_row_data.cells, cells) - - def test_row_key_getter(self): - row_key = object() - partial_row_data = self._makeOne(row_key) - self.assertTrue(partial_row_data.row_key is row_key) - - def test_committed_getter(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = value = object() - self.assertTrue(partial_row_data.committed is value) - - def test_clear(self): - partial_row_data = self._makeOne(None) - cells = {1: 2} - partial_row_data._cells = cells - self.assertEqual(partial_row_data.cells, cells) - partial_row_data._committed = True - partial_row_data._chunks_encountered = True - partial_row_data.clear() - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - self.assertEqual(partial_row_data.cells, {}) - - def test__handle_commit_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - index = last_chunk_index = 1 - self.assertFalse(partial_row_data.committed) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) - self.assertTrue(partial_row_data.committed) - - def test__handle_commit_row_false(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_commit_row(chunk, None, None) - - def test__handle_commit_row_not_last_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - with self.assertRaises(ValueError): - index = 0 - last_chunk_index = 1 - self.assertNotEqual(index, last_chunk_index) - partial_row_data._handle_commit_row(chunk, index, last_chunk_index) - - def test__handle_reset_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Modify the PartialRowData 
object so we can check it's been cleared. - partial_row_data._cells = {1: 2} - partial_row_data._committed = True - partial_row_data._handle_reset_row(chunk) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - - def test__handle_reset_row_failure(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - partial_row_data = self._makeOne(None) - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=False) - - with self.assertRaises(ValueError): - partial_row_data._handle_reset_row(chunk) - - def test__handle_row_contents(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import Cell - - partial_row_data = self._makeOne(None) - cell1_pb = data_pb2.Cell(timestamp_micros=1, value=b'val1') - cell2_pb = data_pb2.Cell(timestamp_micros=200, value=b'val2') - cell3_pb = data_pb2.Cell(timestamp_micros=300000, value=b'val3') - col1 = b'col1' - col2 = b'col2' - columns = [ - data_pb2.Column(qualifier=col1, cells=[cell1_pb, cell2_pb]), - data_pb2.Column(qualifier=col2, cells=[cell3_pb]), - ] - family_name = u'name' - row_contents = data_pb2.Family(name=family_name, columns=columns) - chunk = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - self.assertEqual(partial_row_data.cells, {}) - partial_row_data._handle_row_contents(chunk) - expected_cells = { - family_name: { - col1: [Cell.from_pb(cell1_pb), Cell.from_pb(cell2_pb)], - col2: [Cell.from_pb(cell3_pb)], - } - } - self.assertEqual(partial_row_data.cells, expected_cells) - - def test_update_from_read_rows(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - - # Set-up chunk1, some data that will be reset by chunk2. - ignored_family_name = u'ignore-name' - row_contents = data_pb2.Family(name=ignored_family_name) - chunk1 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - # Set-up chunk2, a reset row. - chunk2 = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) - - # Set-up chunk3, a column family with no columns. - family_name = u'name' - row_contents = data_pb2.Family(name=family_name) - chunk3 = messages_pb2.ReadRowsResponse.Chunk(row_contents=row_contents) - - # Set-up chunk4, a commit row. - chunk4 = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - - # Prepare request and make sure PartialRowData is empty before. - read_rows_response_pb = messages_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk1, chunk2, chunk3, chunk4]) - self.assertEqual(partial_row_data.cells, {}) - self.assertFalse(partial_row_data.committed) - self.assertFalse(partial_row_data._chunks_encountered) - - # Parse the response and make sure the cells took place. 
- partial_row_data.update_from_read_rows(read_rows_response_pb) - self.assertEqual(partial_row_data.cells, {family_name: {}}) - self.assertFalse(ignored_family_name in partial_row_data.cells) - self.assertTrue(partial_row_data.committed) - self.assertTrue(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_while_committed(self): - partial_row_data = self._makeOne(None) - partial_row_data._committed = True - self.assertFalse(partial_row_data._chunks_encountered) - - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(None) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_row_key_disagree(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key1 = b'row-key1' - row_key2 = b'row-key2' - partial_row_data = self._makeOne(row_key1) - self.assertFalse(partial_row_data._chunks_encountered) - - self.assertNotEqual(row_key1, row_key2) - read_rows_response_pb = messages_pb2.ReadRowsResponse(row_key=row_key2) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - - def test_update_from_read_rows_empty_chunk(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - row_key = b'row-key' - partial_row_data = self._makeOne(row_key) - self.assertFalse(partial_row_data._chunks_encountered) - - chunk = messages_pb2.ReadRowsResponse.Chunk() - read_rows_response_pb = messages_pb2.ReadRowsResponse( - row_key=row_key, chunks=[chunk]) - - # This makes it an "empty" chunk. - self.assertEqual(chunk.WhichOneof('chunk'), None) - with self.assertRaises(ValueError): - partial_row_data.update_from_read_rows(read_rows_response_pb) - - self.assertFalse(partial_row_data._chunks_encountered) - - -class TestPartialRowsData(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_data import PartialRowsData - return PartialRowsData - - def _getDoNothingClass(self): - klass = self._getTargetClass() - - class FakePartialRowsData(klass): - - def __init__(self, *args, **kwargs): - super(FakePartialRowsData, self).__init__(*args, **kwargs) - self._consumed = [] - - def consume_next(self): - value = self._response_iterator.next() - self._consumed.append(value) - return value - - return FakePartialRowsData - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - response_iterator = object() - partial_rows_data = self._makeOne(response_iterator) - self.assertTrue(partial_rows_data._response_iterator - is response_iterator) - self.assertEqual(partial_rows_data._rows, {}) - - def test___eq__(self): - response_iterator = object() - partial_rows_data1 = self._makeOne(response_iterator) - partial_rows_data2 = self._makeOne(response_iterator) - self.assertEqual(partial_rows_data1, partial_rows_data2) - - def test___eq__type_differ(self): - partial_rows_data1 = self._makeOne(None) - partial_rows_data2 = object() - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test___ne__same_value(self): - response_iterator = object() - partial_rows_data1 = self._makeOne(response_iterator) - partial_rows_data2 = self._makeOne(response_iterator) - comparison_val = (partial_rows_data1 != partial_rows_data2) - self.assertFalse(comparison_val) - - def test___ne__(self): - response_iterator1 = object() - partial_rows_data1 = self._makeOne(response_iterator1) - 
response_iterator2 = object() - partial_rows_data2 = self._makeOne(response_iterator2) - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test_rows_getter(self): - partial_rows_data = self._makeOne(None) - partial_rows_data._rows = value = object() - self.assertTrue(partial_rows_data.rows is value) - - def test_cancel(self): - response_iterator = _MockCancellableIterator() - partial_rows_data = self._makeOne(response_iterator) - self.assertEqual(response_iterator.cancel_calls, 0) - partial_rows_data.cancel() - self.assertEqual(response_iterator.cancel_calls, 1) - - def test_consume_next(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key) - response_iterator = _MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - self.assertEqual(partial_rows_data.rows, {}) - partial_rows_data.consume_next() - expected_rows = {row_key: PartialRowData(row_key)} - self.assertEqual(partial_rows_data.rows, expected_rows) - - def test_consume_next_row_exists(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_data import PartialRowData - - row_key = b'row-key' - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - value_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=[chunk]) - response_iterator = _MockCancellableIterator(value_pb) - partial_rows_data = self._makeOne(response_iterator) - existing_values = PartialRowData(row_key) - partial_rows_data._rows[row_key] = existing_values - self.assertFalse(existing_values.committed) - partial_rows_data.consume_next() - self.assertTrue(existing_values.committed) - self.assertEqual(existing_values.cells, {}) - - def test_consume_next_empty_iter(self): - response_iterator = _MockCancellableIterator() - partial_rows_data = self._makeOne(response_iterator) - with self.assertRaises(StopIteration): - partial_rows_data.consume_next() - - def test_consume_all(self): - klass = self._getDoNothingClass() - - value1, value2, value3 = object(), object(), object() - response_iterator = _MockCancellableIterator(value1, value2, value3) - partial_rows_data = klass(response_iterator) - self.assertEqual(partial_rows_data._consumed, []) - partial_rows_data.consume_all() - self.assertEqual(partial_rows_data._consumed, [value1, value2, value3]) - - def test_consume_all_with_max_loops(self): - klass = self._getDoNothingClass() - - value1, value2, value3 = object(), object(), object() - response_iterator = _MockCancellableIterator(value1, value2, value3) - partial_rows_data = klass(response_iterator) - self.assertEqual(partial_rows_data._consumed, []) - partial_rows_data.consume_all(max_loops=1) - self.assertEqual(partial_rows_data._consumed, [value1]) - # Make sure the iterator still has the remaining values. - self.assertEqual(list(response_iterator.iter_values), [value2, value3]) - - -class _MockCancellableIterator(object): - - cancel_calls = 0 - - def __init__(self, *values): - self.iter_values = iter(values) - - def cancel(self): - self.cancel_calls += 1 - - def next(self): - return next(self.iter_values) diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py deleted file mode 100644 index aed90574683f..000000000000 --- a/gcloud/bigtable/test_row_filters.py +++ /dev/null @@ -1,1010 +0,0 @@ -# Copyright 2016 Google Inc. 
All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class Test_BoolFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import _BoolFilter - return _BoolFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - flag = object() - row_filter = self._makeOne(flag) - self.assertTrue(row_filter.flag is flag) - - def test___eq__type_differ(self): - flag = object() - row_filter1 = self._makeOne(flag) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - flag = object() - row_filter1 = self._makeOne(flag) - row_filter2 = self._makeOne(flag) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - flag = object() - row_filter1 = self._makeOne(flag) - row_filter2 = self._makeOne(flag) - comparison_val = (row_filter1 != row_filter2) - self.assertFalse(comparison_val) - - -class TestSinkFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import SinkFilter - return SinkFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - flag = True - row_filter = self._makeOne(flag) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(sink=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestPassAllFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import PassAllFilter - return PassAllFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - flag = True - row_filter = self._makeOne(flag) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(pass_all_filter=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestBlockAllFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import BlockAllFilter - return BlockAllFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - flag = True - row_filter = self._makeOne(flag) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(block_all_filter=flag) - self.assertEqual(pb_val, expected_pb) - - -class Test_RegexFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import _RegexFilter - return _RegexFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - regex = b'abc' - row_filter = self._makeOne(regex) - self.assertTrue(row_filter.regex is regex) - - def test_constructor_non_bytes(self): - regex = u'abc' - row_filter = self._makeOne(regex) - 
self.assertEqual(row_filter.regex, b'abc') - - def test___eq__type_differ(self): - regex = b'def-rgx' - row_filter1 = self._makeOne(regex) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - regex = b'trex-regex' - row_filter1 = self._makeOne(regex) - row_filter2 = self._makeOne(regex) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - regex = b'abc' - row_filter1 = self._makeOne(regex) - row_filter2 = self._makeOne(regex) - comparison_val = (row_filter1 != row_filter2) - self.assertFalse(comparison_val) - - -class TestRowKeyRegexFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import RowKeyRegexFilter - return RowKeyRegexFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - regex = b'row-key-regex' - row_filter = self._makeOne(regex) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_key_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestRowSampleFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import RowSampleFilter - return RowSampleFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - sample = object() - row_filter = self._makeOne(sample) - self.assertTrue(row_filter.sample is sample) - - def test___eq__type_differ(self): - sample = object() - row_filter1 = self._makeOne(sample) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - sample = object() - row_filter1 = self._makeOne(sample) - row_filter2 = self._makeOne(sample) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - sample = 0.25 - row_filter = self._makeOne(sample) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(row_sample_filter=sample) - self.assertEqual(pb_val, expected_pb) - - -class TestFamilyNameRegexFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import FamilyNameRegexFilter - return FamilyNameRegexFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - regex = u'family-regex' - row_filter = self._makeOne(regex) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(family_name_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnQualifierRegexFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter - return ColumnQualifierRegexFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - regex = b'column-regex' - row_filter = self._makeOne(regex) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(column_qualifier_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestTimestampRange(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import TimestampRange - return TimestampRange - - def _makeOne(self, *args, **kwargs): - 
return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - start = object() - end = object() - time_range = self._makeOne(start=start, end=end) - self.assertTrue(time_range.start is start) - self.assertTrue(time_range.end is end) - - def test___eq__(self): - start = object() - end = object() - time_range1 = self._makeOne(start=start, end=end) - time_range2 = self._makeOne(start=start, end=end) - self.assertEqual(time_range1, time_range2) - - def test___eq__type_differ(self): - start = object() - end = object() - time_range1 = self._makeOne(start=start, end=end) - time_range2 = object() - self.assertNotEqual(time_range1, time_range2) - - def test___ne__same_value(self): - start = object() - end = object() - time_range1 = self._makeOne(start=start, end=end) - time_range2 = self._makeOne(start=start, end=end) - comparison_val = (time_range1 != time_range2) - self.assertFalse(comparison_val) - - def _to_pb_helper(self, start_micros=None, end_micros=None): - import datetime - from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - pb_kwargs = {} - - start = None - if start_micros is not None: - start = _EPOCH + datetime.timedelta(microseconds=start_micros) - pb_kwargs['start_timestamp_micros'] = start_micros - end = None - if end_micros is not None: - end = _EPOCH + datetime.timedelta(microseconds=end_micros) - pb_kwargs['end_timestamp_micros'] = end_micros - time_range = self._makeOne(start=start, end=end) - - expected_pb = data_pb2.TimestampRange(**pb_kwargs) - self.assertEqual(time_range.to_pb(), expected_pb) - - def test_to_pb(self): - # Makes sure already milliseconds granularity - start_micros = 30871000 - end_micros = 12939371000 - self._to_pb_helper(start_micros=start_micros, - end_micros=end_micros) - - def test_to_pb_start_only(self): - # Makes sure already milliseconds granularity - start_micros = 30871000 - self._to_pb_helper(start_micros=start_micros) - - def test_to_pb_end_only(self): - # Makes sure already milliseconds granularity - end_micros = 12939371000 - self._to_pb_helper(end_micros=end_micros) - - -class TestTimestampRangeFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import TimestampRangeFilter - return TimestampRangeFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - range_ = object() - row_filter = self._makeOne(range_) - self.assertTrue(row_filter.range_ is range_) - - def test___eq__type_differ(self): - range_ = object() - row_filter1 = self._makeOne(range_) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - range_ = object() - row_filter1 = self._makeOne(range_) - row_filter2 = self._makeOne(range_) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import TimestampRange - - range_ = TimestampRange() - row_filter = self._makeOne(range_) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter( - timestamp_range_filter=data_pb2.TimestampRange()) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnRangeFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ColumnRangeFilter - return ColumnRangeFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def 
test_constructor_defaults(self): - column_family_id = object() - row_filter = self._makeOne(column_family_id) - self.assertTrue(row_filter.column_family_id is column_family_id) - self.assertEqual(row_filter.start_column, None) - self.assertEqual(row_filter.end_column, None) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter = self._makeOne(column_family_id, start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - self.assertTrue(row_filter.column_family_id is column_family_id) - self.assertTrue(row_filter.start_column is start_column) - self.assertTrue(row_filter.end_column is end_column) - self.assertTrue(row_filter.inclusive_start is inclusive_start) - self.assertTrue(row_filter.inclusive_end is inclusive_end) - - def test_constructor_bad_start(self): - column_family_id = object() - self.assertRaises(ValueError, self._makeOne, - column_family_id, inclusive_start=True) - - def test_constructor_bad_end(self): - column_family_id = object() - self.assertRaises(ValueError, self._makeOne, - column_family_id, inclusive_end=True) - - def test___eq__(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._makeOne(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - row_filter2 = self._makeOne(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - column_family_id = object() - row_filter1 = self._makeOne(column_family_id) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - column_family_id = u'column-family-id' - row_filter = self._makeOne(column_family_id) - col_range_pb = data_pb2.ColumnRange(family_name=column_family_id) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - column_family_id = u'column-family-id' - column = b'column' - row_filter = self._makeOne(column_family_id, start_column=column) - col_range_pb = data_pb2.ColumnRange( - family_name=column_family_id, - start_qualifier_inclusive=column, - ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - column_family_id = u'column-family-id' - column = b'column' - row_filter = self._makeOne(column_family_id, start_column=column, - inclusive_start=False) - col_range_pb = data_pb2.ColumnRange( - family_name=column_family_id, - start_qualifier_exclusive=column, - ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - column_family_id = 
u'column-family-id' - column = b'column' - row_filter = self._makeOne(column_family_id, end_column=column) - col_range_pb = data_pb2.ColumnRange( - family_name=column_family_id, - end_qualifier_inclusive=column, - ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - column_family_id = u'column-family-id' - column = b'column' - row_filter = self._makeOne(column_family_id, end_column=column, - inclusive_end=False) - col_range_pb = data_pb2.ColumnRange( - family_name=column_family_id, - end_qualifier_exclusive=column, - ) - expected_pb = data_pb2.RowFilter(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class TestValueRegexFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ValueRegexFilter - return ValueRegexFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - regex = b'value-regex' - row_filter = self._makeOne(regex) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestValueRangeFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ValueRangeFilter - return ValueRangeFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - row_filter = self._makeOne() - self.assertEqual(row_filter.start_value, None) - self.assertEqual(row_filter.end_value, None) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - row_filter = self._makeOne(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - self.assertTrue(row_filter.start_value is start_value) - self.assertTrue(row_filter.end_value is end_value) - self.assertTrue(row_filter.inclusive_start is inclusive_start) - self.assertTrue(row_filter.inclusive_end is inclusive_end) - - def test_constructor_bad_start(self): - self.assertRaises(ValueError, self._makeOne, inclusive_start=True) - - def test_constructor_bad_end(self): - self.assertRaises(ValueError, self._makeOne, inclusive_end=True) - - def test___eq__(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._makeOne(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - row_filter2 = self._makeOne(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - row_filter1 = self._makeOne() - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - row_filter = self._makeOne() - expected_pb = data_pb2.RowFilter( - value_range_filter=data_pb2.ValueRange()) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - 
from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - value = b'some-value' - row_filter = self._makeOne(start_value=value) - val_range_pb = data_pb2.ValueRange(start_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - value = b'some-value' - row_filter = self._makeOne(start_value=value, inclusive_start=False) - val_range_pb = data_pb2.ValueRange(start_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - value = b'some-value' - row_filter = self._makeOne(end_value=value) - val_range_pb = data_pb2.ValueRange(end_value_inclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_end(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - value = b'some-value' - row_filter = self._makeOne(end_value=value, inclusive_end=False) - val_range_pb = data_pb2.ValueRange(end_value_exclusive=value) - expected_pb = data_pb2.RowFilter(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class Test_CellCountFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import _CellCountFilter - return _CellCountFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - num_cells = object() - row_filter = self._makeOne(num_cells) - self.assertTrue(row_filter.num_cells is num_cells) - - def test___eq__type_differ(self): - num_cells = object() - row_filter1 = self._makeOne(num_cells) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - num_cells = object() - row_filter1 = self._makeOne(num_cells) - row_filter2 = self._makeOne(num_cells) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - num_cells = object() - row_filter1 = self._makeOne(num_cells) - row_filter2 = self._makeOne(num_cells) - comparison_val = (row_filter1 != row_filter2) - self.assertFalse(comparison_val) - - -class TestCellsRowOffsetFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import CellsRowOffsetFilter - return CellsRowOffsetFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - num_cells = 76 - row_filter = self._makeOne(num_cells) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(cells_per_row_offset_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsRowLimitFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import CellsRowLimitFilter - return CellsRowLimitFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - num_cells = 189 - row_filter = self._makeOne(num_cells) - pb_val = row_filter.to_pb() - expected_pb = 
data_pb2.RowFilter(cells_per_row_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsColumnLimitFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import CellsColumnLimitFilter - return CellsColumnLimitFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - num_cells = 10 - row_filter = self._makeOne(num_cells) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter( - cells_per_column_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestStripValueTransformerFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import StripValueTransformerFilter - return StripValueTransformerFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - flag = True - row_filter = self._makeOne(flag) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(strip_value_transformer=flag) - self.assertEqual(pb_val, expected_pb) - - -class TestApplyLabelFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ApplyLabelFilter - return ApplyLabelFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - label = object() - row_filter = self._makeOne(label) - self.assertTrue(row_filter.label is label) - - def test___eq__type_differ(self): - label = object() - row_filter1 = self._makeOne(label) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - label = object() - row_filter1 = self._makeOne(label) - row_filter2 = self._makeOne(label) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - - label = u'label' - row_filter = self._makeOne(label) - pb_val = row_filter.to_pb() - expected_pb = data_pb2.RowFilter(apply_label_transformer=label) - self.assertEqual(pb_val, expected_pb) - - -class Test_FilterCombination(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import _FilterCombination - return _FilterCombination - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor_defaults(self): - row_filter = self._makeOne() - self.assertEqual(row_filter.filters, []) - - def test_constructor_explicit(self): - filters = object() - row_filter = self._makeOne(filters=filters) - self.assertTrue(row_filter.filters is filters) - - def test___eq__(self): - filters = object() - row_filter1 = self._makeOne(filters=filters) - row_filter2 = self._makeOne(filters=filters) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - filters = object() - row_filter1 = self._makeOne(filters=filters) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - -class TestRowFilterChain(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import RowFilterChain - return RowFilterChain - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from 
gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() - - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( - filters=[row_filter1_pb, row_filter2_pb], - ), - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import CellsRowLimitFilter - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) - - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() - - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() - - row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() - - expected_pb = data_pb2.RowFilter( - chain=data_pb2.RowFilter.Chain( - filters=[row_filter3_pb, row_filter4_pb], - ), - ) - self.assertEqual(filter_pb, expected_pb) - - -class TestRowFilterUnion(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import RowFilterUnion - return RowFilterUnion - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() - - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( - filters=[row_filter1_pb, row_filter2_pb], - ), - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_nested(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import CellsRowLimitFilter - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) - - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() - - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() - - row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() - - expected_pb = data_pb2.RowFilter( - interleave=data_pb2.RowFilter.Interleave( - filters=[row_filter3_pb, row_filter4_pb], - ), - ) - self.assertEqual(filter_pb, expected_pb) - - -class TestConditionalRowFilter(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.row_filters import ConditionalRowFilter - return ConditionalRowFilter - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - base_filter = object() - true_filter = object() - false_filter = 
object() - cond_filter = self._makeOne(base_filter, - true_filter=true_filter, - false_filter=false_filter) - self.assertTrue(cond_filter.base_filter is base_filter) - self.assertTrue(cond_filter.true_filter is true_filter) - self.assertTrue(cond_filter.false_filter is false_filter) - - def test___eq__(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._makeOne(base_filter, - true_filter=true_filter, - false_filter=false_filter) - cond_filter2 = self._makeOne(base_filter, - true_filter=true_filter, - false_filter=false_filter) - self.assertEqual(cond_filter1, cond_filter2) - - def test___eq__type_differ(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._makeOne(base_filter, - true_filter=true_filter, - false_filter=false_filter) - cond_filter2 = object() - self.assertNotEqual(cond_filter1, cond_filter2) - - def test_to_pb(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import CellsRowOffsetFilter - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = CellsRowOffsetFilter(11) - row_filter3_pb = row_filter3.to_pb() - - row_filter4 = self._makeOne(row_filter1, true_filter=row_filter2, - false_filter=row_filter3) - filter_pb = row_filter4.to_pb() - - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( - predicate_filter=row_filter1_pb, - true_filter=row_filter2_pb, - false_filter=row_filter3_pb, - ), - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_true_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) - filter_pb = row_filter3.to_pb() - - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( - predicate_filter=row_filter1_pb, - true_filter=row_filter2_pb, - ), - ) - self.assertEqual(filter_pb, expected_pb) - - def test_to_pb_false_only(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable.row_filters import RowSampleFilter - from gcloud.bigtable.row_filters import StripValueTransformerFilter - - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() - - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - - row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) - filter_pb = row_filter3.to_pb() - - expected_pb = data_pb2.RowFilter( - condition=data_pb2.RowFilter.Condition( - predicate_filter=row_filter1_pb, - false_filter=row_filter2_pb, - ), - ) - self.assertEqual(filter_pb, expected_pb) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py deleted file mode 100644 index 9fcdf21593b0..000000000000 --- a/gcloud/bigtable/test_table.py +++ /dev/null @@ -1,639 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import unittest2 - - -class TestTable(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.bigtable.table import Table - return Table - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - table_id = 'table-id' - cluster = object() - - table = self._makeOne(table_id, cluster) - self.assertEqual(table.table_id, table_id) - self.assertTrue(table._cluster is cluster) - - def test_name_property(self): - table_id = 'table-id' - cluster_name = 'cluster_name' - - cluster = _Cluster(cluster_name) - table = self._makeOne(table_id, cluster) - expected_name = cluster_name + '/tables/' + table_id - self.assertEqual(table.name, expected_name) - - def test_column_family_factory(self): - from gcloud.bigtable.column_family import ColumnFamily - - table_id = 'table-id' - gc_rule = object() - table = self._makeOne(table_id, None) - column_family_id = 'column_family_id' - column_family = table.column_family(column_family_id, gc_rule=gc_rule) - - self.assertTrue(isinstance(column_family, ColumnFamily)) - self.assertEqual(column_family.column_family_id, column_family_id) - self.assertTrue(column_family.gc_rule is gc_rule) - self.assertEqual(column_family._table, table) - - def test_row_factory_direct(self): - from gcloud.bigtable.row import DirectRow - - table_id = 'table-id' - table = self._makeOne(table_id, None) - row_key = b'row_key' - row = table.row(row_key) - - self.assertTrue(isinstance(row, DirectRow)) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_conditional(self): - from gcloud.bigtable.row import ConditionalRow - - table_id = 'table-id' - table = self._makeOne(table_id, None) - row_key = b'row_key' - filter_ = object() - row = table.row(row_key, filter_=filter_) - - self.assertTrue(isinstance(row, ConditionalRow)) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_append(self): - from gcloud.bigtable.row import AppendRow - - table_id = 'table-id' - table = self._makeOne(table_id, None) - row_key = b'row_key' - row = table.row(row_key, append=True) - - self.assertTrue(isinstance(row, AppendRow)) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - - def test_row_factory_failure(self): - table_id = 'table-id' - table = self._makeOne(table_id, None) - with self.assertRaises(ValueError): - table.row(b'row_key', filter_=object(), append=True) - - def test___eq__(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, cluster) - self.assertEqual(table1, table2) - - def test___eq__type_differ(self): - table1 = self._makeOne('table_id', None) - table2 = object() - self.assertNotEqual(table1, table2) - - def test___ne__same_value(self): - table_id = 'table_id' - cluster = object() - table1 = self._makeOne(table_id, cluster) - table2 = self._makeOne(table_id, 
cluster) - comparison_val = (table1 != table2) - self.assertFalse(comparison_val) - - def test___ne__(self): - table1 = self._makeOne('table_id1', 'cluster1') - table2 = self._makeOne('table_id2', 'cluster2') - self.assertNotEqual(table1, table2) - - def _create_test_helper(self, initial_split_keys): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 150 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - request_pb = messages_pb2.CreateTableRequest( - initial_split_keys=initial_split_keys, - name=cluster_name, - table_id=table_id, - ) - - # Create response_pb - response_pb = data_pb2.Table() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # create() has no return value. - - # Perform the method and check the result. - result = table.create(initial_split_keys=initial_split_keys) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CreateTable', - (request_pb, timeout_seconds), - {}, - )]) - - def test_create(self): - initial_split_keys = None - self._create_test_helper(initial_split_keys) - - def test_create_with_split_keys(self): - initial_split_keys = ['s1', 's2'] - self._create_test_helper(initial_split_keys) - - def test_rename(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - new_table_id = 'new_table_id' - timeout_seconds = 97 - self.assertNotEqual(new_table_id, table_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.RenameTableRequest( - name=table_name, - new_id=new_table_id, - ) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # rename() has no return value. - - # Perform the method and check the result. 
- result = table.rename(new_table_id) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'RenameTable', - (request_pb, timeout_seconds), - {}, - )]) - - def _list_column_families_helper(self, column_family_name=None): - from gcloud.bigtable._generated import ( - bigtable_table_data_pb2 as data_pb2) - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 502 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.GetTableRequest(name=table_name) - - # Create response_pb - column_family_id = 'foo' - if column_family_name is None: - column_family_name = (table_name + '/columnFamilies/' + - column_family_id) - column_family = data_pb2.ColumnFamily(name=column_family_name) - response_pb = data_pb2.Table( - column_families={column_family_id: column_family}, - ) - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = { - column_family_id: table.column_family(column_family_id), - } - - # Perform the method and check the result. - result = table.list_column_families() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'GetTable', - (request_pb, timeout_seconds), - {}, - )]) - - def test_list_column_families(self): - self._list_column_families_helper() - - def test_list_column_families_failure(self): - column_family_name = 'not-the-right-format' - with self.assertRaises(ValueError): - self._list_column_families_helper( - column_family_name=column_family_name) - - def test_delete(self): - from google.protobuf import empty_pb2 - from gcloud.bigtable._generated import ( - bigtable_table_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 871 - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - - client = _Client(timeout_seconds=timeout_seconds) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.DeleteTableRequest(name=table_name) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- result = table.delete() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteTable', - (request_pb, timeout_seconds), - {}, - )]) - - def _read_row_helper(self, chunks): - from gcloud._testing import _Monkey - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable.row_data import PartialRowData - from gcloud.bigtable import table as MUT - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 596 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, row_key, filter_): - mock_created.append((table_name, row_key, filter_)) - return request_pb - - # Create response_iterator - row_key = b'row-key' - response_pb = messages_pb2.ReadRowsResponse(row_key=row_key, - chunks=chunks) - response_iterator = [response_pb] - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) - - # Create expected_result. - if chunks: - expected_result = PartialRowData(row_key) - expected_result._committed = True - expected_result._chunks_encountered = True - else: - expected_result = None - - # Perform the method and check the result. - filter_obj = object() - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_row(row_key, filter_=filter_obj) - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadRows', - (request_pb, timeout_seconds), - {}, - )]) - self.assertEqual(mock_created, [(table.name, row_key, filter_obj)]) - - def test_read_row(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - chunk = messages_pb2.ReadRowsResponse.Chunk(commit_row=True) - chunks = [chunk] - self._read_row_helper(chunks) - - def test_read_empty_row(self): - chunks = [] - self._read_row_helper(chunks) - - def test_read_row_still_partial(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - # There is never a "commit row". - chunk = messages_pb2.ReadRowsResponse.Chunk(reset_row=True) - chunks = [chunk] - with self.assertRaises(ValueError): - self._read_row_helper(chunks) - - def test_read_rows(self): - from gcloud._testing import _Monkey - from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable.row_data import PartialRowsData - from gcloud.bigtable import table as MUT - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1111 - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response_iterator - response_iterator = object() - - # Patch the stub used by the API method. 
- client._data_stub = stub = _FakeStub(response_iterator) - - # Create expected_result. - expected_result = PartialRowsData(response_iterator) - - # Perform the method and check the result. - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - allow_row_interleaving = True - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - allow_row_interleaving=allow_row_interleaving, limit=limit) - - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadRows', - (request_pb, timeout_seconds), - {}, - )]) - created_kwargs = { - 'start_key': start_key, - 'end_key': end_key, - 'filter_': filter_obj, - 'allow_row_interleaving': allow_row_interleaving, - 'limit': limit, - } - self.assertEqual(mock_created, [(table.name, created_kwargs)]) - - def test_sample_row_keys(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable._testing import _FakeStub - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - timeout_seconds = 1333 - - client = _Client(timeout_seconds=timeout_seconds) - cluster_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id) - cluster = _Cluster(cluster_name, client=client) - table = self._makeOne(table_id, cluster) - - # Create request_pb - table_name = cluster_name + '/tables/' + table_id - request_pb = messages_pb2.SampleRowKeysRequest(table_name=table_name) - - # Create response_iterator - response_iterator = object() # Just passed to a mock. - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) - - # Create expected_result. - expected_result = response_iterator - - # Perform the method and check the result. 
- result = table.sample_row_keys() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'SampleRowKeys', - (request_pb, timeout_seconds), - {}, - )]) - - -class Test__create_row_request(unittest2.TestCase): - - def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, allow_row_interleaving=None, limit=None): - from gcloud.bigtable.table import _create_row_request - return _create_row_request( - table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, allow_row_interleaving=allow_row_interleaving, - limit=limit) - - def test_table_name_only(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - result = self._callFUT(table_name) - expected_result = messages_pb2.ReadRowsRequest(table_name=table_name) - self.assertEqual(result, expected_result) - - def test_row_key_row_range_conflict(self): - with self.assertRaises(ValueError): - self._callFUT(None, row_key=object(), end_key=object()) - - def test_row_key(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - row_key = b'row_key' - result = self._callFUT(table_name, row_key=row_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_key=row_key, - ) - self.assertEqual(result, expected_result) - - def test_row_range_start_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - start_key = b'start_key' - result = self._callFUT(table_name, start_key=start_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key), - ) - self.assertEqual(result, expected_result) - - def test_row_range_end_key(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - end_key = b'end_key' - result = self._callFUT(table_name, end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(end_key=end_key), - ) - self.assertEqual(result, expected_result) - - def test_row_range_both_keys(self): - from gcloud.bigtable._generated import bigtable_data_pb2 as data_pb2 - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - start_key = b'start_key' - end_key = b'end_key' - result = self._callFUT(table_name, start_key=start_key, - end_key=end_key) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - row_range=data_pb2.RowRange(start_key=start_key, end_key=end_key), - ) - self.assertEqual(result, expected_result) - - def test_with_filter(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - from gcloud.bigtable.row_filters import RowSampleFilter - - table_name = 'table_name' - row_filter = RowSampleFilter(0.33) - result = self._callFUT(table_name, filter_=row_filter) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - filter=row_filter.to_pb(), - ) - self.assertEqual(result, expected_result) - - def test_with_allow_row_interleaving(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as 
messages_pb2) - - table_name = 'table_name' - allow_row_interleaving = True - result = self._callFUT(table_name, - allow_row_interleaving=allow_row_interleaving) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - allow_row_interleaving=allow_row_interleaving, - ) - self.assertEqual(result, expected_result) - - def test_with_limit(self): - from gcloud.bigtable._generated import ( - bigtable_service_messages_pb2 as messages_pb2) - - table_name = 'table_name' - limit = 1337 - result = self._callFUT(table_name, limit=limit) - expected_result = messages_pb2.ReadRowsRequest( - table_name=table_name, - num_rows_limit=limit, - ) - self.assertEqual(result, expected_result) - - -class _Client(object): - - data_stub = None - cluster_stub = None - operations_stub = None - table_stub = None - - def __init__(self, timeout_seconds=None): - self.timeout_seconds = timeout_seconds - - -class _Cluster(object): - - def __init__(self, name, client=None): - self.name = name - self._client = client diff --git a/gcloud/datastore/__init__.py b/gcloud/datastore/__init__.py deleted file mode 100644 index 6393c2ec99bb..000000000000 --- a/gcloud/datastore/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Shortcut methods for getting set up with Google Cloud Datastore. - -You'll typically use these to get started with the API: - ->>> from gcloud import datastore ->>> ->>> client = datastore.Client() ->>> key = client.key('EntityKind', 1234) ->>> entity = datastore.Entity(key) ->>> query = client.query(kind='EntityKind') - -The main concepts with this API are: - -- :class:`gcloud.datastore.connection.Connection` - which represents a connection between your machine and the Cloud Datastore - API. - -- :class:`gcloud.datastore.client.Client` - which represents a project (string) and namespace (string) bundled with - a connection and has convenience methods for constructing objects with that - project / namespace. - -- :class:`gcloud.datastore.entity.Entity` - which represents a single entity in the datastore - (akin to a row in relational database world). - -- :class:`gcloud.datastore.key.Key` - which represents a pointer to a particular entity in the datastore - (akin to a unique identifier in relational database world). - -- :class:`gcloud.datastore.query.Query` - which represents a lookup or search over the rows in the datastore. - -- :class:`gcloud.datastore.transaction.Transaction` - which represents an all-or-none transaction and enables consistency - when race conditions may occur. 
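A minimal sketch tying these concepts together, assuming only the doctest-style calls shown above (the ``description`` property name and the ``put`` / ``fetch`` calls are illustrative, not a prescribed workflow):

>>> from gcloud import datastore
>>> client = datastore.Client()
>>> key = client.key('EntityKind', 1234)
>>> entity = datastore.Entity(key)
>>> entity['description'] = 'an illustrative property'
>>> client.put(entity)                       # write the entity
>>> query = client.query(kind='EntityKind')
>>> entities = list(query.fetch())           # read it back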
-""" - -from gcloud.datastore.batch import Batch -from gcloud.datastore.connection import Connection -from gcloud.datastore.client import Client -from gcloud.datastore.entity import Entity -from gcloud.datastore.key import Key -from gcloud.datastore.query import Query -from gcloud.datastore.transaction import Transaction - - -SCOPE = Connection.SCOPE diff --git a/gcloud/datastore/_generated/__init__.py b/gcloud/datastore/_generated/__init__.py deleted file mode 100644 index 19a0f26e68de..000000000000 --- a/gcloud/datastore/_generated/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generated protobuf modules for Google Cloud Datastore API.""" diff --git a/gcloud/datastore/_generated/_datastore.proto b/gcloud/datastore/_generated/_datastore.proto deleted file mode 100644 index 6f6aedb39d8b..000000000000 --- a/gcloud/datastore/_generated/_datastore.proto +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.datastore.v1beta3; - -import "google/api/annotations.proto"; -import "google/datastore/v1beta3/entity.proto"; -import "google/datastore/v1beta3/query.proto"; - -option java_multiple_files = true; -option java_outer_classname = "DatastoreProto"; -option java_package = "com.google.datastore.v1beta3"; - - -// Each RPC normalizes the partition IDs of the keys in its input entities, -// and always returns entities with keys with normalized partition IDs. -// This applies to all keys and entities, including those in values, except keys -// with both an empty path and an empty or unset partition ID. Normalization of -// input keys sets the project ID (if not already set) to the project ID from -// the request. -// -service Datastore { - // Look up entities by key. - rpc Lookup(LookupRequest) returns (LookupResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:lookup" body: "*" }; - } - - // Query for entities. - rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:runQuery" body: "*" }; - } - - // Begin a new transaction. 
- rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:beginTransaction" body: "*" }; - } - - // Commit a transaction, optionally creating, deleting or modifying some - // entities. - rpc Commit(CommitRequest) returns (CommitResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:commit" body: "*" }; - } - - // Roll back a transaction. - rpc Rollback(RollbackRequest) returns (RollbackResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:rollback" body: "*" }; - } - - // Allocate IDs for the given keys (useful for referencing an entity before - // it is inserted). - rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) { - option (google.api.http) = { post: "/v1beta3/projects/{project_id}:allocateIds" body: "*" }; - } -} - -// The request for [google.datastore.v1beta3.Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup]. -message LookupRequest { - // Project ID against which to make the request. - string project_id = 8; - - // Options for this lookup request. - ReadOptions read_options = 1; - - // Keys of entities to look up. - repeated Key keys = 3; -} - -// The response for [google.datastore.v1beta3.Datastore.Lookup][google.datastore.v1beta3.Datastore.Lookup]. -message LookupResponse { - // Entities found as `ResultType.FULL` entities. The order of results in this - // field is undefined and has no relation to the order of the keys in the - // input. - repeated EntityResult found = 1; - - // Entities not found as `ResultType.KEY_ONLY` entities. The order of results - // in this field is undefined and has no relation to the order of the keys - // in the input. - repeated EntityResult missing = 2; - - // A list of keys that were not looked up due to resource constraints. The - // order of results in this field is undefined and has no relation to the - // order of the keys in the input. - repeated Key deferred = 3; -} - -// The request for [google.datastore.v1beta3.Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery]. -message RunQueryRequest { - // Project ID against which to make the request. - string project_id = 8; - - // Entities are partitioned into subsets, identified by a partition ID. - // Queries are scoped to a single partition. - // This partition ID is normalized with the standard default context - // partition ID. - PartitionId partition_id = 2; - - // The options for this query. - ReadOptions read_options = 1; - - // The type of query. - oneof query_type { - // The query to run. - Query query = 3; - - // The GQL query to run. - GqlQuery gql_query = 7; - } -} - -// The response for [google.datastore.v1beta3.Datastore.RunQuery][google.datastore.v1beta3.Datastore.RunQuery]. -message RunQueryResponse { - // A batch of query results (always present). - QueryResultBatch batch = 1; - - // The parsed form of the `GqlQuery` from the request, if it was set. - Query query = 2; -} - -// The request for [google.datastore.v1beta3.Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. -message BeginTransactionRequest { - // Project ID against which to make the request. - string project_id = 8; -} - -// The response for [google.datastore.v1beta3.Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. -message BeginTransactionResponse { - // The transaction identifier (always present). 
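In the Python library these transaction RPCs are wrapped by :class:`gcloud.datastore.transaction.Transaction`; a minimal sketch, assuming the context-manager usage implied by the package docstring (``BeginTransaction`` on entry, ``Commit`` on success, ``Rollback`` if the block raises):

>>> from gcloud import datastore
>>> client = datastore.Client()
>>> with client.transaction():
...     entity = datastore.Entity(client.key('EntityKind', 1234))
...     client.put(entity)   # committed (or rolled back) with the transaction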
- bytes transaction = 1; -} - -// The request for [google.datastore.v1beta3.Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback]. -message RollbackRequest { - // Project ID against which to make the request. - string project_id = 8; - - // The transaction identifier, returned by a call to - // [google.datastore.v1beta3.Datastore.BeginTransaction][google.datastore.v1beta3.Datastore.BeginTransaction]. - bytes transaction = 1; -} - -// The response for [google.datastore.v1beta3.Datastore.Rollback][google.datastore.v1beta3.Datastore.Rollback] -// (an empty message). -message RollbackResponse { - -} - -// The request for [google.datastore.v1beta3.Datastore.Commit][google.datastore.v1beta3.Datastore.Commit]. -message CommitRequest { - // Commit modes. - enum Mode { - // Unspecified. - MODE_UNSPECIFIED = 0; - - // Transactional. - TRANSACTIONAL = 1; - - // Non-transactional. - NON_TRANSACTIONAL = 2; - } - - // Project ID against which to make the request. - string project_id = 8; - - // The type of commit to perform. Defaults to `TRANSACTIONAL`. - Mode mode = 5; - - // Must be set when mode is `TRANSACTIONAL`. - oneof transaction_selector { - // The transaction in which to write. - bytes transaction = 1; - } - - // The mutations to perform. - // - // When mode is `TRANSACTIONAL`, mutations affecting a single entity are - // applied in order. The following sequences of mutations affecting a single - // entity are not permitted in a single `Commit` request: - // - `insert` followed by `insert` - // - `update` followed by `insert` - // - `upsert` followed by `insert` - // - `delete` followed by `update` - // - // When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single - // entity. - repeated Mutation mutations = 6; -} - -// The response for [google.datastore.v1beta3.Datastore.Commit][google.datastore.v1beta3.Datastore.Commit]. -message CommitResponse { - // The result of performing the mutations. - // The i-th mutation result corresponds to the i-th mutation in the request. - repeated MutationResult mutation_results = 3; - - // The number of index entries updated during the commit. - int32 index_updates = 4; -} - -// The request for [google.datastore.v1beta3.Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds]. -message AllocateIdsRequest { - // Project ID against which to make the request. - string project_id = 8; - - // A list of keys with incomplete key paths for which to allocate IDs. - // No key may be reserved/read-only. - repeated Key keys = 1; -} - -// The response for [google.datastore.v1beta3.Datastore.AllocateIds][google.datastore.v1beta3.Datastore.AllocateIds]. -message AllocateIdsResponse { - // The keys specified in the request (in the same order), each with - // its key path completed with a newly allocated ID. - repeated Key keys = 1; -} - -// A mutation to apply to an entity. -message Mutation { - // The mutation operation. - // - // For `insert`, `update`, and `upsert`: - // - The entity's key must not be reserved/read-only. - // - No property in the entity may have a reserved name, - // not even a property in an entity in a value. - // - No value in the entity may have meaning 18, - // not even a value in an entity in another value. - oneof operation { - // The entity to insert. The entity must not already exist. - // The entity's key's final path element may be incomplete. - Entity insert = 4; - - // The entity to update. The entity must already exist. - // Must have a complete key path. 
- Entity update = 5; - - // The entity to upsert. The entity may or may not already exist. - // The entity's key's final path element may be incomplete. - Entity upsert = 6; - - // The key of the entity to delete. The entity may or may not already exist. - // Must have a complete key path and must not be reserved/read-only. - Key delete = 7; - } -} - -// The result of applying a mutation. -message MutationResult { - // The automatically allocated key. - // Set only when the mutation allocated a key. - Key key = 3; -} - -// Options shared by read requests. -message ReadOptions { - // Read consistencies. - enum ReadConsistency { - // Unspecified. - READ_CONSISTENCY_UNSPECIFIED = 0; - - // Strong consistency. - STRONG = 1; - - // Eventual consistency. - EVENTUAL = 2; - } - - // If not specified, lookups and ancestor queries default to - // `read_consistency`=`STRONG`, global queries default to - // `read_consistency`=`EVENTUAL`. - oneof consistency_type { - // The non-transactional read consistency to use. - // Cannot be set to `STRONG` for global queries. - ReadConsistency read_consistency = 1; - - // The transaction in which to read. - bytes transaction = 2; - } -} diff --git a/gcloud/datastore/_generated/_entity.proto b/gcloud/datastore/_generated/_entity.proto deleted file mode 100644 index 12423eb419f6..000000000000 --- a/gcloud/datastore/_generated/_entity.proto +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.datastore.v1beta3; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/timestamp.proto"; -import "google/type/latlng.proto"; - -option java_multiple_files = true; -option java_outer_classname = "EntityProto"; -option java_package = "com.google.datastore.v1beta3"; - - -// A partition ID identifies a grouping of entities. The grouping is always -// by project and namespace, however the namespace ID may be empty. -// -// A partition ID contains several dimensions: -// project ID and namespace ID. -// Partition dimensions: -// - A dimension may be `""`. -// - A dimension must be valid UTF-8 bytes. -// - A dimension's value must match regex `[A-Za-z\d\.\-_]{1,100}` -// If the value of any dimension matches regex `__.*__`, the partition is -// reserved/read-only. -// A reserved/read-only partition ID is forbidden in certain documented -// contexts. -// -// Foreign partition IDs (in which the project ID does -// not match the context project ID ) are discouraged. -// Reads and writes of foreign partition IDs may fail if the project is not in an active state. -message PartitionId { - // Project ID. - string project_id = 2; - - // Namespace ID. - string namespace_id = 4; -} - -// A unique identifier for an entity. -// If a key's partition id or any of its path kinds or names are -// reserved/read-only, the key is reserved/read-only. -// A reserved/read-only key is forbidden in certain documented contexts. 
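On the Python side, :class:`gcloud.datastore.key.Key` wraps exactly this structure: the client's project and namespace supply the partition ID, and the (kind, ID/name) pairs supply the path elements. A minimal sketch, assuming the ``client.key`` factory shown in the package docstring (the namespace value is illustrative):

>>> from gcloud import datastore
>>> client = datastore.Client(namespace='example-namespace')
>>> key = client.key('EntityKind', 1234)
>>> key.kind, key.id
('EntityKind', 1234)
>>> key.namespace
'example-namespace'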
-message Key { - // A (kind, ID/name) pair used to construct a key path. - // - // If either name nor ID is set, the element is complete. - // If neither is set, the element is incomplete. - message PathElement { - // The kind of the entity. - // A kind matching regex `__.*__` is reserved/read-only. - // A kind must not contain more than 1500 bytes when UTF-8 encoded. - // Cannot be `""`. - string kind = 1; - - // The type of id. - oneof id_type { - // The auto allocated ID of the entity. - // Never equal to zero. Values less than zero are discouraged and may not - // be supported in the future. - int64 id = 2; - - // The name of the entity. - // A name matching regex `__.*__` is reserved/read-only. - // A name must not be more than 1500 bytes when UTF-8 encoded. - // Cannot be `""`. - string name = 3; - } - } - - // Entities are partitioned into subsets, currently identified by a dataset - // (usually implicitly specified by the project) and namespace ID. - // Queries are scoped to a single partition. - PartitionId partition_id = 1; - - // The entity path. - // An entity path consists of one or more elements composed of a kind and a - // string or numerical identifier, which identify entities. The first - // element identifies a _root entity_, the second element identifies - // a _child_ of the root entity, the third element a child of the - // second entity, and so forth. The entities identified by all prefixes of - // the path are called the element's _ancestors_. - // An entity path is always fully complete: *all* of the entity's ancestors - // are required to be in the path along with the entity identifier itself. - // The only exception is that in some documented cases, the identifier in the - // last path element (for the entity) itself may be omitted. A path can never - // be empty. The path can have at most 100 elements. - repeated PathElement path = 2; -} - -// An array value. -message ArrayValue { - // Values in the array. - // The order of this array may not be preserved if it contains a mix of - // indexed and unindexed values. - repeated Value values = 1; -} - -// A message that can hold any of the supported value types and associated -// metadata. -message Value { - // Must have a value set. - oneof value_type { - // A null value. - google.protobuf.NullValue null_value = 11; - - // A boolean value. - bool boolean_value = 1; - - // An integer value. - int64 integer_value = 2; - - // A double value. - double double_value = 3; - - // A timestamp value. - // When stored in the Datastore, precise only to microseconds; - // any additional precision is rounded down. - google.protobuf.Timestamp timestamp_value = 10; - - // A key value. - Key key_value = 5; - - // A UTF-8 encoded string value. - // When `exclude_from_indexes` is false (it is indexed) and meaning is not - // 2, may have at most 1500 bytes. - // When meaning is 2, may have at most 2083 bytes. - // Otherwise, may be set to at least 1,000,000 bytes - string string_value = 17; - - // A blob value. - // May have at most 1,000,000 bytes. - // When `exclude_from_indexes` is false, may have at most 1500 bytes. - // In JSON requests, must be base64-encoded. - bytes blob_value = 18; - - // A geo point value representing a point on the surface of Earth. - google.type.LatLng geo_point_value = 8; - - // An entity value. - // May have no key. - // May have a key with an incomplete key path. - // May have a reserved/read-only key. - Entity entity_value = 6; - - // An array value. - // Cannot contain another array value. 
- // A `Value` instance that sets field `array_value` must not set fields - // `meaning` or `exclude_from_indexes`. - ArrayValue array_value = 9; - } - - // The `meaning` field should only be populated for backwards compatibility. - int32 meaning = 14; - - // If the value should be excluded from all indexes including those defined - // explicitly. - bool exclude_from_indexes = 19; -} - -// An entity. -// -// An entity is limited to 1 megabyte when stored. That _roughly_ -// corresponds to a limit of 1 megabyte for the serialized form of this -// message. -message Entity { - // The entity's key. - // - // An entity must have a key, unless otherwise documented (for example, - // an entity in `Value.entity_value` may have no key). - // An entity's kind is its key's path's last element's kind, - // or null if it has no key. - Key key = 1; - - // The entity's properties. - // The map's keys are property names. - // A property name matching regex `__.*__` is reserved. - // A reserved property name is forbidden in certain documented contexts. - // The name must not contain more than 500 characters. - // The name cannot be `""`. - map<string, Value> properties = 3; -} diff --git a/gcloud/datastore/_generated/_query.proto b/gcloud/datastore/_generated/_query.proto deleted file mode 100644 index 80cbb2045ebc..000000000000 --- a/gcloud/datastore/_generated/_query.proto +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.datastore.v1beta3; - -import "google/api/annotations.proto"; -import "google/datastore/v1beta3/entity.proto"; -import "google/protobuf/wrappers.proto"; - -option java_multiple_files = true; -option java_outer_classname = "QueryProto"; -option java_package = "com.google.datastore.v1beta3"; - - -// The result of fetching an entity from the datastore. -message EntityResult { - // Specifies what data the 'entity' field contains. - // A `ResultType` is either implied (for example, in `LookupResponse.found` - // from `datastore.proto`, it is always `FULL`) or specified by context (for - // example, in message `QueryResultBatch`, field `entity_result_type` - // specifies a `ResultType` for all the values in field `entity_results`). - enum ResultType { - // Unspecified. - RESULT_TYPE_UNSPECIFIED = 0; - - // The entire entity. - FULL = 1; - - // A projected subset of properties. The entity may have no key. A property - // value may have meaning 18. - PROJECTION = 2; - - // Only the key. - KEY_ONLY = 3; - } - - // The resulting entity. - Entity entity = 1; - - // A cursor that points to the position after the result entity. - // Set only when the `EntityResult` is part of a `QueryResultBatch` message. - bytes cursor = 3; -} - -// A query. -message Query { - // The projection to return. Defaults to returning all properties. - repeated Projection projection = 2; - - // The kinds to query (if empty, returns entities of all kinds). - // Currently at most 1 kind may be specified. 
- repeated KindExpression kind = 3; - - // The filter to apply. - Filter filter = 4; - - // The order to apply to the query results (if empty, order is unspecified). - repeated PropertyOrder order = 5; - - // The properties to make distinct. The query results will contain the first - // result for each distinct combination of values for the given properties - // (if empty, all results are returned). - repeated PropertyReference distinct_on = 6; - - // A starting point for the query results. Query cursors are - // returned in query result batches. - bytes start_cursor = 7; - - // An ending point for the query results. Query cursors are - // returned in query result batches. - bytes end_cursor = 8; - - // The number of results to skip. Applies before limit, but after all other - // constraints. - // Must be >= 0. - int32 offset = 10; - - // The maximum number of results to return. Applies after all other - // constraints. - // Unspecified is interpreted as no limit. - // Must be >= 0. - google.protobuf.Int32Value limit = 12; -} - -// A representation of a kind. -message KindExpression { - // The name of the kind. - string name = 1; -} - -// A reference to a property relative to the kind expressions. -message PropertyReference { - // The name of the property. - string name = 2; -} - -// A representation of a property in a projection. -message Projection { - // The property to project. - PropertyReference property = 1; -} - -// The desired order for a specific property. -message PropertyOrder { - // Direction. - enum Direction { - // Unspecified. - DIRECTION_UNSPECIFIED = 0; - - // Ascending. - ASCENDING = 1; - - // Descending. - DESCENDING = 2; - } - - // The property to order by. - PropertyReference property = 1; - - // The direction to order by. Defaults to `ASCENDING`. - Direction direction = 2; -} - -// A holder for any type of filter. -message Filter { - // The type of filter. - oneof filter_type { - // A composite filter. - CompositeFilter composite_filter = 1; - - // A filter on a property. - PropertyFilter property_filter = 2; - } -} - -// A filter that merges the multiple other filters using the given operator. -message CompositeFilter { - // Composite filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // And. - AND = 1; - } - - // The operator for combining multiple filters. - Operator op = 1; - - // The list of filters to combine. - // Must contain at least one filter. - repeated Filter filters = 2; -} - -// A filter on a specific property. -message PropertyFilter { - // Property filter operator. - enum Operator { - // Unspecified. This value must not be used. - OPERATOR_UNSPECIFIED = 0; - - // Less than. - LESS_THAN = 1; - - // Less than or equal. - LESS_THAN_OR_EQUAL = 2; - - // Greater than. - GREATER_THAN = 3; - - // Greater than or equal. - GREATER_THAN_OR_EQUAL = 4; - - // Equal. - EQUAL = 5; - - // Has ancestor. - HAS_ANCESTOR = 11; - } - - // The property to filter by. - PropertyReference property = 1; - - // The operator to filter by. - Operator op = 2; - - // The value to compare the property to. - Value value = 3; -} - -// A GQL query. -message GqlQuery { - // A string of the format described - // [here](https://developers.google.com/datastore/docs/concepts/gql). - string query_string = 1; - - // When false, the query string must not contain any literals and instead - // must bind all values. 
For example, - // `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while - // `SELECT * FROM Kind WHERE a = @value` is. - bool allow_literals = 2; - - // For each non-reserved named binding site in the query string, - // there must be a named parameter with that name, - // but not necessarily the inverse. - // Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex - // `__.*__`, and must not be `""`. - map named_bindings = 5; - - // Numbered binding site @1 references the first numbered parameter, - // effectively using 1-based indexing, rather than the usual 0. - // For each binding site numbered i in `query_string`, - // there must be an i-th numbered parameter. - // The inverse must also be true. - repeated GqlQueryParameter positional_bindings = 4; -} - -// A binding parameter for a GQL query. -message GqlQueryParameter { - // The type of parameter. - oneof parameter_type { - // Value. - Value value = 2; - - // Cursor. - bytes cursor = 3; - } -} - -// A batch of results produced by a query. -message QueryResultBatch { - // The possible values for the `more_results` field. - enum MoreResultsType { - // Unspecified. This value is never used. - MORE_RESULTS_TYPE_UNSPECIFIED = 0; - - // There may be additional batches to fetch from this query. - NOT_FINISHED = 1; - - // The query is finished, but there may be more results after the limit. - MORE_RESULTS_AFTER_LIMIT = 2; - - // The query is finished, but there may be more results after the end cursor. - MORE_RESULTS_AFTER_CURSOR = 4; - - // The query has been exhausted. - NO_MORE_RESULTS = 3; - } - - // The number of results skipped, typically because of an offset. - int32 skipped_results = 6; - - // A cursor that points to the position after the last skipped result. - // Will be set when `skipped_results` != 0. - bytes skipped_cursor = 3; - - // The result type for every entity in `entity_results`. - EntityResult.ResultType entity_result_type = 1; - - // The results for this batch. - repeated EntityResult entity_results = 2; - - // A cursor that points to the position after the last result in the batch. - bytes end_cursor = 4; - - // The state of the query after the current batch. 
- MoreResultsType more_results = 5; -} diff --git a/gcloud/datastore/_generated/datastore_grpc_pb2.py b/gcloud/datastore/_generated/datastore_grpc_pb2.py deleted file mode 100644 index 5e648344259e..000000000000 --- a/gcloud/datastore/_generated/datastore_grpc_pb2.py +++ /dev/null @@ -1,279 +0,0 @@ -import abc -from grpc.beta import implementations as beta_implementations -from grpc.early_adopter import implementations as early_adopter_implementations -from grpc.framework.alpha import utilities as alpha_utilities -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities -class EarlyAdopterDatastoreServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def Lookup(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def RunQuery(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def BeginTransaction(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def Commit(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def Rollback(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def AllocateIds(self, request, context): - raise NotImplementedError() -class EarlyAdopterDatastoreServer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def start(self): - raise NotImplementedError() - @abc.abstractmethod - def stop(self): - raise NotImplementedError() -class EarlyAdopterDatastoreStub(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def Lookup(self, request): - raise NotImplementedError() - Lookup.async = None - @abc.abstractmethod - def RunQuery(self, request): - raise NotImplementedError() - RunQuery.async = None - @abc.abstractmethod - def BeginTransaction(self, request): - raise NotImplementedError() - BeginTransaction.async = None - @abc.abstractmethod - def Commit(self, request): - raise NotImplementedError() - Commit.async = None - @abc.abstractmethod - def Rollback(self, request): - raise NotImplementedError() - Rollback.async = None - @abc.abstractmethod - def AllocateIds(self, request): - raise NotImplementedError() - AllocateIds.async = None -def early_adopter_create_Datastore_server(servicer, port, private_key=None, certificate_chain=None): - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - method_service_descriptions = { - "AllocateIds": alpha_utilities.unary_unary_service_description( - servicer.AllocateIds, - gcloud.datastore._generated.datastore_pb2.AllocateIdsRequest.FromString, - gcloud.datastore._generated.datastore_pb2.AllocateIdsResponse.SerializeToString, - ), - "BeginTransaction": alpha_utilities.unary_unary_service_description( - servicer.BeginTransaction, - gcloud.datastore._generated.datastore_pb2.BeginTransactionRequest.FromString, - gcloud.datastore._generated.datastore_pb2.BeginTransactionResponse.SerializeToString, - ), - "Commit": 
alpha_utilities.unary_unary_service_description( - servicer.Commit, - gcloud.datastore._generated.datastore_pb2.CommitRequest.FromString, - gcloud.datastore._generated.datastore_pb2.CommitResponse.SerializeToString, - ), - "Lookup": alpha_utilities.unary_unary_service_description( - servicer.Lookup, - gcloud.datastore._generated.datastore_pb2.LookupRequest.FromString, - gcloud.datastore._generated.datastore_pb2.LookupResponse.SerializeToString, - ), - "Rollback": alpha_utilities.unary_unary_service_description( - servicer.Rollback, - gcloud.datastore._generated.datastore_pb2.RollbackRequest.FromString, - gcloud.datastore._generated.datastore_pb2.RollbackResponse.SerializeToString, - ), - "RunQuery": alpha_utilities.unary_unary_service_description( - servicer.RunQuery, - gcloud.datastore._generated.datastore_pb2.RunQueryRequest.FromString, - gcloud.datastore._generated.datastore_pb2.RunQueryResponse.SerializeToString, - ), - } - return early_adopter_implementations.server("google.datastore.v1beta3.Datastore", method_service_descriptions, port, private_key=private_key, certificate_chain=certificate_chain) -def early_adopter_create_Datastore_stub(host, port, metadata_transformer=None, secure=False, root_certificates=None, private_key=None, certificate_chain=None, server_host_override=None): - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - method_invocation_descriptions = { - "AllocateIds": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.AllocateIdsRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.AllocateIdsResponse.FromString, - ), - "BeginTransaction": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.BeginTransactionRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.BeginTransactionResponse.FromString, - ), - "Commit": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.CommitRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.CommitResponse.FromString, - ), - "Lookup": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.LookupRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.LookupResponse.FromString, - ), - "Rollback": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.RollbackRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.RollbackResponse.FromString, - ), - "RunQuery": alpha_utilities.unary_unary_invocation_description( - gcloud.datastore._generated.datastore_pb2.RunQueryRequest.SerializeToString, - gcloud.datastore._generated.datastore_pb2.RunQueryResponse.FromString, - ), - } - return early_adopter_implementations.stub("google.datastore.v1beta3.Datastore", method_invocation_descriptions, host, port, metadata_transformer=metadata_transformer, secure=secure, root_certificates=root_certificates, 
private_key=private_key, certificate_chain=certificate_chain, server_host_override=server_host_override) - -class BetaDatastoreServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def Lookup(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def RunQuery(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def BeginTransaction(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def Commit(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def Rollback(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def AllocateIds(self, request, context): - raise NotImplementedError() - -class BetaDatastoreStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def Lookup(self, request, timeout): - raise NotImplementedError() - Lookup.future = None - @abc.abstractmethod - def RunQuery(self, request, timeout): - raise NotImplementedError() - RunQuery.future = None - @abc.abstractmethod - def BeginTransaction(self, request, timeout): - raise NotImplementedError() - BeginTransaction.future = None - @abc.abstractmethod - def Commit(self, request, timeout): - raise NotImplementedError() - Commit.future = None - @abc.abstractmethod - def Rollback(self, request, timeout): - raise NotImplementedError() - Rollback.future = None - @abc.abstractmethod - def AllocateIds(self, request, timeout): - raise NotImplementedError() - AllocateIds.future = None - -def beta_create_Datastore_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - request_deserializers = { - ('google.datastore.v1beta3.Datastore', 'AllocateIds'): gcloud.datastore._generated.datastore_pb2.AllocateIdsRequest.FromString, - ('google.datastore.v1beta3.Datastore', 'BeginTransaction'): gcloud.datastore._generated.datastore_pb2.BeginTransactionRequest.FromString, - ('google.datastore.v1beta3.Datastore', 'Commit'): gcloud.datastore._generated.datastore_pb2.CommitRequest.FromString, - ('google.datastore.v1beta3.Datastore', 'Lookup'): gcloud.datastore._generated.datastore_pb2.LookupRequest.FromString, - ('google.datastore.v1beta3.Datastore', 'Rollback'): gcloud.datastore._generated.datastore_pb2.RollbackRequest.FromString, - ('google.datastore.v1beta3.Datastore', 'RunQuery'): gcloud.datastore._generated.datastore_pb2.RunQueryRequest.FromString, - } - response_serializers = { - ('google.datastore.v1beta3.Datastore', 'AllocateIds'): gcloud.datastore._generated.datastore_pb2.AllocateIdsResponse.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'BeginTransaction'): gcloud.datastore._generated.datastore_pb2.BeginTransactionResponse.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'Commit'): gcloud.datastore._generated.datastore_pb2.CommitResponse.SerializeToString, - 
('google.datastore.v1beta3.Datastore', 'Lookup'): gcloud.datastore._generated.datastore_pb2.LookupResponse.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'Rollback'): gcloud.datastore._generated.datastore_pb2.RollbackResponse.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'RunQuery'): gcloud.datastore._generated.datastore_pb2.RunQueryResponse.SerializeToString, - } - method_implementations = { - ('google.datastore.v1beta3.Datastore', 'AllocateIds'): face_utilities.unary_unary_inline(servicer.AllocateIds), - ('google.datastore.v1beta3.Datastore', 'BeginTransaction'): face_utilities.unary_unary_inline(servicer.BeginTransaction), - ('google.datastore.v1beta3.Datastore', 'Commit'): face_utilities.unary_unary_inline(servicer.Commit), - ('google.datastore.v1beta3.Datastore', 'Lookup'): face_utilities.unary_unary_inline(servicer.Lookup), - ('google.datastore.v1beta3.Datastore', 'Rollback'): face_utilities.unary_unary_inline(servicer.Rollback), - ('google.datastore.v1beta3.Datastore', 'RunQuery'): face_utilities.unary_unary_inline(servicer.RunQuery), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_Datastore_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - import gcloud.datastore._generated.datastore_pb2 - request_serializers = { - ('google.datastore.v1beta3.Datastore', 'AllocateIds'): gcloud.datastore._generated.datastore_pb2.AllocateIdsRequest.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'BeginTransaction'): gcloud.datastore._generated.datastore_pb2.BeginTransactionRequest.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'Commit'): gcloud.datastore._generated.datastore_pb2.CommitRequest.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'Lookup'): gcloud.datastore._generated.datastore_pb2.LookupRequest.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'Rollback'): gcloud.datastore._generated.datastore_pb2.RollbackRequest.SerializeToString, - ('google.datastore.v1beta3.Datastore', 'RunQuery'): gcloud.datastore._generated.datastore_pb2.RunQueryRequest.SerializeToString, - } - response_deserializers = { - ('google.datastore.v1beta3.Datastore', 'AllocateIds'): gcloud.datastore._generated.datastore_pb2.AllocateIdsResponse.FromString, - ('google.datastore.v1beta3.Datastore', 'BeginTransaction'): gcloud.datastore._generated.datastore_pb2.BeginTransactionResponse.FromString, - ('google.datastore.v1beta3.Datastore', 'Commit'): gcloud.datastore._generated.datastore_pb2.CommitResponse.FromString, - ('google.datastore.v1beta3.Datastore', 'Lookup'): gcloud.datastore._generated.datastore_pb2.LookupResponse.FromString, - 
('google.datastore.v1beta3.Datastore', 'Rollback'): gcloud.datastore._generated.datastore_pb2.RollbackResponse.FromString, - ('google.datastore.v1beta3.Datastore', 'RunQuery'): gcloud.datastore._generated.datastore_pb2.RunQueryResponse.FromString, - } - cardinalities = { - 'AllocateIds': cardinality.Cardinality.UNARY_UNARY, - 'BeginTransaction': cardinality.Cardinality.UNARY_UNARY, - 'Commit': cardinality.Cardinality.UNARY_UNARY, - 'Lookup': cardinality.Cardinality.UNARY_UNARY, - 'Rollback': cardinality.Cardinality.UNARY_UNARY, - 'RunQuery': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.datastore.v1beta3.Datastore', cardinalities, options=stub_options) diff --git a/gcloud/datastore/_generated/datastore_pb2.py b/gcloud/datastore/_generated/datastore_pb2.py deleted file mode 100644 index ffba033868c0..000000000000 --- a/gcloud/datastore/_generated/datastore_pb2.py +++ /dev/null @@ -1,862 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/datastore/v1beta3/datastore.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.datastore._generated import entity_pb2 as google_dot_datastore_dot_v1beta3_dot_entity__pb2 -from gcloud.datastore._generated import query_pb2 as google_dot_datastore_dot_v1beta3_dot_query__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/datastore/v1beta3/datastore.proto', - package='google.datastore.v1beta3', - syntax='proto3', - serialized_pb=b'\n(google/datastore/v1beta3/datastore.proto\x12\x18google.datastore.v1beta3\x1a\x1cgoogle/api/annotations.proto\x1a%google/datastore/v1beta3/entity.proto\x1a$google/datastore/v1beta3/query.proto\"\x8d\x01\n\rLookupRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12;\n\x0cread_options\x18\x01 \x01(\x0b\x32%.google.datastore.v1beta3.ReadOptions\x12+\n\x04keys\x18\x03 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xb1\x01\n\x0eLookupResponse\x12\x35\n\x05\x66ound\x18\x01 \x03(\x0b\x32&.google.datastore.v1beta3.EntityResult\x12\x37\n\x07missing\x18\x02 \x03(\x0b\x32&.google.datastore.v1beta3.EntityResult\x12/\n\x08\x64\x65\x66\x65rred\x18\x03 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\x98\x02\n\x0fRunQueryRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12;\n\x0cpartition_id\x18\x02 \x01(\x0b\x32%.google.datastore.v1beta3.PartitionId\x12;\n\x0cread_options\x18\x01 \x01(\x0b\x32%.google.datastore.v1beta3.ReadOptions\x12\x30\n\x05query\x18\x03 \x01(\x0b\x32\x1f.google.datastore.v1beta3.QueryH\x00\x12\x37\n\tgql_query\x18\x07 \x01(\x0b\x32\".google.datastore.v1beta3.GqlQueryH\x00\x42\x0c\n\nquery_type\"}\n\x10RunQueryResponse\x12\x39\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32*.google.datastore.v1beta3.QueryResultBatch\x12.\n\x05query\x18\x02 \x01(\x0b\x32\x1f.google.datastore.v1beta3.Query\"-\n\x17\x42\x65ginTransactionRequest\x12\x12\n\nproject_id\x18\x08 
\x01(\t\"/\n\x18\x42\x65ginTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\":\n\x0fRollbackRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"\x12\n\x10RollbackResponse\"\x8d\x02\n\rCommitRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12:\n\x04mode\x18\x05 \x01(\x0e\x32,.google.datastore.v1beta3.CommitRequest.Mode\x12\x15\n\x0btransaction\x18\x01 \x01(\x0cH\x00\x12\x35\n\tmutations\x18\x06 \x03(\x0b\x32\".google.datastore.v1beta3.Mutation\"F\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\x11\n\rTRANSACTIONAL\x10\x01\x12\x15\n\x11NON_TRANSACTIONAL\x10\x02\x42\x16\n\x14transaction_selector\"k\n\x0e\x43ommitResponse\x12\x42\n\x10mutation_results\x18\x03 \x03(\x0b\x32(.google.datastore.v1beta3.MutationResult\x12\x15\n\rindex_updates\x18\x04 \x01(\x05\"U\n\x12\x41llocateIdsRequest\x12\x12\n\nproject_id\x18\x08 \x01(\t\x12+\n\x04keys\x18\x01 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"B\n\x13\x41llocateIdsResponse\x12+\n\x04keys\x18\x01 \x03(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xe4\x01\n\x08Mutation\x12\x32\n\x06insert\x18\x04 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12\x32\n\x06update\x18\x05 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12\x32\n\x06upsert\x18\x06 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12/\n\x06\x64\x65lete\x18\x07 \x01(\x0b\x32\x1d.google.datastore.v1beta3.KeyH\x00\x42\x0b\n\toperation\"<\n\x0eMutationResult\x12*\n\x03key\x18\x03 \x01(\x0b\x32\x1d.google.datastore.v1beta3.Key\"\xda\x01\n\x0bReadOptions\x12Q\n\x10read_consistency\x18\x01 \x01(\x0e\x32\x35.google.datastore.v1beta3.ReadOptions.ReadConsistencyH\x00\x12\x15\n\x0btransaction\x18\x02 \x01(\x0cH\x00\"M\n\x0fReadConsistency\x12 \n\x1cREAD_CONSISTENCY_UNSPECIFIED\x10\x00\x12\n\n\x06STRONG\x10\x01\x12\x0c\n\x08\x45VENTUAL\x10\x02\x42\x12\n\x10\x63onsistency_type2\xb7\x07\n\tDatastore\x12\x8d\x01\n\x06Lookup\x12\'.google.datastore.v1beta3.LookupRequest\x1a(.google.datastore.v1beta3.LookupResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1beta3/projects/{project_id}:lookup:\x01*\x12\x95\x01\n\x08RunQuery\x12).google.datastore.v1beta3.RunQueryRequest\x1a*.google.datastore.v1beta3.RunQueryResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1beta3/projects/{project_id}:runQuery:\x01*\x12\xb5\x01\n\x10\x42\x65ginTransaction\x12\x31.google.datastore.v1beta3.BeginTransactionRequest\x1a\x32.google.datastore.v1beta3.BeginTransactionResponse\":\x82\xd3\xe4\x93\x02\x34\"//v1beta3/projects/{project_id}:beginTransaction:\x01*\x12\x8d\x01\n\x06\x43ommit\x12\'.google.datastore.v1beta3.CommitRequest\x1a(.google.datastore.v1beta3.CommitResponse\"0\x82\xd3\xe4\x93\x02*\"%/v1beta3/projects/{project_id}:commit:\x01*\x12\x95\x01\n\x08Rollback\x12).google.datastore.v1beta3.RollbackRequest\x1a*.google.datastore.v1beta3.RollbackResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1beta3/projects/{project_id}:rollback:\x01*\x12\xa1\x01\n\x0b\x41llocateIds\x12,.google.datastore.v1beta3.AllocateIdsRequest\x1a-.google.datastore.v1beta3.AllocateIdsResponse\"5\x82\xd3\xe4\x93\x02/\"*/v1beta3/projects/{project_id}:allocateIds:\x01*B0\n\x1c\x63om.google.datastore.v1beta3B\x0e\x44\x61tastoreProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_datastore_dot_v1beta3_dot_entity__pb2.DESCRIPTOR,google_dot_datastore_dot_v1beta3_dot_query__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_COMMITREQUEST_MODE = _descriptor.EnumDescriptor( - name='Mode', - full_name='google.datastore.v1beta3.CommitRequest.Mode', - filename=None, - 
file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MODE_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TRANSACTIONAL', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NON_TRANSACTIONAL', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1263, - serialized_end=1333, -) -_sym_db.RegisterEnumDescriptor(_COMMITREQUEST_MODE) - -_READOPTIONS_READCONSISTENCY = _descriptor.EnumDescriptor( - name='ReadConsistency', - full_name='google.datastore.v1beta3.ReadOptions.ReadConsistency', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='READ_CONSISTENCY_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STRONG', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EVENTUAL', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2038, - serialized_end=2115, -) -_sym_db.RegisterEnumDescriptor(_READOPTIONS_READCONSISTENCY) - - -_LOOKUPREQUEST = _descriptor.Descriptor( - name='LookupRequest', - full_name='google.datastore.v1beta3.LookupRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.LookupRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='read_options', full_name='google.datastore.v1beta3.LookupRequest.read_options', index=1, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='keys', full_name='google.datastore.v1beta3.LookupRequest.keys', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=178, - serialized_end=319, -) - - -_LOOKUPRESPONSE = _descriptor.Descriptor( - name='LookupResponse', - full_name='google.datastore.v1beta3.LookupResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='found', full_name='google.datastore.v1beta3.LookupResponse.found', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='missing', full_name='google.datastore.v1beta3.LookupResponse.missing', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='deferred', 
full_name='google.datastore.v1beta3.LookupResponse.deferred', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=322, - serialized_end=499, -) - - -_RUNQUERYREQUEST = _descriptor.Descriptor( - name='RunQueryRequest', - full_name='google.datastore.v1beta3.RunQueryRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.RunQueryRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='partition_id', full_name='google.datastore.v1beta3.RunQueryRequest.partition_id', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='read_options', full_name='google.datastore.v1beta3.RunQueryRequest.read_options', index=2, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='query', full_name='google.datastore.v1beta3.RunQueryRequest.query', index=3, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gql_query', full_name='google.datastore.v1beta3.RunQueryRequest.gql_query', index=4, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='query_type', full_name='google.datastore.v1beta3.RunQueryRequest.query_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=502, - serialized_end=782, -) - - -_RUNQUERYRESPONSE = _descriptor.Descriptor( - name='RunQueryResponse', - full_name='google.datastore.v1beta3.RunQueryResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='batch', full_name='google.datastore.v1beta3.RunQueryResponse.batch', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='query', full_name='google.datastore.v1beta3.RunQueryResponse.query', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=784, - serialized_end=909, -) - - -_BEGINTRANSACTIONREQUEST = _descriptor.Descriptor( - name='BeginTransactionRequest', - full_name='google.datastore.v1beta3.BeginTransactionRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.BeginTransactionRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=911, - serialized_end=956, -) - - -_BEGINTRANSACTIONRESPONSE = _descriptor.Descriptor( - name='BeginTransactionResponse', - full_name='google.datastore.v1beta3.BeginTransactionResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='transaction', full_name='google.datastore.v1beta3.BeginTransactionResponse.transaction', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=958, - serialized_end=1005, -) - - -_ROLLBACKREQUEST = _descriptor.Descriptor( - name='RollbackRequest', - full_name='google.datastore.v1beta3.RollbackRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.RollbackRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transaction', full_name='google.datastore.v1beta3.RollbackRequest.transaction', index=1, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1007, - serialized_end=1065, -) - - -_ROLLBACKRESPONSE = _descriptor.Descriptor( - name='RollbackResponse', - full_name='google.datastore.v1beta3.RollbackResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1067, - serialized_end=1085, -) - - -_COMMITREQUEST = _descriptor.Descriptor( - name='CommitRequest', - full_name='google.datastore.v1beta3.CommitRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ 
- _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.CommitRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mode', full_name='google.datastore.v1beta3.CommitRequest.mode', index=1, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transaction', full_name='google.datastore.v1beta3.CommitRequest.transaction', index=2, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.datastore.v1beta3.CommitRequest.mutations', index=3, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _COMMITREQUEST_MODE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='transaction_selector', full_name='google.datastore.v1beta3.CommitRequest.transaction_selector', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1088, - serialized_end=1357, -) - - -_COMMITRESPONSE = _descriptor.Descriptor( - name='CommitResponse', - full_name='google.datastore.v1beta3.CommitResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='mutation_results', full_name='google.datastore.v1beta3.CommitResponse.mutation_results', index=0, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='index_updates', full_name='google.datastore.v1beta3.CommitResponse.index_updates', index=1, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1359, - serialized_end=1466, -) - - -_ALLOCATEIDSREQUEST = _descriptor.Descriptor( - name='AllocateIdsRequest', - full_name='google.datastore.v1beta3.AllocateIdsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.AllocateIdsRequest.project_id', index=0, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='keys', full_name='google.datastore.v1beta3.AllocateIdsRequest.keys', index=1, - number=1, type=11, cpp_type=10, 
label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1468, - serialized_end=1553, -) - - -_ALLOCATEIDSRESPONSE = _descriptor.Descriptor( - name='AllocateIdsResponse', - full_name='google.datastore.v1beta3.AllocateIdsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='keys', full_name='google.datastore.v1beta3.AllocateIdsResponse.keys', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1555, - serialized_end=1621, -) - - -_MUTATION = _descriptor.Descriptor( - name='Mutation', - full_name='google.datastore.v1beta3.Mutation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='insert', full_name='google.datastore.v1beta3.Mutation.insert', index=0, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='update', full_name='google.datastore.v1beta3.Mutation.update', index=1, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='upsert', full_name='google.datastore.v1beta3.Mutation.upsert', index=2, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete', full_name='google.datastore.v1beta3.Mutation.delete', index=3, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='operation', full_name='google.datastore.v1beta3.Mutation.operation', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1624, - serialized_end=1852, -) - - -_MUTATIONRESULT = _descriptor.Descriptor( - name='MutationResult', - full_name='google.datastore.v1beta3.MutationResult', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.datastore.v1beta3.MutationResult.key', index=0, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - 
options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1854, - serialized_end=1914, -) - - -_READOPTIONS = _descriptor.Descriptor( - name='ReadOptions', - full_name='google.datastore.v1beta3.ReadOptions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='read_consistency', full_name='google.datastore.v1beta3.ReadOptions.read_consistency', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transaction', full_name='google.datastore.v1beta3.ReadOptions.transaction', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _READOPTIONS_READCONSISTENCY, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='consistency_type', full_name='google.datastore.v1beta3.ReadOptions.consistency_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1917, - serialized_end=2135, -) - -_LOOKUPREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS -_LOOKUPREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_LOOKUPRESPONSE.fields_by_name['found'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._ENTITYRESULT -_LOOKUPRESPONSE.fields_by_name['missing'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._ENTITYRESULT -_LOOKUPRESPONSE.fields_by_name['deferred'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_RUNQUERYREQUEST.fields_by_name['partition_id'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._PARTITIONID -_RUNQUERYREQUEST.fields_by_name['read_options'].message_type = _READOPTIONS -_RUNQUERYREQUEST.fields_by_name['query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERY -_RUNQUERYREQUEST.fields_by_name['gql_query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._GQLQUERY -_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append( - _RUNQUERYREQUEST.fields_by_name['query']) -_RUNQUERYREQUEST.fields_by_name['query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type'] -_RUNQUERYREQUEST.oneofs_by_name['query_type'].fields.append( - _RUNQUERYREQUEST.fields_by_name['gql_query']) -_RUNQUERYREQUEST.fields_by_name['gql_query'].containing_oneof = _RUNQUERYREQUEST.oneofs_by_name['query_type'] -_RUNQUERYRESPONSE.fields_by_name['batch'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERYRESULTBATCH -_RUNQUERYRESPONSE.fields_by_name['query'].message_type = google_dot_datastore_dot_v1beta3_dot_query__pb2._QUERY -_COMMITREQUEST.fields_by_name['mode'].enum_type = _COMMITREQUEST_MODE -_COMMITREQUEST.fields_by_name['mutations'].message_type = _MUTATION -_COMMITREQUEST_MODE.containing_type = _COMMITREQUEST -_COMMITREQUEST.oneofs_by_name['transaction_selector'].fields.append( - _COMMITREQUEST.fields_by_name['transaction']) -_COMMITREQUEST.fields_by_name['transaction'].containing_oneof = _COMMITREQUEST.oneofs_by_name['transaction_selector'] 
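The `CommitRequest` and `Mutation` descriptors above encode how writes are submitted: each `Mutation` sets exactly one member of the `operation` oneof (`insert`, `update`, `upsert`, or `delete`), and `CommitRequest.mode` chooses between transactional and non-transactional commits. A minimal sketch using only the generated classes from the files deleted in this diff (project id, kind, and name are placeholders; the final stub call is left commented because it needs a channel created via the beta helpers in the deleted `datastore_grpc_pb2.py`)::

    from gcloud.datastore._generated import datastore_pb2, entity_pb2

    # Key of the entity to delete: a kind plus a name (name/id form the id_type oneof).
    key = entity_pb2.Key()
    key.partition_id.project_id = 'my-project'
    element = key.path.add()
    element.kind = 'Person'
    element.name = 'alice'

    # Non-transactional commit carrying a single `delete` mutation.
    request = datastore_pb2.CommitRequest(
        project_id='my-project',
        mode=datastore_pb2.CommitRequest.NON_TRANSACTIONAL,
    )
    request.mutations.add().delete.CopyFrom(key)

    # response = stub.Commit(request, timeout=10)  # per the BetaDatastoreStub signature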
-_COMMITRESPONSE.fields_by_name['mutation_results'].message_type = _MUTATIONRESULT -_ALLOCATEIDSREQUEST.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_ALLOCATEIDSRESPONSE.fields_by_name['keys'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_MUTATION.fields_by_name['insert'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY -_MUTATION.fields_by_name['update'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY -_MUTATION.fields_by_name['upsert'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY -_MUTATION.fields_by_name['delete'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_MUTATION.oneofs_by_name['operation'].fields.append( - _MUTATION.fields_by_name['insert']) -_MUTATION.fields_by_name['insert'].containing_oneof = _MUTATION.oneofs_by_name['operation'] -_MUTATION.oneofs_by_name['operation'].fields.append( - _MUTATION.fields_by_name['update']) -_MUTATION.fields_by_name['update'].containing_oneof = _MUTATION.oneofs_by_name['operation'] -_MUTATION.oneofs_by_name['operation'].fields.append( - _MUTATION.fields_by_name['upsert']) -_MUTATION.fields_by_name['upsert'].containing_oneof = _MUTATION.oneofs_by_name['operation'] -_MUTATION.oneofs_by_name['operation'].fields.append( - _MUTATION.fields_by_name['delete']) -_MUTATION.fields_by_name['delete'].containing_oneof = _MUTATION.oneofs_by_name['operation'] -_MUTATIONRESULT.fields_by_name['key'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._KEY -_READOPTIONS.fields_by_name['read_consistency'].enum_type = _READOPTIONS_READCONSISTENCY -_READOPTIONS_READCONSISTENCY.containing_type = _READOPTIONS -_READOPTIONS.oneofs_by_name['consistency_type'].fields.append( - _READOPTIONS.fields_by_name['read_consistency']) -_READOPTIONS.fields_by_name['read_consistency'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type'] -_READOPTIONS.oneofs_by_name['consistency_type'].fields.append( - _READOPTIONS.fields_by_name['transaction']) -_READOPTIONS.fields_by_name['transaction'].containing_oneof = _READOPTIONS.oneofs_by_name['consistency_type'] -DESCRIPTOR.message_types_by_name['LookupRequest'] = _LOOKUPREQUEST -DESCRIPTOR.message_types_by_name['LookupResponse'] = _LOOKUPRESPONSE -DESCRIPTOR.message_types_by_name['RunQueryRequest'] = _RUNQUERYREQUEST -DESCRIPTOR.message_types_by_name['RunQueryResponse'] = _RUNQUERYRESPONSE -DESCRIPTOR.message_types_by_name['BeginTransactionRequest'] = _BEGINTRANSACTIONREQUEST -DESCRIPTOR.message_types_by_name['BeginTransactionResponse'] = _BEGINTRANSACTIONRESPONSE -DESCRIPTOR.message_types_by_name['RollbackRequest'] = _ROLLBACKREQUEST -DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE -DESCRIPTOR.message_types_by_name['CommitRequest'] = _COMMITREQUEST -DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE -DESCRIPTOR.message_types_by_name['AllocateIdsRequest'] = _ALLOCATEIDSREQUEST -DESCRIPTOR.message_types_by_name['AllocateIdsResponse'] = _ALLOCATEIDSRESPONSE -DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION -DESCRIPTOR.message_types_by_name['MutationResult'] = _MUTATIONRESULT -DESCRIPTOR.message_types_by_name['ReadOptions'] = _READOPTIONS - -LookupRequest = _reflection.GeneratedProtocolMessageType('LookupRequest', (_message.Message,), dict( - DESCRIPTOR = _LOOKUPREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # 
@@protoc_insertion_point(class_scope:google.datastore.v1beta3.LookupRequest) - )) -_sym_db.RegisterMessage(LookupRequest) - -LookupResponse = _reflection.GeneratedProtocolMessageType('LookupResponse', (_message.Message,), dict( - DESCRIPTOR = _LOOKUPRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.LookupResponse) - )) -_sym_db.RegisterMessage(LookupResponse) - -RunQueryRequest = _reflection.GeneratedProtocolMessageType('RunQueryRequest', (_message.Message,), dict( - DESCRIPTOR = _RUNQUERYREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RunQueryRequest) - )) -_sym_db.RegisterMessage(RunQueryRequest) - -RunQueryResponse = _reflection.GeneratedProtocolMessageType('RunQueryResponse', (_message.Message,), dict( - DESCRIPTOR = _RUNQUERYRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RunQueryResponse) - )) -_sym_db.RegisterMessage(RunQueryResponse) - -BeginTransactionRequest = _reflection.GeneratedProtocolMessageType('BeginTransactionRequest', (_message.Message,), dict( - DESCRIPTOR = _BEGINTRANSACTIONREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.BeginTransactionRequest) - )) -_sym_db.RegisterMessage(BeginTransactionRequest) - -BeginTransactionResponse = _reflection.GeneratedProtocolMessageType('BeginTransactionResponse', (_message.Message,), dict( - DESCRIPTOR = _BEGINTRANSACTIONRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.BeginTransactionResponse) - )) -_sym_db.RegisterMessage(BeginTransactionResponse) - -RollbackRequest = _reflection.GeneratedProtocolMessageType('RollbackRequest', (_message.Message,), dict( - DESCRIPTOR = _ROLLBACKREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RollbackRequest) - )) -_sym_db.RegisterMessage(RollbackRequest) - -RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict( - DESCRIPTOR = _ROLLBACKRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.RollbackResponse) - )) -_sym_db.RegisterMessage(RollbackResponse) - -CommitRequest = _reflection.GeneratedProtocolMessageType('CommitRequest', (_message.Message,), dict( - DESCRIPTOR = _COMMITREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.CommitRequest) - )) -_sym_db.RegisterMessage(CommitRequest) - -CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict( - DESCRIPTOR = _COMMITRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.CommitResponse) - )) -_sym_db.RegisterMessage(CommitResponse) - -AllocateIdsRequest = _reflection.GeneratedProtocolMessageType('AllocateIdsRequest', (_message.Message,), dict( - DESCRIPTOR = _ALLOCATEIDSREQUEST, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.AllocateIdsRequest) - )) -_sym_db.RegisterMessage(AllocateIdsRequest) - -AllocateIdsResponse = 
_reflection.GeneratedProtocolMessageType('AllocateIdsResponse', (_message.Message,), dict( - DESCRIPTOR = _ALLOCATEIDSRESPONSE, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.AllocateIdsResponse) - )) -_sym_db.RegisterMessage(AllocateIdsResponse) - -Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( - DESCRIPTOR = _MUTATION, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Mutation) - )) -_sym_db.RegisterMessage(Mutation) - -MutationResult = _reflection.GeneratedProtocolMessageType('MutationResult', (_message.Message,), dict( - DESCRIPTOR = _MUTATIONRESULT, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.MutationResult) - )) -_sym_db.RegisterMessage(MutationResult) - -ReadOptions = _reflection.GeneratedProtocolMessageType('ReadOptions', (_message.Message,), dict( - DESCRIPTOR = _READOPTIONS, - __module__ = 'google.datastore.v1beta3.datastore_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.ReadOptions) - )) -_sym_db.RegisterMessage(ReadOptions) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\034com.google.datastore.v1beta3B\016DatastoreProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/datastore/_generated/entity_pb2.py b/gcloud/datastore/_generated/entity_pb2.py deleted file mode 100644 index 3295047f731f..000000000000 --- a/gcloud/datastore/_generated/entity_pb2.py +++ /dev/null @@ -1,493 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/datastore/v1beta3/entity.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/datastore/v1beta3/entity.proto', - package='google.datastore.v1beta3', - syntax='proto3', - serialized_pb=b'\n%google/datastore/v1beta3/entity.proto\x12\x18google.datastore.v1beta3\x1a\x1cgoogle/api/annotations.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x18google/type/latlng.proto\"7\n\x0bPartitionId\x12\x12\n\nproject_id\x18\x02 \x01(\t\x12\x14\n\x0cnamespace_id\x18\x04 \x01(\t\"\xc1\x01\n\x03Key\x12;\n\x0cpartition_id\x18\x01 \x01(\x0b\x32%.google.datastore.v1beta3.PartitionId\x12\x37\n\x04path\x18\x02 \x03(\x0b\x32).google.datastore.v1beta3.Key.PathElement\x1a\x44\n\x0bPathElement\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x02id\x18\x02 \x01(\x03H\x00\x12\x0e\n\x04name\x18\x03 \x01(\tH\x00\x42\t\n\x07id_type\"=\n\nArrayValue\x12/\n\x06values\x18\x01 \x03(\x0b\x32\x1f.google.datastore.v1beta3.Value\"\x80\x04\n\x05Value\x12\x30\n\nnull_value\x18\x0b \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x17\n\rboolean_value\x18\x01 
\x01(\x08H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x32\n\tkey_value\x18\x05 \x01(\x0b\x32\x1d.google.datastore.v1beta3.KeyH\x00\x12\x16\n\x0cstring_value\x18\x11 \x01(\tH\x00\x12\x14\n\nblob_value\x18\x12 \x01(\x0cH\x00\x12.\n\x0fgeo_point_value\x18\x08 \x01(\x0b\x32\x13.google.type.LatLngH\x00\x12\x38\n\x0c\x65ntity_value\x18\x06 \x01(\x0b\x32 .google.datastore.v1beta3.EntityH\x00\x12;\n\x0b\x61rray_value\x18\t \x01(\x0b\x32$.google.datastore.v1beta3.ArrayValueH\x00\x12\x0f\n\x07meaning\x18\x0e \x01(\x05\x12\x1c\n\x14\x65xclude_from_indexes\x18\x13 \x01(\x08\x42\x0c\n\nvalue_type\"\xce\x01\n\x06\x45ntity\x12*\n\x03key\x18\x01 \x01(\x0b\x32\x1d.google.datastore.v1beta3.Key\x12\x44\n\nproperties\x18\x03 \x03(\x0b\x32\x30.google.datastore.v1beta3.Entity.PropertiesEntry\x1aR\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.datastore.v1beta3.Value:\x02\x38\x01\x42-\n\x1c\x63om.google.datastore.v1beta3B\x0b\x45ntityProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_type_dot_latlng__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_PARTITIONID = _descriptor.Descriptor( - name='PartitionId', - full_name='google.datastore.v1beta3.PartitionId', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='project_id', full_name='google.datastore.v1beta3.PartitionId.project_id', index=0, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='namespace_id', full_name='google.datastore.v1beta3.PartitionId.namespace_id', index=1, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=186, - serialized_end=241, -) - - -_KEY_PATHELEMENT = _descriptor.Descriptor( - name='PathElement', - full_name='google.datastore.v1beta3.Key.PathElement', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='kind', full_name='google.datastore.v1beta3.Key.PathElement.kind', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='id', full_name='google.datastore.v1beta3.Key.PathElement.id', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='name', full_name='google.datastore.v1beta3.Key.PathElement.name', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='id_type', full_name='google.datastore.v1beta3.Key.PathElement.id_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=369, - serialized_end=437, -) - -_KEY = _descriptor.Descriptor( - name='Key', - full_name='google.datastore.v1beta3.Key', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='partition_id', full_name='google.datastore.v1beta3.Key.partition_id', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='path', full_name='google.datastore.v1beta3.Key.path', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_KEY_PATHELEMENT, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=244, - serialized_end=437, -) - - -_ARRAYVALUE = _descriptor.Descriptor( - name='ArrayValue', - full_name='google.datastore.v1beta3.ArrayValue', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='values', full_name='google.datastore.v1beta3.ArrayValue.values', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=439, - serialized_end=500, -) - - -_VALUE = _descriptor.Descriptor( - name='Value', - full_name='google.datastore.v1beta3.Value', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='null_value', full_name='google.datastore.v1beta3.Value.null_value', index=0, - number=11, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='boolean_value', full_name='google.datastore.v1beta3.Value.boolean_value', index=1, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='integer_value', full_name='google.datastore.v1beta3.Value.integer_value', index=2, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='double_value', full_name='google.datastore.v1beta3.Value.double_value', index=3, - number=3, 
type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_value', full_name='google.datastore.v1beta3.Value.timestamp_value', index=4, - number=10, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='key_value', full_name='google.datastore.v1beta3.Value.key_value', index=5, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='string_value', full_name='google.datastore.v1beta3.Value.string_value', index=6, - number=17, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blob_value', full_name='google.datastore.v1beta3.Value.blob_value', index=7, - number=18, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='geo_point_value', full_name='google.datastore.v1beta3.Value.geo_point_value', index=8, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entity_value', full_name='google.datastore.v1beta3.Value.entity_value', index=9, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='array_value', full_name='google.datastore.v1beta3.Value.array_value', index=10, - number=9, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='meaning', full_name='google.datastore.v1beta3.Value.meaning', index=11, - number=14, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exclude_from_indexes', full_name='google.datastore.v1beta3.Value.exclude_from_indexes', index=12, - number=19, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='value_type', full_name='google.datastore.v1beta3.Value.value_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=503, - serialized_end=1015, -) - - 
-_ENTITY_PROPERTIESENTRY = _descriptor.Descriptor( - name='PropertiesEntry', - full_name='google.datastore.v1beta3.Entity.PropertiesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.datastore.v1beta3.Entity.PropertiesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.datastore.v1beta3.Entity.PropertiesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1142, - serialized_end=1224, -) - -_ENTITY = _descriptor.Descriptor( - name='Entity', - full_name='google.datastore.v1beta3.Entity', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.datastore.v1beta3.Entity.key', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='properties', full_name='google.datastore.v1beta3.Entity.properties', index=1, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_ENTITY_PROPERTIESENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1018, - serialized_end=1224, -) - -_KEY_PATHELEMENT.containing_type = _KEY -_KEY_PATHELEMENT.oneofs_by_name['id_type'].fields.append( - _KEY_PATHELEMENT.fields_by_name['id']) -_KEY_PATHELEMENT.fields_by_name['id'].containing_oneof = _KEY_PATHELEMENT.oneofs_by_name['id_type'] -_KEY_PATHELEMENT.oneofs_by_name['id_type'].fields.append( - _KEY_PATHELEMENT.fields_by_name['name']) -_KEY_PATHELEMENT.fields_by_name['name'].containing_oneof = _KEY_PATHELEMENT.oneofs_by_name['id_type'] -_KEY.fields_by_name['partition_id'].message_type = _PARTITIONID -_KEY.fields_by_name['path'].message_type = _KEY_PATHELEMENT -_ARRAYVALUE.fields_by_name['values'].message_type = _VALUE -_VALUE.fields_by_name['null_value'].enum_type = google_dot_protobuf_dot_struct__pb2._NULLVALUE -_VALUE.fields_by_name['timestamp_value'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_VALUE.fields_by_name['key_value'].message_type = _KEY -_VALUE.fields_by_name['geo_point_value'].message_type = google_dot_type_dot_latlng__pb2._LATLNG -_VALUE.fields_by_name['entity_value'].message_type = _ENTITY -_VALUE.fields_by_name['array_value'].message_type = _ARRAYVALUE -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['null_value']) -_VALUE.fields_by_name['null_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] 
-_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['boolean_value']) -_VALUE.fields_by_name['boolean_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['integer_value']) -_VALUE.fields_by_name['integer_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['double_value']) -_VALUE.fields_by_name['double_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['timestamp_value']) -_VALUE.fields_by_name['timestamp_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['key_value']) -_VALUE.fields_by_name['key_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['string_value']) -_VALUE.fields_by_name['string_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['blob_value']) -_VALUE.fields_by_name['blob_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['geo_point_value']) -_VALUE.fields_by_name['geo_point_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['entity_value']) -_VALUE.fields_by_name['entity_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_VALUE.oneofs_by_name['value_type'].fields.append( - _VALUE.fields_by_name['array_value']) -_VALUE.fields_by_name['array_value'].containing_oneof = _VALUE.oneofs_by_name['value_type'] -_ENTITY_PROPERTIESENTRY.fields_by_name['value'].message_type = _VALUE -_ENTITY_PROPERTIESENTRY.containing_type = _ENTITY -_ENTITY.fields_by_name['key'].message_type = _KEY -_ENTITY.fields_by_name['properties'].message_type = _ENTITY_PROPERTIESENTRY -DESCRIPTOR.message_types_by_name['PartitionId'] = _PARTITIONID -DESCRIPTOR.message_types_by_name['Key'] = _KEY -DESCRIPTOR.message_types_by_name['ArrayValue'] = _ARRAYVALUE -DESCRIPTOR.message_types_by_name['Value'] = _VALUE -DESCRIPTOR.message_types_by_name['Entity'] = _ENTITY - -PartitionId = _reflection.GeneratedProtocolMessageType('PartitionId', (_message.Message,), dict( - DESCRIPTOR = _PARTITIONID, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.PartitionId) - )) -_sym_db.RegisterMessage(PartitionId) - -Key = _reflection.GeneratedProtocolMessageType('Key', (_message.Message,), dict( - - PathElement = _reflection.GeneratedProtocolMessageType('PathElement', (_message.Message,), dict( - DESCRIPTOR = _KEY_PATHELEMENT, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Key.PathElement) - )) - , - DESCRIPTOR = _KEY, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Key) - )) -_sym_db.RegisterMessage(Key) -_sym_db.RegisterMessage(Key.PathElement) - -ArrayValue = _reflection.GeneratedProtocolMessageType('ArrayValue', (_message.Message,), dict( - DESCRIPTOR = _ARRAYVALUE, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.ArrayValue) - )) 
-_sym_db.RegisterMessage(ArrayValue) - -Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict( - DESCRIPTOR = _VALUE, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Value) - )) -_sym_db.RegisterMessage(Value) - -Entity = _reflection.GeneratedProtocolMessageType('Entity', (_message.Message,), dict( - - PropertiesEntry = _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), dict( - DESCRIPTOR = _ENTITY_PROPERTIESENTRY, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Entity.PropertiesEntry) - )) - , - DESCRIPTOR = _ENTITY, - __module__ = 'google.datastore.v1beta3.entity_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Entity) - )) -_sym_db.RegisterMessage(Entity) -_sym_db.RegisterMessage(Entity.PropertiesEntry) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\034com.google.datastore.v1beta3B\013EntityProtoP\001') -_ENTITY_PROPERTIESENTRY.has_options = True -_ENTITY_PROPERTIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/datastore/_generated/query_pb2.py b/gcloud/datastore/_generated/query_pb2.py deleted file mode 100644 index e843253850be..000000000000 --- a/gcloud/datastore/_generated/query_pb2.py +++ /dev/null @@ -1,917 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/datastore/v1beta3/query.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.datastore._generated import entity_pb2 as google_dot_datastore_dot_v1beta3_dot_entity__pb2 -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/datastore/v1beta3/query.proto', - package='google.datastore.v1beta3', - syntax='proto3', - serialized_pb=b'\n$google/datastore/v1beta3/query.proto\x12\x18google.datastore.v1beta3\x1a\x1cgoogle/api/annotations.proto\x1a%google/datastore/v1beta3/entity.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\xa3\x01\n\x0c\x45ntityResult\x12\x30\n\x06\x65ntity\x18\x01 \x01(\x0b\x32 .google.datastore.v1beta3.Entity\x12\x0e\n\x06\x63ursor\x18\x03 \x01(\x0c\"Q\n\nResultType\x12\x1b\n\x17RESULT_TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46ULL\x10\x01\x12\x0e\n\nPROJECTION\x10\x02\x12\x0c\n\x08KEY_ONLY\x10\x03\"\x8b\x03\n\x05Query\x12\x38\n\nprojection\x18\x02 \x03(\x0b\x32$.google.datastore.v1beta3.Projection\x12\x36\n\x04kind\x18\x03 \x03(\x0b\x32(.google.datastore.v1beta3.KindExpression\x12\x30\n\x06\x66ilter\x18\x04 \x01(\x0b\x32 .google.datastore.v1beta3.Filter\x12\x36\n\x05order\x18\x05 \x03(\x0b\x32\'.google.datastore.v1beta3.PropertyOrder\x12@\n\x0b\x64istinct_on\x18\x06 \x03(\x0b\x32+.google.datastore.v1beta3.PropertyReference\x12\x14\n\x0cstart_cursor\x18\x07 \x01(\x0c\x12\x12\n\nend_cursor\x18\x08 \x01(\x0c\x12\x0e\n\x06offset\x18\n \x01(\x05\x12*\n\x05limit\x18\x0c 
\x01(\x0b\x32\x1b.google.protobuf.Int32Value\"\x1e\n\x0eKindExpression\x12\x0c\n\x04name\x18\x01 \x01(\t\"!\n\x11PropertyReference\x12\x0c\n\x04name\x18\x02 \x01(\t\"K\n\nProjection\x12=\n\x08property\x18\x01 \x01(\x0b\x32+.google.datastore.v1beta3.PropertyReference\"\xdb\x01\n\rPropertyOrder\x12=\n\x08property\x18\x01 \x01(\x0b\x32+.google.datastore.v1beta3.PropertyReference\x12\x44\n\tdirection\x18\x02 \x01(\x0e\x32\x31.google.datastore.v1beta3.PropertyOrder.Direction\"E\n\tDirection\x12\x19\n\x15\x44IRECTION_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x01\x12\x0e\n\nDESCENDING\x10\x02\"\xa3\x01\n\x06\x46ilter\x12\x45\n\x10\x63omposite_filter\x18\x01 \x01(\x0b\x32).google.datastore.v1beta3.CompositeFilterH\x00\x12\x43\n\x0fproperty_filter\x18\x02 \x01(\x0b\x32(.google.datastore.v1beta3.PropertyFilterH\x00\x42\r\n\x0b\x66ilter_type\"\xb3\x01\n\x0f\x43ompositeFilter\x12>\n\x02op\x18\x01 \x01(\x0e\x32\x32.google.datastore.v1beta3.CompositeFilter.Operator\x12\x31\n\x07\x66ilters\x18\x02 \x03(\x0b\x32 .google.datastore.v1beta3.Filter\"-\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41ND\x10\x01\"\xd6\x02\n\x0ePropertyFilter\x12=\n\x08property\x18\x01 \x01(\x0b\x32+.google.datastore.v1beta3.PropertyReference\x12=\n\x02op\x18\x02 \x01(\x0e\x32\x31.google.datastore.v1beta3.PropertyFilter.Operator\x12.\n\x05value\x18\x03 \x01(\x0b\x32\x1f.google.datastore.v1beta3.Value\"\x95\x01\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\r\n\tLESS_THAN\x10\x01\x12\x16\n\x12LESS_THAN_OR_EQUAL\x10\x02\x12\x10\n\x0cGREATER_THAN\x10\x03\x12\x19\n\x15GREATER_THAN_OR_EQUAL\x10\x04\x12\t\n\x05\x45QUAL\x10\x05\x12\x10\n\x0cHAS_ANCESTOR\x10\x0b\"\xb4\x02\n\x08GqlQuery\x12\x14\n\x0cquery_string\x18\x01 \x01(\t\x12\x16\n\x0e\x61llow_literals\x18\x02 \x01(\x08\x12M\n\x0enamed_bindings\x18\x05 \x03(\x0b\x32\x35.google.datastore.v1beta3.GqlQuery.NamedBindingsEntry\x12H\n\x13positional_bindings\x18\x04 \x03(\x0b\x32+.google.datastore.v1beta3.GqlQueryParameter\x1a\x61\n\x12NamedBindingsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12:\n\x05value\x18\x02 \x01(\x0b\x32+.google.datastore.v1beta3.GqlQueryParameter:\x02\x38\x01\"i\n\x11GqlQueryParameter\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.datastore.v1beta3.ValueH\x00\x12\x10\n\x06\x63ursor\x18\x03 \x01(\x0cH\x00\x42\x10\n\x0eparameter_type\"\xd3\x03\n\x10QueryResultBatch\x12\x17\n\x0fskipped_results\x18\x06 \x01(\x05\x12\x16\n\x0eskipped_cursor\x18\x03 \x01(\x0c\x12M\n\x12\x65ntity_result_type\x18\x01 \x01(\x0e\x32\x31.google.datastore.v1beta3.EntityResult.ResultType\x12>\n\x0e\x65ntity_results\x18\x02 \x03(\x0b\x32&.google.datastore.v1beta3.EntityResult\x12\x12\n\nend_cursor\x18\x04 \x01(\x0c\x12P\n\x0cmore_results\x18\x05 \x01(\x0e\x32:.google.datastore.v1beta3.QueryResultBatch.MoreResultsType\"\x98\x01\n\x0fMoreResultsType\x12!\n\x1dMORE_RESULTS_TYPE_UNSPECIFIED\x10\x00\x12\x10\n\x0cNOT_FINISHED\x10\x01\x12\x1c\n\x18MORE_RESULTS_AFTER_LIMIT\x10\x02\x12\x1d\n\x19MORE_RESULTS_AFTER_CURSOR\x10\x04\x12\x13\n\x0fNO_MORE_RESULTS\x10\x03\x42,\n\x1c\x63om.google.datastore.v1beta3B\nQueryProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_datastore_dot_v1beta3_dot_entity__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_ENTITYRESULT_RESULTTYPE = _descriptor.EnumDescriptor( - name='ResultType', - full_name='google.datastore.v1beta3.EntityResult.ResultType', - filename=None, - file=DESCRIPTOR, - values=[ - 
_descriptor.EnumValueDescriptor( - name='RESULT_TYPE_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FULL', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PROJECTION', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='KEY_ONLY', index=3, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=250, - serialized_end=331, -) -_sym_db.RegisterEnumDescriptor(_ENTITYRESULT_RESULTTYPE) - -_PROPERTYORDER_DIRECTION = _descriptor.EnumDescriptor( - name='Direction', - full_name='google.datastore.v1beta3.PropertyOrder.Direction', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DIRECTION_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ASCENDING', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DESCENDING', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1026, - serialized_end=1095, -) -_sym_db.RegisterEnumDescriptor(_PROPERTYORDER_DIRECTION) - -_COMPOSITEFILTER_OPERATOR = _descriptor.EnumDescriptor( - name='Operator', - full_name='google.datastore.v1beta3.CompositeFilter.Operator', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='OPERATOR_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AND', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1398, - serialized_end=1443, -) -_sym_db.RegisterEnumDescriptor(_COMPOSITEFILTER_OPERATOR) - -_PROPERTYFILTER_OPERATOR = _descriptor.EnumDescriptor( - name='Operator', - full_name='google.datastore.v1beta3.PropertyFilter.Operator', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='OPERATOR_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LESS_THAN', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LESS_THAN_OR_EQUAL', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GREATER_THAN', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GREATER_THAN_OR_EQUAL', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EQUAL', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HAS_ANCESTOR', index=6, number=11, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1639, - serialized_end=1788, -) -_sym_db.RegisterEnumDescriptor(_PROPERTYFILTER_OPERATOR) - -_QUERYRESULTBATCH_MORERESULTSTYPE = _descriptor.EnumDescriptor( - name='MoreResultsType', - full_name='google.datastore.v1beta3.QueryResultBatch.MoreResultsType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MORE_RESULTS_TYPE_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NOT_FINISHED', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MORE_RESULTS_AFTER_LIMIT', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MORE_RESULTS_AFTER_CURSOR', index=3, 
number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NO_MORE_RESULTS', index=4, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2524, - serialized_end=2676, -) -_sym_db.RegisterEnumDescriptor(_QUERYRESULTBATCH_MORERESULTSTYPE) - - -_ENTITYRESULT = _descriptor.Descriptor( - name='EntityResult', - full_name='google.datastore.v1beta3.EntityResult', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='entity', full_name='google.datastore.v1beta3.EntityResult.entity', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cursor', full_name='google.datastore.v1beta3.EntityResult.cursor', index=1, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _ENTITYRESULT_RESULTTYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=168, - serialized_end=331, -) - - -_QUERY = _descriptor.Descriptor( - name='Query', - full_name='google.datastore.v1beta3.Query', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='projection', full_name='google.datastore.v1beta3.Query.projection', index=0, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kind', full_name='google.datastore.v1beta3.Query.kind', index=1, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filter', full_name='google.datastore.v1beta3.Query.filter', index=2, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='order', full_name='google.datastore.v1beta3.Query.order', index=3, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='distinct_on', full_name='google.datastore.v1beta3.Query.distinct_on', index=4, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_cursor', full_name='google.datastore.v1beta3.Query.start_cursor', index=5, - number=7, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_cursor', 
full_name='google.datastore.v1beta3.Query.end_cursor', index=6, - number=8, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset', full_name='google.datastore.v1beta3.Query.offset', index=7, - number=10, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='limit', full_name='google.datastore.v1beta3.Query.limit', index=8, - number=12, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=334, - serialized_end=729, -) - - -_KINDEXPRESSION = _descriptor.Descriptor( - name='KindExpression', - full_name='google.datastore.v1beta3.KindExpression', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.datastore.v1beta3.KindExpression.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=731, - serialized_end=761, -) - - -_PROPERTYREFERENCE = _descriptor.Descriptor( - name='PropertyReference', - full_name='google.datastore.v1beta3.PropertyReference', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.datastore.v1beta3.PropertyReference.name', index=0, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=763, - serialized_end=796, -) - - -_PROJECTION = _descriptor.Descriptor( - name='Projection', - full_name='google.datastore.v1beta3.Projection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='property', full_name='google.datastore.v1beta3.Projection.property', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=798, - serialized_end=873, -) - - -_PROPERTYORDER = _descriptor.Descriptor( - name='PropertyOrder', - full_name='google.datastore.v1beta3.PropertyOrder', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='property', full_name='google.datastore.v1beta3.PropertyOrder.property', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='direction', full_name='google.datastore.v1beta3.PropertyOrder.direction', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _PROPERTYORDER_DIRECTION, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=876, - serialized_end=1095, -) - - -_FILTER = _descriptor.Descriptor( - name='Filter', - full_name='google.datastore.v1beta3.Filter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='composite_filter', full_name='google.datastore.v1beta3.Filter.composite_filter', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='property_filter', full_name='google.datastore.v1beta3.Filter.property_filter', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='filter_type', full_name='google.datastore.v1beta3.Filter.filter_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1098, - serialized_end=1261, -) - - -_COMPOSITEFILTER = _descriptor.Descriptor( - name='CompositeFilter', - full_name='google.datastore.v1beta3.CompositeFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='op', full_name='google.datastore.v1beta3.CompositeFilter.op', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filters', full_name='google.datastore.v1beta3.CompositeFilter.filters', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _COMPOSITEFILTER_OPERATOR, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1264, - serialized_end=1443, -) - - -_PROPERTYFILTER = _descriptor.Descriptor( - name='PropertyFilter', - full_name='google.datastore.v1beta3.PropertyFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='property', full_name='google.datastore.v1beta3.PropertyFilter.property', index=0, - number=1, type=11, cpp_type=10, 
label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='op', full_name='google.datastore.v1beta3.PropertyFilter.op', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.datastore.v1beta3.PropertyFilter.value', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _PROPERTYFILTER_OPERATOR, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1446, - serialized_end=1788, -) - - -_GQLQUERY_NAMEDBINDINGSENTRY = _descriptor.Descriptor( - name='NamedBindingsEntry', - full_name='google.datastore.v1beta3.GqlQuery.NamedBindingsEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.datastore.v1beta3.GqlQuery.NamedBindingsEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.datastore.v1beta3.GqlQuery.NamedBindingsEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2002, - serialized_end=2099, -) - -_GQLQUERY = _descriptor.Descriptor( - name='GqlQuery', - full_name='google.datastore.v1beta3.GqlQuery', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='query_string', full_name='google.datastore.v1beta3.GqlQuery.query_string', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='allow_literals', full_name='google.datastore.v1beta3.GqlQuery.allow_literals', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='named_bindings', full_name='google.datastore.v1beta3.GqlQuery.named_bindings', index=2, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='positional_bindings', 
full_name='google.datastore.v1beta3.GqlQuery.positional_bindings', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_GQLQUERY_NAMEDBINDINGSENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1791, - serialized_end=2099, -) - - -_GQLQUERYPARAMETER = _descriptor.Descriptor( - name='GqlQueryParameter', - full_name='google.datastore.v1beta3.GqlQueryParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='value', full_name='google.datastore.v1beta3.GqlQueryParameter.value', index=0, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cursor', full_name='google.datastore.v1beta3.GqlQueryParameter.cursor', index=1, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='parameter_type', full_name='google.datastore.v1beta3.GqlQueryParameter.parameter_type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2101, - serialized_end=2206, -) - - -_QUERYRESULTBATCH = _descriptor.Descriptor( - name='QueryResultBatch', - full_name='google.datastore.v1beta3.QueryResultBatch', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='skipped_results', full_name='google.datastore.v1beta3.QueryResultBatch.skipped_results', index=0, - number=6, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='skipped_cursor', full_name='google.datastore.v1beta3.QueryResultBatch.skipped_cursor', index=1, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entity_result_type', full_name='google.datastore.v1beta3.QueryResultBatch.entity_result_type', index=2, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entity_results', full_name='google.datastore.v1beta3.QueryResultBatch.entity_results', index=3, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_cursor', full_name='google.datastore.v1beta3.QueryResultBatch.end_cursor', index=4, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, 
default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='more_results', full_name='google.datastore.v1beta3.QueryResultBatch.more_results', index=5, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _QUERYRESULTBATCH_MORERESULTSTYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2209, - serialized_end=2676, -) - -_ENTITYRESULT.fields_by_name['entity'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._ENTITY -_ENTITYRESULT_RESULTTYPE.containing_type = _ENTITYRESULT -_QUERY.fields_by_name['projection'].message_type = _PROJECTION -_QUERY.fields_by_name['kind'].message_type = _KINDEXPRESSION -_QUERY.fields_by_name['filter'].message_type = _FILTER -_QUERY.fields_by_name['order'].message_type = _PROPERTYORDER -_QUERY.fields_by_name['distinct_on'].message_type = _PROPERTYREFERENCE -_QUERY.fields_by_name['limit'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT32VALUE -_PROJECTION.fields_by_name['property'].message_type = _PROPERTYREFERENCE -_PROPERTYORDER.fields_by_name['property'].message_type = _PROPERTYREFERENCE -_PROPERTYORDER.fields_by_name['direction'].enum_type = _PROPERTYORDER_DIRECTION -_PROPERTYORDER_DIRECTION.containing_type = _PROPERTYORDER -_FILTER.fields_by_name['composite_filter'].message_type = _COMPOSITEFILTER -_FILTER.fields_by_name['property_filter'].message_type = _PROPERTYFILTER -_FILTER.oneofs_by_name['filter_type'].fields.append( - _FILTER.fields_by_name['composite_filter']) -_FILTER.fields_by_name['composite_filter'].containing_oneof = _FILTER.oneofs_by_name['filter_type'] -_FILTER.oneofs_by_name['filter_type'].fields.append( - _FILTER.fields_by_name['property_filter']) -_FILTER.fields_by_name['property_filter'].containing_oneof = _FILTER.oneofs_by_name['filter_type'] -_COMPOSITEFILTER.fields_by_name['op'].enum_type = _COMPOSITEFILTER_OPERATOR -_COMPOSITEFILTER.fields_by_name['filters'].message_type = _FILTER -_COMPOSITEFILTER_OPERATOR.containing_type = _COMPOSITEFILTER -_PROPERTYFILTER.fields_by_name['property'].message_type = _PROPERTYREFERENCE -_PROPERTYFILTER.fields_by_name['op'].enum_type = _PROPERTYFILTER_OPERATOR -_PROPERTYFILTER.fields_by_name['value'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._VALUE -_PROPERTYFILTER_OPERATOR.containing_type = _PROPERTYFILTER -_GQLQUERY_NAMEDBINDINGSENTRY.fields_by_name['value'].message_type = _GQLQUERYPARAMETER -_GQLQUERY_NAMEDBINDINGSENTRY.containing_type = _GQLQUERY -_GQLQUERY.fields_by_name['named_bindings'].message_type = _GQLQUERY_NAMEDBINDINGSENTRY -_GQLQUERY.fields_by_name['positional_bindings'].message_type = _GQLQUERYPARAMETER -_GQLQUERYPARAMETER.fields_by_name['value'].message_type = google_dot_datastore_dot_v1beta3_dot_entity__pb2._VALUE -_GQLQUERYPARAMETER.oneofs_by_name['parameter_type'].fields.append( - _GQLQUERYPARAMETER.fields_by_name['value']) -_GQLQUERYPARAMETER.fields_by_name['value'].containing_oneof = _GQLQUERYPARAMETER.oneofs_by_name['parameter_type'] -_GQLQUERYPARAMETER.oneofs_by_name['parameter_type'].fields.append( - _GQLQUERYPARAMETER.fields_by_name['cursor']) -_GQLQUERYPARAMETER.fields_by_name['cursor'].containing_oneof = 
_GQLQUERYPARAMETER.oneofs_by_name['parameter_type'] -_QUERYRESULTBATCH.fields_by_name['entity_result_type'].enum_type = _ENTITYRESULT_RESULTTYPE -_QUERYRESULTBATCH.fields_by_name['entity_results'].message_type = _ENTITYRESULT -_QUERYRESULTBATCH.fields_by_name['more_results'].enum_type = _QUERYRESULTBATCH_MORERESULTSTYPE -_QUERYRESULTBATCH_MORERESULTSTYPE.containing_type = _QUERYRESULTBATCH -DESCRIPTOR.message_types_by_name['EntityResult'] = _ENTITYRESULT -DESCRIPTOR.message_types_by_name['Query'] = _QUERY -DESCRIPTOR.message_types_by_name['KindExpression'] = _KINDEXPRESSION -DESCRIPTOR.message_types_by_name['PropertyReference'] = _PROPERTYREFERENCE -DESCRIPTOR.message_types_by_name['Projection'] = _PROJECTION -DESCRIPTOR.message_types_by_name['PropertyOrder'] = _PROPERTYORDER -DESCRIPTOR.message_types_by_name['Filter'] = _FILTER -DESCRIPTOR.message_types_by_name['CompositeFilter'] = _COMPOSITEFILTER -DESCRIPTOR.message_types_by_name['PropertyFilter'] = _PROPERTYFILTER -DESCRIPTOR.message_types_by_name['GqlQuery'] = _GQLQUERY -DESCRIPTOR.message_types_by_name['GqlQueryParameter'] = _GQLQUERYPARAMETER -DESCRIPTOR.message_types_by_name['QueryResultBatch'] = _QUERYRESULTBATCH - -EntityResult = _reflection.GeneratedProtocolMessageType('EntityResult', (_message.Message,), dict( - DESCRIPTOR = _ENTITYRESULT, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.EntityResult) - )) -_sym_db.RegisterMessage(EntityResult) - -Query = _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), dict( - DESCRIPTOR = _QUERY, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Query) - )) -_sym_db.RegisterMessage(Query) - -KindExpression = _reflection.GeneratedProtocolMessageType('KindExpression', (_message.Message,), dict( - DESCRIPTOR = _KINDEXPRESSION, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.KindExpression) - )) -_sym_db.RegisterMessage(KindExpression) - -PropertyReference = _reflection.GeneratedProtocolMessageType('PropertyReference', (_message.Message,), dict( - DESCRIPTOR = _PROPERTYREFERENCE, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.PropertyReference) - )) -_sym_db.RegisterMessage(PropertyReference) - -Projection = _reflection.GeneratedProtocolMessageType('Projection', (_message.Message,), dict( - DESCRIPTOR = _PROJECTION, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Projection) - )) -_sym_db.RegisterMessage(Projection) - -PropertyOrder = _reflection.GeneratedProtocolMessageType('PropertyOrder', (_message.Message,), dict( - DESCRIPTOR = _PROPERTYORDER, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.PropertyOrder) - )) -_sym_db.RegisterMessage(PropertyOrder) - -Filter = _reflection.GeneratedProtocolMessageType('Filter', (_message.Message,), dict( - DESCRIPTOR = _FILTER, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.Filter) - )) -_sym_db.RegisterMessage(Filter) - -CompositeFilter = _reflection.GeneratedProtocolMessageType('CompositeFilter', (_message.Message,), dict( - DESCRIPTOR = _COMPOSITEFILTER, - __module__ = 'google.datastore.v1beta3.query_pb2' - # 
@@protoc_insertion_point(class_scope:google.datastore.v1beta3.CompositeFilter) - )) -_sym_db.RegisterMessage(CompositeFilter) - -PropertyFilter = _reflection.GeneratedProtocolMessageType('PropertyFilter', (_message.Message,), dict( - DESCRIPTOR = _PROPERTYFILTER, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.PropertyFilter) - )) -_sym_db.RegisterMessage(PropertyFilter) - -GqlQuery = _reflection.GeneratedProtocolMessageType('GqlQuery', (_message.Message,), dict( - - NamedBindingsEntry = _reflection.GeneratedProtocolMessageType('NamedBindingsEntry', (_message.Message,), dict( - DESCRIPTOR = _GQLQUERY_NAMEDBINDINGSENTRY, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.GqlQuery.NamedBindingsEntry) - )) - , - DESCRIPTOR = _GQLQUERY, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.GqlQuery) - )) -_sym_db.RegisterMessage(GqlQuery) -_sym_db.RegisterMessage(GqlQuery.NamedBindingsEntry) - -GqlQueryParameter = _reflection.GeneratedProtocolMessageType('GqlQueryParameter', (_message.Message,), dict( - DESCRIPTOR = _GQLQUERYPARAMETER, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.GqlQueryParameter) - )) -_sym_db.RegisterMessage(GqlQueryParameter) - -QueryResultBatch = _reflection.GeneratedProtocolMessageType('QueryResultBatch', (_message.Message,), dict( - DESCRIPTOR = _QUERYRESULTBATCH, - __module__ = 'google.datastore.v1beta3.query_pb2' - # @@protoc_insertion_point(class_scope:google.datastore.v1beta3.QueryResultBatch) - )) -_sym_db.RegisterMessage(QueryResultBatch) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\034com.google.datastore.v1beta3B\nQueryProtoP\001') -_GQLQUERY_NAMEDBINDINGSENTRY.has_options = True -_GQLQUERY_NAMEDBINDINGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/datastore/batch.py b/gcloud/datastore/batch.py deleted file mode 100644 index 5d4fc01b1442..000000000000 --- a/gcloud/datastore/batch.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with a batch of updates / deletes. - -Batches provide the ability to execute multiple operations -in a single request to the Cloud Datastore API. - -See -https://cloud.google.com/datastore/docs/concepts/entities#Datastore_Batch_operations -""" - -from gcloud.datastore import helpers -from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2 - - -class Batch(object): - """An abstraction representing a collected group of updates / deletes. - - Used to build up a bulk mutuation. 
- - For example, the following snippet of code will put the two ``save`` - operations and the ``delete`` operation into the same mutation, and send - them to the server in a single API request:: - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> batch = client.batch() - >>> batch.put(entity1) - >>> batch.put(entity2) - >>> batch.delete(key3) - >>> batch.commit() - - You can also use a batch as a context manager, in which case - :meth:`commit` will be called automatically if its block exits without - raising an exception:: - - >>> with batch: - ... batch.put(entity1) - ... batch.put(entity2) - ... batch.delete(key3) - - By default, no updates will be sent if the block exits with an error:: - - >>> with batch: - ... do_some_work(batch) - ... raise Exception() # rolls back - - :type client: :class:`gcloud.datastore.client.Client` - :param client: The client used to connect to datastore. - """ - - _id = None # "protected" attribute, always None for non-transactions - - _INITIAL = 0 - """Enum value for _INITIAL status of batch/transaction.""" - - _IN_PROGRESS = 1 - """Enum value for _IN_PROGRESS status of batch/transaction.""" - - _ABORTED = 2 - """Enum value for _ABORTED status of batch/transaction.""" - - _FINISHED = 3 - """Enum value for _FINISHED status of batch/transaction.""" - - def __init__(self, client): - self._client = client - self._commit_request = _datastore_pb2.CommitRequest() - self._partial_key_entities = [] - self._status = self._INITIAL - - def current(self): - """Return the topmost batch / transaction, or None.""" - return self._client.current_batch - - @property - def project(self): - """Getter for project in which the batch will run. - - :rtype: :class:`str` - :returns: The project in which the batch will run. - """ - return self._client.project - - @property - def namespace(self): - """Getter for namespace in which the batch will run. - - :rtype: :class:`str` - :returns: The namespace in which the batch will run. - """ - return self._client.namespace - - @property - def connection(self): - """Getter for connection over which the batch will run. - - :rtype: :class:`gcloud.datastore.connection.Connection` - :returns: The connection over which the batch will run. - """ - return self._client.connection - - def _add_partial_key_entity_pb(self): - """Adds a new mutation for an entity with a partial key. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :returns: The newly created entity protobuf that will be - updated and sent with a commit. - """ - new_mutation = self.mutations.add() - return new_mutation.insert - - def _add_complete_key_entity_pb(self): - """Adds a new mutation for an entity with a completed key. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :returns: The newly created entity protobuf that will be - updated and sent with a commit. - """ - # We use ``upsert`` for entities with completed keys, rather than - # ``insert`` or ``update``, in order not to create race conditions - # based on prior existence / removal of the entity. - new_mutation = self.mutations.add() - return new_mutation.upsert - - def _add_delete_key_pb(self): - """Adds a new mutation for a key to be deleted. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Key` - :returns: The newly created key protobuf that will be - deleted when sent with a commit. - """ - new_mutation = self.mutations.add() - return new_mutation.delete - - @property - def mutations(self): - """Getter for the changes accumulated by this batch. 
- - Every batch is committed with a single commit request containing all - the work to be done as mutations. Inside a batch, calling :meth:`put` - with an entity, or :meth:`delete` with a key, builds up the request by - adding a new mutation. This getter returns the protobuf that has been - built-up so far. - - :rtype: iterable - :returns: The list of :class:`._generated.datastore_pb2.Mutation` - protobufs to be sent in the commit request. - """ - return self._commit_request.mutations - - def put(self, entity): - """Remember an entity's state to be saved during :meth:`commit`. - - .. note:: - Any existing properties for the entity will be replaced by those - currently set on this instance. Already-stored properties which do - not correspond to keys set on this instance will be removed from - the datastore. - - .. note:: - Property values which are "text" ('unicode' in Python2, 'str' in - Python3) map to 'string_value' in the datastore; values which are - "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'. - - When an entity has a partial key, calling :meth:`commit` sends it as - an ``insert`` mutation and the key is completed. On return, - the key for the ``entity`` passed in is updated to match the key ID - assigned by the server. - - :type entity: :class:`gcloud.datastore.entity.Entity` - :param entity: the entity to be saved. - - :raises: ValueError if entity has no key assigned, or if the key's - ``project`` does not match ours. - """ - if entity.key is None: - raise ValueError("Entity must have a key") - - if self.project != entity.key.project: - raise ValueError("Key must be from same project as batch") - - if entity.key.is_partial: - entity_pb = self._add_partial_key_entity_pb() - self._partial_key_entities.append(entity) - else: - entity_pb = self._add_complete_key_entity_pb() - - _assign_entity_to_pb(entity_pb, entity) - - def delete(self, key): - """Remember a key to be deleted during :meth:`commit`. - - :type key: :class:`gcloud.datastore.key.Key` - :param key: the key to be deleted. - - :raises: ValueError if key is not complete, or if the key's - ``project`` does not match ours. - """ - if key.is_partial: - raise ValueError("Key must be complete") - - if self.project != key.project: - raise ValueError("Key must be from same project as batch") - - key_pb = key.to_protobuf() - self._add_delete_key_pb().CopyFrom(key_pb) - - def begin(self): - """Begins a batch. - - This method is called automatically when entering a with - statement, however it can be called explicitly if you don't want - to use a context manager. - - Overridden by :class:`gcloud.datastore.transaction.Transaction`. - - :raises: :class:`ValueError` if the batch has already begun. - """ - if self._status != self._INITIAL: - raise ValueError('Batch already started previously.') - self._status = self._IN_PROGRESS - - def _commit(self): - """Commits the batch. - - This is called by :meth:`commit`. - """ - # NOTE: ``self._commit_request`` will be modified. - _, updated_keys = self.connection.commit( - self.project, self._commit_request, self._id) - # If the back-end returns without error, we are guaranteed that - # :meth:`Connection.commit` will return keys that match (length and - # order) directly ``_partial_key_entities``. - for new_key_pb, entity in zip(updated_keys, - self._partial_key_entities): - new_id = new_key_pb.path[-1].id - entity.key = entity.key.completed_key(new_id) - - def commit(self): - """Commits the batch. 
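        As a rough sketch (an editorial addition, assuming ``client`` is a
        :class:`gcloud.datastore.client.Client` with a ``key`` factory):
        entities put with partial keys have those keys completed with the
        server-assigned IDs once the batch is committed::

            >>> from gcloud import datastore
            >>> client = datastore.Client()
            >>> entity = datastore.Entity(key=client.key('Person'))
            >>> entity['name'] = u'Alice'
            >>> batch = client.batch()
            >>> batch.begin()
            >>> batch.put(entity)          # queued as an ``insert`` mutation
            >>> entity.key.is_partial
            True
            >>> batch.commit()             # API request
            >>> entity.key.is_partial      # completed from the returned key
            False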
- - This is called automatically upon exiting a with statement, - however it can be called explicitly if you don't want to use a - context manager. - """ - try: - self._commit() - finally: - self._status = self._FINISHED - - def rollback(self): - """Rolls back the current batch. - - Marks the batch as aborted (can't be used again). - - Overridden by :class:`gcloud.datastore.transaction.Transaction`. - """ - self._status = self._ABORTED - - def __enter__(self): - self._client._push_batch(self) - self.begin() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - if exc_type is None: - self.commit() - else: - self.rollback() - finally: - self._client._pop_batch() - - -def _assign_entity_to_pb(entity_pb, entity): - """Copy ``entity`` into ``entity_pb``. - - Helper method for ``Batch.put``. - - :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :param entity_pb: The entity owned by a mutation. - - :type entity: :class:`gcloud.datastore.entity.Entity` - :param entity: The entity being updated within the batch / transaction. - """ - bare_entity_pb = helpers.entity_to_protobuf(entity) - bare_entity_pb.key.CopyFrom(bare_entity_pb.key) - entity_pb.CopyFrom(bare_entity_pb) diff --git a/gcloud/datastore/client.py b/gcloud/datastore/client.py deleted file mode 100644 index 1e9b40d6a8bf..000000000000 --- a/gcloud/datastore/client.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Convenience wrapper for invoking APIs/factories w/ a project.""" - -import os - -from gcloud._helpers import _LocalStack -from gcloud._helpers import _determine_default_project as _base_default_project -from gcloud.client import _ClientProjectMixin -from gcloud.client import Client as _BaseClient -from gcloud.datastore import helpers -from gcloud.datastore.connection import Connection -from gcloud.datastore.batch import Batch -from gcloud.datastore.entity import Entity -from gcloud.datastore.key import Key -from gcloud.datastore.query import Query -from gcloud.datastore.transaction import Transaction -from gcloud.environment_vars import GCD_DATASET - - -_MAX_LOOPS = 128 -"""Maximum number of iterations to wait for deferred keys.""" - - -def _get_gcd_project(): - """Gets the GCD application ID if it can be inferred.""" - return os.getenv(GCD_DATASET) - - -def _determine_default_project(project=None): - """Determine default project explicitly or implicitly as fall-back. - - In implicit case, supports four environments. In order of precedence, the - implicit environments are: - - * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing) - * GCLOUD_PROJECT environment variable - * Google App Engine application ID - * Google Compute Engine project ID (from metadata server) - - :type project: string - :param project: Optional. The project to use as default. - - :rtype: string or ``NoneType`` - :returns: Default project if it can be determined. 
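    A hypothetical, editorial sketch of the highest-precedence implicit
    fallback (the ``DATASTORE_DATASET`` variable used for ``gcd`` / emulator
    testing)::

        >>> import os
        >>> os.environ['DATASTORE_DATASET'] = 'emulator-project'
        >>> _determine_default_project()
        'emulator-project'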
- """ - if project is None: - project = _get_gcd_project() - - if project is None: - project = _base_default_project(project=project) - - return project - - -def _extended_lookup(connection, project, key_pbs, - missing=None, deferred=None, - eventual=False, transaction_id=None): - """Repeat lookup until all keys found (unless stop requested). - - Helper function for :meth:`Client.get_multi`. - - :type connection: :class:`gcloud.datastore.connection.Connection` - :param connection: The connection used to connect to datastore. - - :type project: string - :param project: The project to make the request for. - - :type key_pbs: list of :class:`gcloud.datastore._generated.entity_pb2.Key` - :param key_pbs: The keys to retrieve from the datastore. - - :type missing: list - :param missing: (Optional) If a list is passed, the key-only entity - protobufs returned by the backend as "missing" will be - copied into it. - - :type deferred: list - :param deferred: (Optional) If a list is passed, the key protobufs returned - by the backend as "deferred" will be copied into it. - - :type eventual: bool - :param eventual: If False (the default), request ``STRONG`` read - consistency. If True, request ``EVENTUAL`` read - consistency. - - :type transaction_id: string - :param transaction_id: If passed, make the request in the scope of - the given transaction. Incompatible with - ``eventual==True``. - - :rtype: list of :class:`gcloud.datastore._generated.entity_pb2.Entity` - :returns: The requested entities. - :raises: :class:`ValueError` if missing / deferred are not null or - empty list. - """ - if missing is not None and missing != []: - raise ValueError('missing must be None or an empty list') - - if deferred is not None and deferred != []: - raise ValueError('deferred must be None or an empty list') - - results = [] - - loop_num = 0 - while loop_num < _MAX_LOOPS: # loop against possible deferred. - loop_num += 1 - - results_found, missing_found, deferred_found = connection.lookup( - project=project, - key_pbs=key_pbs, - eventual=eventual, - transaction_id=transaction_id, - ) - - results.extend(results_found) - - if missing is not None: - missing.extend(missing_found) - - if deferred is not None: - deferred.extend(deferred_found) - break - - if len(deferred_found) == 0: - break - - # We have deferred keys, and the user didn't ask to know about - # them, so retry (but only with the deferred ones). - key_pbs = deferred_found - - return results - - -class Client(_BaseClient, _ClientProjectMixin): - """Convenience wrapper for invoking APIs/factories w/ a project. - - :type project: string - :param project: (optional) The project to pass to proxied API methods. - - :type namespace: string - :param namespace: (optional) namespace to pass to proxied API methods. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. 
- """ - _connection_class = Connection - - def __init__(self, project=None, namespace=None, - credentials=None, http=None): - _ClientProjectMixin.__init__(self, project=project) - self.namespace = namespace - self._batch_stack = _LocalStack() - super(Client, self).__init__(credentials, http) - - @staticmethod - def _determine_default(project): - return _determine_default_project(project) - - def _push_batch(self, batch): - """Push a batch/transaction onto our stack. - - "Protected", intended for use by batch / transaction context mgrs. - - :type batch: :class:`gcloud.datastore.batch.Batch`, or an object - implementing its API. - :param batch: newly-active batch/transaction. - """ - self._batch_stack.push(batch) - - def _pop_batch(self): - """Pop a batch/transaction from our stack. - - "Protected", intended for use by batch / transaction context mgrs. - - :raises: IndexError if the stack is empty. - :rtype: :class:`gcloud.datastore.batch.Batch`, or an object - implementing its API. - :returns: the top-most batch/transaction, after removing it. - """ - return self._batch_stack.pop() - - @property - def current_batch(self): - """Currently-active batch. - - :rtype: :class:`gcloud.datastore.batch.Batch`, or an object - implementing its API, or ``NoneType`` (if no batch is active). - :returns: The batch/transaction at the top of the batch stack. - """ - return self._batch_stack.top - - @property - def current_transaction(self): - """Currently-active transaction. - - :rtype: :class:`gcloud.datastore.transaction.Transaction`, or an object - implementing its API, or ``NoneType`` (if no transaction is - active). - :returns: The transaction at the top of the batch stack. - """ - transaction = self.current_batch - if isinstance(transaction, Transaction): - return transaction - - def get(self, key, missing=None, deferred=None): - """Retrieve an entity from a single key (if it exists). - - .. note:: - - This is just a thin wrapper over :meth:`get_multi`. - The backend API does not make a distinction between a single key or - multiple keys in a lookup request. - - :type key: :class:`gcloud.datastore.key.Key` - :param key: The key to be retrieved from the datastore. - - :type missing: list - :param missing: (Optional) If a list is passed, the key-only entities - returned by the backend as "missing" will be copied - into it. - - :type deferred: list - :param deferred: (Optional) If a list is passed, the keys returned - by the backend as "deferred" will be copied into it. - - :rtype: :class:`gcloud.datastore.entity.Entity` or ``NoneType`` - :returns: The requested entity if it exists. - """ - entities = self.get_multi(keys=[key], missing=missing, - deferred=deferred) - if entities: - return entities[0] - - def get_multi(self, keys, missing=None, deferred=None): - """Retrieve entities, along with their attributes. - - :type keys: list of :class:`gcloud.datastore.key.Key` - :param keys: The keys to be retrieved from the datastore. - - :type missing: list - :param missing: (Optional) If a list is passed, the key-only entities - returned by the backend as "missing" will be copied - into it. If the list is not empty, an error will occur. - - :type deferred: list - :param deferred: (Optional) If a list is passed, the keys returned - by the backend as "deferred" will be copied into it. - If the list is not empty, an error will occur. - - :rtype: list of :class:`gcloud.datastore.entity.Entity` - :returns: The requested entities. 
- :raises: :class:`ValueError` if one or more of ``keys`` has a project - which does not match our project. - """ - if not keys: - return [] - - ids = set(key.project for key in keys) - for current_id in ids: - if current_id != self.project: - raise ValueError('Keys do not match project') - - transaction = self.current_transaction - - entity_pbs = _extended_lookup( - connection=self.connection, - project=self.project, - key_pbs=[k.to_protobuf() for k in keys], - missing=missing, - deferred=deferred, - transaction_id=transaction and transaction.id, - ) - - if missing is not None: - missing[:] = [ - helpers.entity_from_protobuf(missed_pb) - for missed_pb in missing] - - if deferred is not None: - deferred[:] = [ - helpers.key_from_protobuf(deferred_pb) - for deferred_pb in deferred] - - return [helpers.entity_from_protobuf(entity_pb) - for entity_pb in entity_pbs] - - def put(self, entity): - """Save an entity in the Cloud Datastore. - - .. note:: - - This is just a thin wrapper over :meth:`put_multi`. - The backend API does not make a distinction between a single - entity or multiple entities in a commit request. - - :type entity: :class:`gcloud.datastore.entity.Entity` - :param entity: The entity to be saved to the datastore. - """ - self.put_multi(entities=[entity]) - - def put_multi(self, entities): - """Save entities in the Cloud Datastore. - - :type entities: list of :class:`gcloud.datastore.entity.Entity` - :param entities: The entities to be saved to the datastore. - - :raises: :class:`ValueError` if ``entities`` is a single entity. - """ - if isinstance(entities, Entity): - raise ValueError("Pass a sequence of entities") - - if not entities: - return - - current = self.current_batch - in_batch = current is not None - - if not in_batch: - current = self.batch() - - for entity in entities: - current.put(entity) - - if not in_batch: - current.commit() - - def delete(self, key): - """Delete the key in the Cloud Datastore. - - .. note:: - - This is just a thin wrapper over :meth:`delete_multi`. - The backend API does not make a distinction between a single key or - multiple keys in a commit request. - - :type key: :class:`gcloud.datastore.key.Key` - :param key: The key to be deleted from the datastore. - """ - return self.delete_multi(keys=[key]) - - def delete_multi(self, keys): - """Delete keys from the Cloud Datastore. - - :type keys: list of :class:`gcloud.datastore.key.Key` - :param keys: The keys to be deleted from the datastore. - """ - if not keys: - return - - # We allow partial keys to attempt a delete, the backend will fail. - current = self.current_batch - in_batch = current is not None - - if not in_batch: - current = self.batch() - - for key in keys: - current.delete(key) - - if not in_batch: - current.commit() - - def allocate_ids(self, incomplete_key, num_ids): - """Allocate a list of IDs from a partial key. - - :type incomplete_key: :class:`gcloud.datastore.key.Key` - :param incomplete_key: Partial key to use as base for allocated IDs. - - :type num_ids: int - :param num_ids: The number of IDs to allocate. - - :rtype: list of :class:`gcloud.datastore.key.Key` - :returns: The (complete) keys allocated with ``incomplete_key`` as - root. - :raises: :class:`ValueError` if ``incomplete_key`` is not a - partial key. 
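To illustrate ``allocate_ids`` as documented above: it takes a partial key as a template and returns the same number of completed copies. A sketch with placeholder names (one API request):

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> partial_key = client.key('Person')                    # kind only, no ID
    >>> complete_keys = client.allocate_ids(partial_key, 3)   # API request
    >>> len(complete_keys)
    3
    >>> all(not key.is_partial for key in complete_keys)
    True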
- """ - if not incomplete_key.is_partial: - raise ValueError(('Key is not partial.', incomplete_key)) - - incomplete_key_pb = incomplete_key.to_protobuf() - incomplete_key_pbs = [incomplete_key_pb] * num_ids - - conn = self.connection - allocated_key_pbs = conn.allocate_ids(incomplete_key.project, - incomplete_key_pbs) - allocated_ids = [allocated_key_pb.path[-1].id - for allocated_key_pb in allocated_key_pbs] - return [incomplete_key.completed_key(allocated_id) - for allocated_id in allocated_ids] - - def key(self, *path_args, **kwargs): - """Proxy to :class:`gcloud.datastore.key.Key`. - - Passes our ``project``. - """ - if 'project' in kwargs: - raise TypeError('Cannot pass project') - kwargs['project'] = self.project - if 'namespace' not in kwargs: - kwargs['namespace'] = self.namespace - return Key(*path_args, **kwargs) - - def batch(self): - """Proxy to :class:`gcloud.datastore.batch.Batch`.""" - return Batch(self) - - def transaction(self): - """Proxy to :class:`gcloud.datastore.transaction.Transaction`.""" - return Transaction(self) - - def query(self, **kwargs): - """Proxy to :class:`gcloud.datastore.query.Query`. - - Passes our ``project``. - """ - if 'client' in kwargs: - raise TypeError('Cannot pass client') - if 'project' in kwargs: - raise TypeError('Cannot pass project') - kwargs['project'] = self.project - if 'namespace' not in kwargs: - kwargs['namespace'] = self.namespace - return Query(self, **kwargs) diff --git a/gcloud/datastore/connection.py b/gcloud/datastore/connection.py deleted file mode 100644 index d949b696b88d..000000000000 --- a/gcloud/datastore/connection.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Connections to gcloud datastore API servers.""" - -import os - -from gcloud import connection -from gcloud.environment_vars import GCD_HOST -from gcloud.exceptions import make_exception -from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2 -from google.rpc import status_pb2 - - -class Connection(connection.Connection): - """A connection to the Google Cloud Datastore via the Protobuf API. - - This class should understand only the basic types (and protobufs) - in method arguments, however should be capable of returning advanced types. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` - :param credentials: The OAuth2 Credentials to use for this connection. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. - - :type api_base_url: string - :param api_base_url: The base of the API call URL. Defaults to - :attr:`API_BASE_URL`. 
- """ - - API_BASE_URL = 'https://datastore.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v1beta3' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = ('{api_base}/{api_version}/projects' - '/{project}:{method}') - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/datastore',) - """The scopes required for authenticating as a Cloud Datastore consumer.""" - - def __init__(self, credentials=None, http=None, api_base_url=None): - super(Connection, self).__init__(credentials=credentials, http=http) - if api_base_url is None: - try: - # gcd.sh has /datastore/ in the path still since it supports - # v1beta2 and v1beta3 simultaneously. - api_base_url = '%s/datastore' % (os.environ[GCD_HOST],) - except KeyError: - api_base_url = self.__class__.API_BASE_URL - self.api_base_url = api_base_url - - def _request(self, project, method, data): - """Make a request over the Http transport to the Cloud Datastore API. - - :type project: string - :param project: The project to make the request for. - - :type method: string - :param method: The API call method name (ie, ``runQuery``, - ``lookup``, etc) - - :type data: string - :param data: The data to send with the API call. - Typically this is a serialized Protobuf string. - - :rtype: string - :returns: The string response content from the API call. - :raises: :class:`gcloud.exceptions.GCloudError` if the response - code is not 200 OK. - """ - headers = { - 'Content-Type': 'application/x-protobuf', - 'Content-Length': str(len(data)), - 'User-Agent': self.USER_AGENT, - } - headers, content = self.http.request( - uri=self.build_api_url(project=project, method=method), - method='POST', headers=headers, body=data) - - status = headers['status'] - if status != '200': - error_status = status_pb2.Status.FromString(content) - raise make_exception(headers, error_status.message, use_json=False) - - return content - - def _rpc(self, project, method, request_pb, response_pb_cls): - """Make a protobuf RPC request. - - :type project: string - :param project: The project to connect to. This is - usually your project name in the cloud console. - - :type method: string - :param method: The name of the method to invoke. - - :type request_pb: :class:`google.protobuf.message.Message` instance - :param request_pb: the protobuf instance representing the request. - - :type response_pb_cls: A :class:`google.protobuf.message.Message' - subclass. - :param response_pb_cls: The class used to unmarshall the response - protobuf. - """ - response = self._request(project=project, method=method, - data=request_pb.SerializeToString()) - return response_pb_cls.FromString(response) - - def build_api_url(self, project, method, base_url=None, - api_version=None): - """Construct the URL for a particular API call. - - This method is used internally to come up with the URL to use when - making RPCs to the Cloud Datastore API. - - :type project: string - :param project: The project to connect to. This is - usually your project name in the cloud console. - - :type method: string - :param method: The API method to call (e.g. 'runQuery', 'lookup'). - - :type base_url: string - :param base_url: The base URL where the API lives. - You shouldn't have to provide this. - - :type api_version: string - :param api_version: The version of the API to connect to. - You shouldn't have to provide this. 
- """ - return self.API_URL_TEMPLATE.format( - api_base=(base_url or self.api_base_url), - api_version=(api_version or self.API_VERSION), - project=project, method=method) - - def lookup(self, project, key_pbs, - eventual=False, transaction_id=None): - """Lookup keys from a project in the Cloud Datastore. - - Maps the ``DatastoreService.Lookup`` protobuf RPC. - - This uses mostly protobufs - (:class:`gcloud.datastore._generated.entity_pb2.Key` as input and - :class:`gcloud.datastore._generated.entity_pb2.Entity` as output). It - is used under the hood in - :meth:`Client.get() <.datastore.client.Client.get>`: - - >>> from gcloud import datastore - >>> client = datastore.Client(project='project') - >>> key = client.key('MyKind', 1234) - >>> client.get(key) - [] - - Using a :class:`Connection` directly: - - >>> connection.lookup('project', [key.to_protobuf()]) - [] - - :type project: string - :param project: The project to look up the keys in. - - :type key_pbs: list of - :class:`gcloud.datastore._generated.entity_pb2.Key` - :param key_pbs: The keys to retrieve from the datastore. - - :type eventual: bool - :param eventual: If False (the default), request ``STRONG`` read - consistency. If True, request ``EVENTUAL`` read - consistency. - - :type transaction_id: string - :param transaction_id: If passed, make the request in the scope of - the given transaction. Incompatible with - ``eventual==True``. - - :rtype: tuple - :returns: A triple of (``results``, ``missing``, ``deferred``) where - both ``results`` and ``missing`` are lists of - :class:`gcloud.datastore._generated.entity_pb2.Entity` and - ``deferred`` is a list of - :class:`gcloud.datastore._generated.entity_pb2.Key`. - """ - lookup_request = _datastore_pb2.LookupRequest() - _set_read_options(lookup_request, eventual, transaction_id) - _add_keys_to_request(lookup_request.keys, key_pbs) - - lookup_response = self._rpc(project, 'lookup', lookup_request, - _datastore_pb2.LookupResponse) - - results = [result.entity for result in lookup_response.found] - missing = [result.entity for result in lookup_response.missing] - - return results, missing, list(lookup_response.deferred) - - def run_query(self, project, query_pb, namespace=None, - eventual=False, transaction_id=None): - """Run a query on the Cloud Datastore. - - Maps the ``DatastoreService.RunQuery`` protobuf RPC. - - Given a Query protobuf, sends a ``runQuery`` request to the - Cloud Datastore API and returns a list of entity protobufs - matching the query. - - You typically wouldn't use this method directly, in favor of the - :meth:`gcloud.datastore.query.Query.fetch` method. - - Under the hood, the :class:`gcloud.datastore.query.Query` class - uses this method to fetch data: - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> query = client.query(kind='MyKind') - >>> query.add_filter('property', '=', 'val') - - Using the query iterator's - :meth:`next_page() <.datastore.query.Iterator.next_page>` method: - - >>> query_iter = query.fetch() - >>> entities, more_results, cursor = query_iter.next_page() - >>> entities - [] - >>> more_results - - >>> cursor - - - Under the hood this is doing: - - >>> connection.run_query('project', query.to_protobuf()) - [], cursor, more_results, skipped_results - - :type project: string - :param project: The project over which to run the query. - - :type query_pb: :class:`gcloud.datastore._generated.query_pb2.Query` - :param query_pb: The Protobuf representing the query to run. 
- - :type namespace: string - :param namespace: The namespace over which to run the query. - - :type eventual: bool - :param eventual: If False (the default), request ``STRONG`` read - consistency. If True, request ``EVENTUAL`` read - consistency. - - :type transaction_id: string - :param transaction_id: If passed, make the request in the scope of - the given transaction. Incompatible with - ``eventual==True``. - """ - request = _datastore_pb2.RunQueryRequest() - _set_read_options(request, eventual, transaction_id) - - if namespace: - request.partition_id.namespace_id = namespace - - request.query.CopyFrom(query_pb) - response = self._rpc(project, 'runQuery', request, - _datastore_pb2.RunQueryResponse) - return ( - [e.entity for e in response.batch.entity_results], - response.batch.end_cursor, # Assume response always has cursor. - response.batch.more_results, - response.batch.skipped_results, - ) - - def begin_transaction(self, project): - """Begin a transaction. - - Maps the ``DatastoreService.BeginTransaction`` protobuf RPC. - - :type project: string - :param project: The project to which the transaction applies. - - :rtype: bytes - :returns: The serialized transaction that was begun. - """ - request = _datastore_pb2.BeginTransactionRequest() - response = self._rpc(project, 'beginTransaction', request, - _datastore_pb2.BeginTransactionResponse) - return response.transaction - - def commit(self, project, request, transaction_id): - """Commit mutations in context of current transation (if any). - - Maps the ``DatastoreService.Commit`` protobuf RPC. - - :type project: string - :param project: The project to which the transaction applies. - - :type request: :class:`._generated.datastore_pb2.CommitRequest` - :param request: The protobuf with the mutations being committed. - - :type transaction_id: string or None - :param transaction_id: The transaction ID returned from - :meth:`begin_transaction`. Non-transactional - batches must pass ``None``. - - .. note:: - - This method will mutate ``request`` before using it. - - :rtype: tuple - :returns': The pair of the number of index updates and a list of - :class:`._generated.entity_pb2.Key` for each incomplete key - that was completed in the commit. - """ - if transaction_id: - request.mode = _datastore_pb2.CommitRequest.TRANSACTIONAL - request.transaction = transaction_id - else: - request.mode = _datastore_pb2.CommitRequest.NON_TRANSACTIONAL - - response = self._rpc(project, 'commit', request, - _datastore_pb2.CommitResponse) - return _parse_commit_response(response) - - def rollback(self, project, transaction_id): - """Rollback the connection's existing transaction. - - Maps the ``DatastoreService.Rollback`` protobuf RPC. - - :type project: string - :param project: The project to which the transaction belongs. - - :type transaction_id: string - :param transaction_id: The transaction ID returned from - :meth:`begin_transaction`. - """ - request = _datastore_pb2.RollbackRequest() - request.transaction = transaction_id - # Nothing to do with this response, so just execute the method. - self._rpc(project, 'rollback', request, - _datastore_pb2.RollbackResponse) - - def allocate_ids(self, project, key_pbs): - """Obtain backend-generated IDs for a set of keys. - - Maps the ``DatastoreService.AllocateIds`` protobuf RPC. - - :type project: string - :param project: The project to which the transaction belongs. 
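To make the ``begin_transaction`` / ``commit`` / ``rollback`` trio concrete at the connection level, a minimal sketch that begins a transaction and immediately rolls it back, so nothing is written (the project is a placeholder):

    >>> from gcloud import datastore
    >>> client = datastore.Client(project='my-project')
    >>> xact_id = client.connection.begin_transaction('my-project')
    >>> # ... lookups scoped to ``xact_id`` (or a CommitRequest) would go here ...
    >>> client.connection.rollback('my-project', xact_id)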
- - :type key_pbs: list of - :class:`gcloud.datastore._generated.entity_pb2.Key` - :param key_pbs: The keys for which the backend should allocate IDs. - - :rtype: list of :class:`gcloud.datastore._generated.entity_pb2.Key` - :returns: An equal number of keys, with IDs filled in by the backend. - """ - request = _datastore_pb2.AllocateIdsRequest() - _add_keys_to_request(request.keys, key_pbs) - # Nothing to do with this response, so just execute the method. - response = self._rpc(project, 'allocateIds', request, - _datastore_pb2.AllocateIdsResponse) - return list(response.keys) - - -def _set_read_options(request, eventual, transaction_id): - """Validate rules for read options, and assign to the request. - - Helper method for ``lookup()`` and ``run_query``. - - :raises: :class:`ValueError` if ``eventual`` is ``True`` and the - ``transaction_id`` is not ``None``. - """ - if eventual and (transaction_id is not None): - raise ValueError('eventual must be False when in a transaction') - - opts = request.read_options - if eventual: - opts.read_consistency = _datastore_pb2.ReadOptions.EVENTUAL - elif transaction_id: - opts.transaction = transaction_id - - -def _add_keys_to_request(request_field_pb, key_pbs): - """Add protobuf keys to a request object. - - :type request_field_pb: `RepeatedCompositeFieldContainer` - :param request_field_pb: A repeated proto field that contains keys. - - :type key_pbs: list of :class:`gcloud.datastore._generated.entity_pb2.Key` - :param key_pbs: The keys to add to a request. - """ - for key_pb in key_pbs: - request_field_pb.add().CopyFrom(key_pb) - - -def _parse_commit_response(commit_response_pb): - """Extract response data from a commit response. - - :type commit_response_pb: :class:`._generated.datastore_pb2.CommitResponse` - :param commit_response_pb: The protobuf response from a commit request. - - :rtype: tuple - :returns': The pair of the number of index updates and a list of - :class:`._generated.entity_pb2.Key` for each incomplete key - that was completed in the commit. - """ - mut_results = commit_response_pb.mutation_results - index_updates = commit_response_pb.index_updates - completed_keys = [mut_result.key for mut_result in mut_results - if mut_result.HasField('key')] # Message field (Key) - return index_updates, completed_keys diff --git a/gcloud/datastore/entity.py b/gcloud/datastore/entity.py deleted file mode 100644 index 0d5ce4b18bcb..000000000000 --- a/gcloud/datastore/entity.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Class for representing a single entity in the Cloud Datastore.""" - - -from gcloud._helpers import _ensure_tuple_or_list - - -class Entity(dict): - """Entities are akin to rows in a relational database - - An entity storing the actual instance of data. 
- - Each entity is officially represented with a - :class:`gcloud.datastore.key.Key` class, however it is possible that - you might create an Entity with only a partial Key (that is, a Key - with a Kind, and possibly a parent, but without an ID). In such a - case, the datastore service will automatically assign an ID to the - partial key. - - Entities in this API act like dictionaries with extras built in that - allow you to delete or persist the data stored on the entity. - - Entities are mutable and act like a subclass of a dictionary. - This means you could take an existing entity and change the key - to duplicate the object. - - Use :func:`gcloud.datastore.get` to retrieve an existing entity. - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> client.get(key) - - - You can the set values on the entity just like you would on any - other dictionary. - - >>> entity['age'] = 20 - >>> entity['name'] = 'JJ' - >>> entity - - - And you can convert an entity to a regular Python dictionary with the - ``dict`` builtin: - - >>> dict(entity) - {'age': 20, 'name': 'JJ'} - - .. note:: - - When saving an entity to the backend, values which are "text" - (``unicode`` in Python2, ``str`` in Python3) will be saved using - the 'text_value' field, after being encoded to UTF-8. When - retrieved from the back-end, such values will be decoded to "text" - again. Values which are "bytes" (``str`` in Python2, ``bytes`` in - Python3), will be saved using the 'blob_value' field, without - any decoding / encoding step. - - :type key: :class:`gcloud.datastore.key.Key` - :param key: Optional key to be set on entity. - - :type exclude_from_indexes: tuple of string - :param exclude_from_indexes: Names of fields whose values are not to be - indexed for this entity. - """ - - def __init__(self, key=None, exclude_from_indexes=()): - super(Entity, self).__init__() - self.key = key - self._exclude_from_indexes = set(_ensure_tuple_or_list( - 'exclude_from_indexes', exclude_from_indexes)) - # NOTE: This will be populated when parsing a protobuf in - # gcloud.datastore.helpers.entity_from_protobuf. - self._meanings = {} - - def __eq__(self, other): - """Compare two entities for equality. - - Entities compare equal if their keys compare equal, and their - properties compare equal. - - :rtype: boolean - :returns: True if the entities compare equal, else False. - """ - if not isinstance(other, Entity): - return False - - return (self.key == other.key and - self._exclude_from_indexes == other._exclude_from_indexes and - self._meanings == other._meanings and - super(Entity, self).__eq__(other)) - - def __ne__(self, other): - """Compare two entities for inequality. - - Entities compare equal if their keys compare equal, and their - properties compare equal. - - :rtype: boolean - :returns: False if the entities compare equal, else True. - """ - return not self.__eq__(other) - - @property - def kind(self): - """Get the kind of the current entity. - - .. note:: - This relies entirely on the :class:`gcloud.datastore.key.Key` - set on the entity. That means that we're not storing the kind - of the entity at all, just the properties and a pointer to a - Key which knows its Kind. - """ - if self.key: - return self.key.kind - - @property - def exclude_from_indexes(self): - """Names of fields which are *not* to be indexed for this entity. 
- - :rtype: sequence of field names - """ - return frozenset(self._exclude_from_indexes) - - def __repr__(self): - if self.key: - return '' % (self.key.path, - super(Entity, self).__repr__()) - else: - return '' % (super(Entity, self).__repr__()) diff --git a/gcloud/datastore/helpers.py b/gcloud/datastore/helpers.py deleted file mode 100644 index 9cb73011b261..000000000000 --- a/gcloud/datastore/helpers.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper functions for dealing with Cloud Datastore's Protobuf API. - -The non-private functions are part of the API. -""" - -import datetime - -from google.protobuf import struct_pb2 -from google.type import latlng_pb2 -import six - -from gcloud._helpers import _datetime_to_pb_timestamp -from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.datastore._generated import entity_pb2 as _entity_pb2 -from gcloud.datastore.entity import Entity -from gcloud.datastore.key import Key - -__all__ = ('entity_from_protobuf', 'key_from_protobuf') - - -def _get_meaning(value_pb, is_list=False): - """Get the meaning from a protobuf value. - - :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value` - :param value_pb: The protobuf value to be checked for an - associated meaning. - - :type is_list: bool - :param is_list: Boolean indicating if the ``value_pb`` contains - a list value. - - :rtype: int - :returns: The meaning for the ``value_pb`` if one is set, else - :data:`None`. - :raises: :class:`ValueError ` if a list value - has disagreeing meanings (in sub-elements) or has some - elements with meanings and some without. - """ - meaning = None - if is_list: - # An empty list will have no values, hence no shared meaning - # set among them. - if len(value_pb.array_value.values) == 0: - return None - - # We check among all the meanings, some of which may be None, - # the rest which may be enum/int values. - all_meanings = set(_get_meaning(sub_value_pb) - for sub_value_pb in value_pb.array_value.values) - meaning = all_meanings.pop() - # The value we popped off should have been unique. If not - # then we can't handle a list with values that have more - # than one meaning. - if all_meanings: - raise ValueError('Different meanings set on values ' - 'within an array_value') - elif value_pb.meaning: # Simple field (int32) - meaning = value_pb.meaning - - return meaning - - -def _new_value_pb(entity_pb, name): - """Add (by name) a new ``Value`` protobuf to an entity protobuf. - - :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :param entity_pb: An entity protobuf to add a new property to. - - :type name: string - :param name: The name of the new property. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Value` - :returns: The new ``Value`` protobuf that was added to the entity. 
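Relating this back to the ``Entity`` class earlier in this diff: ``exclude_from_indexes`` is captured at construction time and surfaced as a frozenset. A local sketch with placeholder names (no API calls):

    >>> from gcloud.datastore.entity import Entity
    >>> from gcloud.datastore.key import Key
    >>> key = Key('Person', 1234, project='my-project')
    >>> entity = Entity(key=key, exclude_from_indexes=('bio',))
    >>> entity['bio'] = u'Free-form text that should not be indexed.'
    >>> sorted(entity.exclude_from_indexes)
    ['bio']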
- """ - return entity_pb.properties.get_or_create(name) - - -def _property_tuples(entity_pb): - """Iterator of name, ``Value`` tuples from entity properties. - - :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :param entity_pb: An entity protobuf to add a new property to. - - :rtype: :class:`generator` - :returns: An iterator that yields tuples of a name and ``Value`` - corresponding to properties on the entity. - """ - return six.iteritems(entity_pb.properties) - - -def entity_from_protobuf(pb): - """Factory method for creating an entity based on a protobuf. - - The protobuf should be one returned from the Cloud Datastore - Protobuf API. - - :type pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :param pb: The Protobuf representing the entity. - - :rtype: :class:`gcloud.datastore.entity.Entity` - :returns: The entity derived from the protobuf. - """ - key = None - if pb.HasField('key'): # Message field (Key) - key = key_from_protobuf(pb.key) - - entity_props = {} - entity_meanings = {} - exclude_from_indexes = [] - - for prop_name, value_pb in _property_tuples(pb): - value = _get_value_from_value_pb(value_pb) - entity_props[prop_name] = value - - # Check if the property has an associated meaning. - is_list = isinstance(value, list) - meaning = _get_meaning(value_pb, is_list=is_list) - if meaning is not None: - entity_meanings[prop_name] = (meaning, value) - - # Check if ``value_pb`` was excluded from index. Lists need to be - # special-cased and we require all ``exclude_from_indexes`` values - # in a list agree. - if is_list: - exclude_values = set(value_pb.exclude_from_indexes - for value_pb in value_pb.array_value.values) - if len(exclude_values) != 1: - raise ValueError('For an array_value, subvalues must either ' - 'all be indexed or all excluded from ' - 'indexes.') - - if exclude_values.pop(): - exclude_from_indexes.append(prop_name) - else: - if value_pb.exclude_from_indexes: - exclude_from_indexes.append(prop_name) - - entity = Entity(key=key, exclude_from_indexes=exclude_from_indexes) - entity.update(entity_props) - entity._meanings.update(entity_meanings) - return entity - - -def entity_to_protobuf(entity): - """Converts an entity into a protobuf. - - :type entity: :class:`gcloud.datastore.entity.Entity` - :param entity: The entity to be turned into a protobuf. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` - :returns: The protobuf representing the entity. - """ - entity_pb = _entity_pb2.Entity() - if entity.key is not None: - key_pb = entity.key.to_protobuf() - entity_pb.key.CopyFrom(key_pb) - - for name, value in entity.items(): - value_is_list = isinstance(value, list) - if value_is_list and len(value) == 0: - continue - - value_pb = _new_value_pb(entity_pb, name) - # Set the appropriate value. - _set_protobuf_value(value_pb, value) - - # Add index information to protobuf. - if name in entity.exclude_from_indexes: - if not value_is_list: - value_pb.exclude_from_indexes = True - - for sub_value in value_pb.array_value.values: - sub_value.exclude_from_indexes = True - - # Add meaning information to protobuf. - if name in entity._meanings: - meaning, orig_value = entity._meanings[name] - # Only add the meaning back to the protobuf if the value is - # unchanged from when it was originally read from the API. - if orig_value is value: - # For lists, we set meaning on each sub-element. 
- if value_is_list: - for sub_value_pb in value_pb.array_value.values: - sub_value_pb.meaning = meaning - else: - value_pb.meaning = meaning - - return entity_pb - - -def key_from_protobuf(pb): - """Factory method for creating a key based on a protobuf. - - The protobuf should be one returned from the Cloud Datastore - Protobuf API. - - :type pb: :class:`gcloud.datastore._generated.entity_pb2.Key` - :param pb: The Protobuf representing the key. - - :rtype: :class:`gcloud.datastore.key.Key` - :returns: a new `Key` instance - """ - path_args = [] - for element in pb.path: - path_args.append(element.kind) - if element.id: # Simple field (int64) - path_args.append(element.id) - # This is safe: we expect proto objects returned will only have - # one of `name` or `id` set. - if element.name: # Simple field (string) - path_args.append(element.name) - - project = None - if pb.partition_id.project_id: # Simple field (string) - project = pb.partition_id.project_id - namespace = None - if pb.partition_id.namespace_id: # Simple field (string) - namespace = pb.partition_id.namespace_id - - return Key(*path_args, namespace=namespace, project=project) - - -def _pb_attr_value(val): - """Given a value, return the protobuf attribute name and proper value. - - The Protobuf API uses different attribute names based on value types - rather than inferring the type. This function simply determines the - proper attribute name based on the type of the value provided and - returns the attribute name as well as a properly formatted value. - - Certain value types need to be coerced into a different type (such - as a `datetime.datetime` into an integer timestamp, or a - `gcloud.datastore.key.Key` into a Protobuf representation. This - function handles that for you. - - .. note:: - Values which are "text" ('unicode' in Python2, 'str' in Python3) map - to 'string_value' in the datastore; values which are "bytes" - ('str' in Python2, 'bytes' in Python3) map to 'blob_value'. - - For example: - - >>> _pb_attr_value(1234) - ('integer_value', 1234) - >>> _pb_attr_value('my_string') - ('string_value', 'my_string') - - :type val: `datetime.datetime`, :class:`gcloud.datastore.key.Key`, - bool, float, integer, string - :param val: The value to be scrutinized. - - :returns: A tuple of the attribute name and proper value type. - """ - - if isinstance(val, datetime.datetime): - name = 'timestamp' - value = _datetime_to_pb_timestamp(val) - elif isinstance(val, Key): - name, value = 'key', val.to_protobuf() - elif isinstance(val, bool): - name, value = 'boolean', val - elif isinstance(val, float): - name, value = 'double', val - elif isinstance(val, six.integer_types): - name, value = 'integer', val - elif isinstance(val, six.text_type): - name, value = 'string', val - elif isinstance(val, (bytes, str)): - name, value = 'blob', val - elif isinstance(val, Entity): - name, value = 'entity', val - elif isinstance(val, list): - name, value = 'array', val - elif isinstance(val, GeoPoint): - name, value = 'geo_point', val.to_protobuf() - elif val is None: - name, value = 'null', struct_pb2.NULL_VALUE - else: - raise ValueError("Unknown protobuf attr type %s" % type(val)) - - return name + '_value', value - - -def _get_value_from_value_pb(value_pb): - """Given a protobuf for a Value, get the correct value. - - The Cloud Datastore Protobuf API returns a Property Protobuf which - has one value set and the rest blank. This function retrieves the - the one value provided. 
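For ordinary values, ``entity_to_protobuf`` and ``entity_from_protobuf`` above are inverses of one another. A local round-trip sketch (no API calls; the project is a placeholder):

    >>> from gcloud.datastore import helpers
    >>> from gcloud.datastore.entity import Entity
    >>> from gcloud.datastore.key import Key
    >>> original = Entity(key=Key('Person', 1234, project='my-project'))
    >>> original['name'] = u'Alice'
    >>> pb = helpers.entity_to_protobuf(original)
    >>> restored = helpers.entity_from_protobuf(pb)
    >>> restored == original
    True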
- - Some work is done to coerce the return value into a more useful type - (particularly in the case of a timestamp value, or a key value). - - :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value` - :param value_pb: The Value Protobuf. - - :returns: The value provided by the Protobuf. - :raises: :class:`ValueError ` if no value type - has been set. - """ - value_type = value_pb.WhichOneof('value_type') - - if value_type == 'timestamp_value': - result = _pb_timestamp_to_datetime(value_pb.timestamp_value) - - elif value_type == 'key_value': - result = key_from_protobuf(value_pb.key_value) - - elif value_type == 'boolean_value': - result = value_pb.boolean_value - - elif value_type == 'double_value': - result = value_pb.double_value - - elif value_type == 'integer_value': - result = value_pb.integer_value - - elif value_type == 'string_value': - result = value_pb.string_value - - elif value_type == 'blob_value': - result = value_pb.blob_value - - elif value_type == 'entity_value': - result = entity_from_protobuf(value_pb.entity_value) - - elif value_type == 'array_value': - result = [_get_value_from_value_pb(value) - for value in value_pb.array_value.values] - - elif value_type == 'geo_point_value': - result = GeoPoint(value_pb.geo_point_value.latitude, - value_pb.geo_point_value.longitude) - - elif value_type == 'null_value': - result = None - - else: - raise ValueError('Value protobuf did not have any value set') - - return result - - -def _set_protobuf_value(value_pb, val): - """Assign 'val' to the correct subfield of 'value_pb'. - - The Protobuf API uses different attribute names based on value types - rather than inferring the type. - - Some value types (entities, keys, lists) cannot be directly - assigned; this function handles them correctly. - - :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value` - :param value_pb: The value protobuf to which the value is being assigned. - - :type val: :class:`datetime.datetime`, boolean, float, integer, string, - :class:`gcloud.datastore.key.Key`, - :class:`gcloud.datastore.entity.Entity` - :param val: The value to be assigned. - """ - attr, val = _pb_attr_value(val) - if attr == 'key_value': - value_pb.key_value.CopyFrom(val) - elif attr == 'timestamp_value': - value_pb.timestamp_value.CopyFrom(val) - elif attr == 'entity_value': - entity_pb = entity_to_protobuf(val) - value_pb.entity_value.CopyFrom(entity_pb) - elif attr == 'array_value': - l_pb = value_pb.array_value.values - for item in val: - i_pb = l_pb.add() - _set_protobuf_value(i_pb, item) - elif attr == 'geo_point_value': - value_pb.geo_point_value.CopyFrom(val) - else: # scalar, just assign - setattr(value_pb, attr, val) - - -class GeoPoint(object): - """Simple container for a geo point value. - - :type latitude: float - :param latitude: Latitude of a point. - - :type longitude: float - :param longitude: Longitude of a point. - """ - - def __init__(self, latitude, longitude): - self.latitude = latitude - self.longitude = longitude - - def to_protobuf(self): - """Convert the current object to protobuf. - - :rtype: :class:`google.type.latlng_pb2.LatLng`. - :returns: The current point as a protobuf. - """ - return latlng_pb2.LatLng(latitude=self.latitude, - longitude=self.longitude) - - def __eq__(self, other): - """Compare two geo points for equality. - - :rtype: boolean - :returns: True if the points compare equal, else False. 
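``GeoPoint`` above is a plain value holder with a protobuf conversion; a tiny sketch with arbitrary coordinates:

    >>> from gcloud.datastore.helpers import GeoPoint
    >>> point = GeoPoint(37.422, -122.084)
    >>> point == GeoPoint(37.422, -122.084)
    True
    >>> pb = point.to_protobuf()
    >>> (pb.latitude, pb.longitude)
    (37.422, -122.084)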
- """ - if not isinstance(other, GeoPoint): - return False - - return (self.latitude == other.latitude and - self.longitude == other.longitude) - - def __ne__(self, other): - """Compare two geo points for inequality. - - :rtype: boolean - :returns: False if the points compare equal, else True. - """ - return not self.__eq__(other) diff --git a/gcloud/datastore/key.py b/gcloud/datastore/key.py deleted file mode 100644 index a1356dca1481..000000000000 --- a/gcloud/datastore/key.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud datastore keys.""" - -import copy -import six - -from gcloud.datastore._generated import entity_pb2 as _entity_pb2 - - -class Key(object): - """An immutable representation of a datastore Key. - - To create a basic key: - - >>> Key('EntityKind', 1234) - - >>> Key('EntityKind', 'foo') - - - To create a key with a parent: - - >>> Key('Parent', 'foo', 'Child', 1234) - - >>> Key('Child', 1234, parent=parent_key) - - - To create a partial key: - - >>> Key('Parent', 'foo', 'Child') - - - :type path_args: tuple of string and integer - :param path_args: May represent a partial (odd length) or full (even - length) key path. - - :type kwargs: dict - :param kwargs: Keyword arguments to be passed in. - - Accepted keyword arguments are - - * namespace (string): A namespace identifier for the key. - * project (string): The project associated with the key. - * parent (:class:`gcloud.datastore.key.Key`): The parent of the key. - - The project argument is required unless it has been set implicitly. - """ - - def __init__(self, *path_args, **kwargs): - self._flat_path = path_args - parent = self._parent = kwargs.get('parent') - self._namespace = kwargs.get('namespace') - project = kwargs.get('project') - self._project = _validate_project(project, parent) - # _flat_path, _parent, _namespace and _project must be set before - # _combine_args() is called. - self._path = self._combine_args() - - def __eq__(self, other): - """Compare two keys for equality. - - Incomplete keys never compare equal to any other key. - - Completed keys compare equal if they have the same path, project, - and namespace. - - :rtype: bool - :returns: True if the keys compare equal, else False. - """ - if not isinstance(other, Key): - return False - - if self.is_partial or other.is_partial: - return False - - return (self.flat_path == other.flat_path and - self.project == other.project and - self.namespace == other.namespace) - - def __ne__(self, other): - """Compare two keys for inequality. - - Incomplete keys never compare equal to any other key. - - Completed keys compare equal if they have the same path, project, - and namespace. - - :rtype: bool - :returns: False if the keys compare equal, else True. - """ - return not self.__eq__(other) - - def __hash__(self): - """Hash a keys for use in a dictionary lookp. - - :rtype: integer - :returns: a hash of the key's state. 
- """ - return (hash(self.flat_path) + - hash(self.project) + - hash(self.namespace)) - - @staticmethod - def _parse_path(path_args): - """Parses positional arguments into key path with kinds and IDs. - - :type path_args: tuple - :param path_args: A tuple from positional arguments. Should be - alternating list of kinds (string) and ID/name - parts (int or string). - - :rtype: :class:`list` of :class:`dict` - :returns: A list of key parts with kind and ID or name set. - :raises: :class:`ValueError` if there are no ``path_args``, if one of - the kinds is not a string or if one of the IDs/names is not - a string or an integer. - """ - if len(path_args) == 0: - raise ValueError('Key path must not be empty.') - - kind_list = path_args[::2] - id_or_name_list = path_args[1::2] - # Dummy sentinel value to pad incomplete key to even length path. - partial_ending = object() - if len(path_args) % 2 == 1: - id_or_name_list += (partial_ending,) - - result = [] - for kind, id_or_name in zip(kind_list, id_or_name_list): - curr_key_part = {} - if isinstance(kind, six.string_types): - curr_key_part['kind'] = kind - else: - raise ValueError(kind, 'Kind was not a string.') - - if isinstance(id_or_name, six.string_types): - curr_key_part['name'] = id_or_name - elif isinstance(id_or_name, six.integer_types): - curr_key_part['id'] = id_or_name - elif id_or_name is not partial_ending: - raise ValueError(id_or_name, - 'ID/name was not a string or integer.') - - result.append(curr_key_part) - - return result - - def _combine_args(self): - """Sets protected data by combining raw data set from the constructor. - - If a ``_parent`` is set, updates the ``_flat_path`` and sets the - ``_namespace`` and ``_project`` if not already set. - - :rtype: :class:`list` of :class:`dict` - :returns: A list of key parts with kind and ID or name set. - :raises: :class:`ValueError` if the parent key is not complete. - """ - child_path = self._parse_path(self._flat_path) - - if self._parent is not None: - if self._parent.is_partial: - raise ValueError('Parent key must be complete.') - - # We know that _parent.path() will return a copy. - child_path = self._parent.path + child_path - self._flat_path = self._parent.flat_path + self._flat_path - if (self._namespace is not None and - self._namespace != self._parent.namespace): - raise ValueError('Child namespace must agree with parent\'s.') - self._namespace = self._parent.namespace - if (self._project is not None and - self._project != self._parent.project): - raise ValueError('Child project must agree with parent\'s.') - self._project = self._parent.project - - return child_path - - def _clone(self): - """Duplicates the Key. - - Most attributes are simple types, so don't require copying. Other - attributes like ``parent`` are long-lived and so we re-use them. - - :rtype: :class:`gcloud.datastore.key.Key` - :returns: A new ``Key`` instance with the same data as the current one. - """ - cloned_self = self.__class__(*self.flat_path, - project=self.project, - namespace=self.namespace) - # If the current parent has already been set, we re-use - # the same instance - cloned_self._parent = self._parent - return cloned_self - - def completed_key(self, id_or_name): - """Creates new key from existing partial key by adding final ID/name. - - :type id_or_name: string or integer - :param id_or_name: ID or name to be added to the key. - - :rtype: :class:`gcloud.datastore.key.Key` - :returns: A new ``Key`` instance with the same data as the current one - and an extra ID or name added. 
- :raises: :class:`ValueError` if the current key is not partial or if - ``id_or_name`` is not a string or integer. - """ - if not self.is_partial: - raise ValueError('Only a partial key can be completed.') - - id_or_name_key = None - if isinstance(id_or_name, six.string_types): - id_or_name_key = 'name' - elif isinstance(id_or_name, six.integer_types): - id_or_name_key = 'id' - else: - raise ValueError(id_or_name, - 'ID/name was not a string or integer.') - - new_key = self._clone() - new_key._path[-1][id_or_name_key] = id_or_name - new_key._flat_path += (id_or_name,) - return new_key - - def to_protobuf(self): - """Return a protobuf corresponding to the key. - - :rtype: :class:`gcloud.datastore._generated.entity_pb2.Key` - :returns: The protobuf representing the key. - """ - key = _entity_pb2.Key() - key.partition_id.project_id = self.project - - if self.namespace: - key.partition_id.namespace_id = self.namespace - - for item in self.path: - element = key.path.add() - if 'kind' in item: - element.kind = item['kind'] - if 'id' in item: - element.id = item['id'] - if 'name' in item: - element.name = item['name'] - - return key - - @property - def is_partial(self): - """Boolean indicating if the key has an ID (or name). - - :rtype: bool - :returns: ``True`` if the last element of the key's path does not have - an ``id`` or a ``name``. - """ - return self.id_or_name is None - - @property - def namespace(self): - """Namespace getter. - - :rtype: string - :returns: The namespace of the current key. - """ - return self._namespace - - @property - def path(self): - """Path getter. - - Returns a copy so that the key remains immutable. - - :rtype: :class:`list` of :class:`dict` - :returns: The (key) path of the current key. - """ - return copy.deepcopy(self._path) - - @property - def flat_path(self): - """Getter for the key path as a tuple. - - :rtype: tuple of string and integer - :returns: The tuple of elements in the path. - """ - return self._flat_path - - @property - def kind(self): - """Kind getter. Based on the last element of path. - - :rtype: string - :returns: The kind of the current key. - """ - return self.path[-1]['kind'] - - @property - def id(self): - """ID getter. Based on the last element of path. - - :rtype: integer - :returns: The (integer) ID of the key. - """ - return self.path[-1].get('id') - - @property - def name(self): - """Name getter. Based on the last element of path. - - :rtype: string - :returns: The (string) name of the key. - """ - return self.path[-1].get('name') - - @property - def id_or_name(self): - """Getter. Based on the last element of path. - - :rtype: integer (if ``id``) or string (if ``name``) - :returns: The last element of the key's path if it is either an ``id`` - or a ``name``. - """ - return self.id or self.name - - @property - def project(self): - """Project getter. - - :rtype: string - :returns: The key's project. - """ - return self._project - - def _make_parent(self): - """Creates a parent key for the current path. - - Extracts all but the last element in the key path and creates a new - key, while still matching the namespace and the project. - - :rtype: :class:`gcloud.datastore.key.Key` or :class:`NoneType` - :returns: A new ``Key`` instance, whose path consists of all but the - last element of current path. If the current key has only - one path element, returns ``None``. 
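Tying together ``completed_key``, ``parent`` and the path properties above, a local sketch (no API calls; the project string is a placeholder):

    >>> from gcloud.datastore.key import Key
    >>> parent = Key('Parent', 'foo', project='my-project')
    >>> child = Key('Child', parent=parent, project='my-project')   # partial: no ID or name
    >>> child.is_partial
    True
    >>> complete = child.completed_key(1234)
    >>> complete.flat_path
    ('Parent', 'foo', 'Child', 1234)
    >>> complete.parent.flat_path
    ('Parent', 'foo')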
- """ - if self.is_partial: - parent_args = self.flat_path[:-1] - else: - parent_args = self.flat_path[:-2] - if parent_args: - return self.__class__(*parent_args, project=self.project, - namespace=self.namespace) - - @property - def parent(self): - """The parent of the current key. - - :rtype: :class:`gcloud.datastore.key.Key` or :class:`NoneType` - :returns: A new ``Key`` instance, whose path consists of all but the - last element of current path. If the current key has only - one path element, returns ``None``. - """ - if self._parent is None: - self._parent = self._make_parent() - - return self._parent - - def __repr__(self): - return '' % (self.path, self.project) - - -def _validate_project(project, parent): - """Ensure the project is set appropriately. - - If ``parent`` is passed, skip the test (it will be checked / fixed up - later). - - If ``project`` is unset, attempt to infer the project from the environment. - - :type project: string - :param project: A project. - - :type parent: :class:`gcloud.datastore.key.Key` or ``NoneType`` - :param parent: The parent of the key or ``None``. - - :rtype: string - :returns: The ``project`` passed in, or implied from the environment. - :raises: :class:`ValueError` if ``project`` is ``None`` and no project - can be inferred from the parent. - """ - if parent is None: - if project is None: - raise ValueError("A Key must have a project set.") - - return project diff --git a/gcloud/datastore/query.py b/gcloud/datastore/query.py deleted file mode 100644 index 15519a01f15b..000000000000 --- a/gcloud/datastore/query.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud datastore queries.""" - -import base64 - -from gcloud._helpers import _ensure_tuple_or_list -from gcloud.datastore._generated import query_pb2 as _query_pb2 -from gcloud.datastore import helpers -from gcloud.datastore.key import Key - - -class Query(object): - """A Query against the Cloud Datastore. - - This class serves as an abstraction for creating a query over data - stored in the Cloud Datastore. - - :type client: :class:`gcloud.datastore.client.Client` - :param client: The client used to connect to datastore. - - :type kind: string - :param kind: The kind to query. - - :type project: string - :param project: The project associated with the query. If not passed, - uses the client's value. - - :type namespace: string or None - :param namespace: The namespace to which to restrict results. If not - passed, uses the client's value. - - :type ancestor: :class:`gcloud.datastore.key.Key` or None - :param ancestor: key of the ancestor to which this query's results are - restricted. - - :type filters: sequence of (property_name, operator, value) tuples - :param filters: property filters applied by this query. - - :type projection: sequence of string - :param projection: fields returned as part of query results. 
- - :type order: sequence of string - :param order: field names used to order query results. Prepend '-' - to a field name to sort it in descending order. - - :type distinct_on: sequence of string - :param distinct_on: field names used to group query results. - - :raises: ValueError if ``project`` is not passed and no implicit - default is set. - """ - - OPERATORS = { - '<=': _query_pb2.PropertyFilter.LESS_THAN_OR_EQUAL, - '>=': _query_pb2.PropertyFilter.GREATER_THAN_OR_EQUAL, - '<': _query_pb2.PropertyFilter.LESS_THAN, - '>': _query_pb2.PropertyFilter.GREATER_THAN, - '=': _query_pb2.PropertyFilter.EQUAL, - } - """Mapping of operator strings and their protobuf equivalents.""" - - def __init__(self, - client, - kind=None, - project=None, - namespace=None, - ancestor=None, - filters=(), - projection=(), - order=(), - distinct_on=()): - - self._client = client - self._kind = kind - self._project = project or client.project - self._namespace = namespace or client.namespace - self._ancestor = ancestor - self._filters = [] - # Verify filters passed in. - for property_name, operator, value in filters: - self.add_filter(property_name, operator, value) - self._projection = _ensure_tuple_or_list('projection', projection) - self._order = _ensure_tuple_or_list('order', order) - self._distinct_on = _ensure_tuple_or_list('distinct_on', distinct_on) - - @property - def project(self): - """Get the project for this Query. - - :rtype: str - """ - return self._project or self._client.project - - @property - def namespace(self): - """This query's namespace - - :rtype: string or None - :returns: the namespace assigned to this query - """ - return self._namespace or self._client.namespace - - @namespace.setter - def namespace(self, value): - """Update the query's namespace. - - :type value: string - """ - if not isinstance(value, str): - raise ValueError("Namespace must be a string") - self._namespace = value - - @property - def kind(self): - """Get the Kind of the Query. - - :rtype: string - """ - return self._kind - - @kind.setter - def kind(self, value): - """Update the Kind of the Query. - - :type value: string - :param value: updated kind for the query. - - .. note:: - - The protobuf specification allows for ``kind`` to be repeated, - but the current implementation returns an error if more than - one value is passed. If the back-end changes in the future to - allow multiple values, this method will be updated to allow passing - either a string or a sequence of strings. - """ - if not isinstance(value, str): - raise TypeError("Kind must be a string") - self._kind = value - - @property - def ancestor(self): - """The ancestor key for the query. - - :rtype: Key or None - """ - return self._ancestor - - @ancestor.setter - def ancestor(self, value): - """Set the ancestor for the query - - :type value: Key - :param value: the new ancestor key - """ - if not isinstance(value, Key): - raise TypeError("Ancestor must be a Key") - self._ancestor = value - - @ancestor.deleter - def ancestor(self): - """Remove the ancestor for the query.""" - self._ancestor = None - - @property - def filters(self): - """Filters set on the query. - - :rtype: sequence of (property_name, operator, value) tuples. - """ - return self._filters[:] - - def add_filter(self, property_name, operator, value): - """Filter the query based on a property name, operator and a value. 
- - Expressions take the form of:: - - .add_filter('', '', ) - - where property is a property stored on the entity in the datastore - and operator is one of ``OPERATORS`` - (ie, ``=``, ``<``, ``<=``, ``>``, ``>=``):: - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> query = client.query(kind='Person') - >>> query.add_filter('name', '=', 'James') - >>> query.add_filter('age', '>', 50) - - :type property_name: string - :param property_name: A property name. - - :type operator: string - :param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``. - - :type value: :class:`int`, :class:`str`, :class:`bool`, - :class:`float`, :class:`NoneType`, - :class:`datetime.datetime`, - :class:`gcloud.datastore.key.Key` - :param value: The value to filter on. - - :raises: :class:`ValueError` if ``operation`` is not one of the - specified values, or if a filter names ``'__key__'`` but - passes an invalid value (a key is required). - """ - if self.OPERATORS.get(operator) is None: - error_message = 'Invalid expression: "%s"' % (operator,) - choices_message = 'Please use one of: =, <, <=, >, >=.' - raise ValueError(error_message, choices_message) - - if property_name == '__key__' and not isinstance(value, Key): - raise ValueError('Invalid key: "%s"' % value) - - self._filters.append((property_name, operator, value)) - - @property - def projection(self): - """Fields names returned by the query. - - :rtype: sequence of string - :returns: Names of fields in query results. - """ - return self._projection[:] - - @projection.setter - def projection(self, projection): - """Set the fields returned the query. - - :type projection: string or sequence of strings - :param projection: Each value is a string giving the name of a - property to be included in the projection query. - """ - if isinstance(projection, str): - projection = [projection] - self._projection[:] = projection - - def keys_only(self): - """Set the projection to include only keys.""" - self._projection[:] = ['__key__'] - - def key_filter(self, key, operator='='): - """Filter on a key. - - :type key: :class:`gcloud.datastore.key.Key` - :param key: The key to filter on. - - :type operator: string - :param operator: (Optional) One of ``=``, ``<``, ``<=``, ``>``, ``>=``. - Defaults to ``=``. - """ - self.add_filter('__key__', operator, key) - - @property - def order(self): - """Names of fields used to sort query results. - - :rtype: sequence of string - """ - return self._order[:] - - @order.setter - def order(self, value): - """Set the fields used to sort query results. - - Sort fields will be applied in the order specified. - - :type value: string or sequence of strings - :param value: Each value is a string giving the name of the - property on which to sort, optionally preceded by a - hyphen (-) to specify descending order. - Omitting the hyphen implies ascending order. - """ - if isinstance(value, str): - value = [value] - self._order[:] = value - - @property - def distinct_on(self): - """Names of fields used to group query results. - - :rtype: sequence of string - """ - return self._distinct_on[:] - - @distinct_on.setter - def distinct_on(self, value): - """Set fields used to group query results. - - :type value: string or sequence of strings - :param value: Each value is a string giving the name of a - property to use to group results together. 
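``keys_only``, ``key_filter``, and the ``order`` setter above compose with ``add_filter``; a hedged sketch of a keys-only range scan (the ``Person`` kind and the numeric id are illustrative)::

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> query = client.query(kind='Person')
    >>> query.keys_only()                    # projection becomes ['__key__']
    >>> lower_bound = client.key('Person', 1000)
    >>> query.key_filter(lower_bound, '>=')  # shorthand for add_filter('__key__', '>=', key)
    >>> query.order = '__key__'              # a single string is wrapped into a list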
- """ - if isinstance(value, str): - value = [value] - self._distinct_on[:] = value - - def fetch(self, limit=None, offset=0, start_cursor=None, end_cursor=None, - client=None): - """Execute the Query; return an iterator for the matching entities. - - For example:: - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> query = client.query(kind='Person') - >>> query.add_filter('name', '=', 'Sally') - >>> list(query.fetch()) - [, , ...] - >>> list(query.fetch(1)) - [] - - :type limit: integer or None - :param limit: An optional limit passed through to the iterator. - - :type offset: integer - :param offset: An optional offset passed through to the iterator. - - :type start_cursor: bytes - :param start_cursor: An optional cursor passed through to the iterator. - - :type end_cursor: bytes - :param end_cursor: An optional cursor passed through to the iterator. - - :type client: :class:`gcloud.datastore.client.Client` - :param client: client used to connect to datastore. - If not supplied, uses the query's value. - - :rtype: :class:`Iterator` - :raises: ValueError if ``connection`` is not passed and no implicit - default has been set. - """ - if client is None: - client = self._client - - return Iterator( - self, client, limit, offset, start_cursor, end_cursor) - - -class Iterator(object): - """Represent the state of a given execution of a Query. - - :type query: :class:`gcloud.datastore.query.Query` - :param query: Query object holding permanent configuration (i.e. - things that don't change on with each page in - a results set). - - :type client: :class:`gcloud.datastore.client.Client` - :param client: The client used to make a request. - - :type limit: integer - :param limit: (Optional) Limit the number of results returned. - - :type offset: integer - :param offset: (Optional) Defaults to 0. Offset used to begin - a query. - - :type start_cursor: bytes - :param start_cursor: (Optional) Cursor to begin paging through - query results. - - :type end_cursor: bytes - :param end_cursor: (Optional) Cursor to end paging through - query results. - """ - - _NOT_FINISHED = _query_pb2.QueryResultBatch.NOT_FINISHED - - _FINISHED = ( - _query_pb2.QueryResultBatch.NO_MORE_RESULTS, - _query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT, - ) - - def __init__(self, query, client, limit=None, offset=0, - start_cursor=None, end_cursor=None): - self._query = query - self._client = client - self._limit = limit - self._offset = offset - self._start_cursor = start_cursor - self._end_cursor = end_cursor - self._page = self._more_results = None - - def next_page(self): - """Fetch a single "page" of query results. - - Low-level API for fine control: the more convenient API is - to iterate on the current Iterator. - - :rtype: tuple, (entities, more_results, cursor) - """ - pb = _pb_from_query(self._query) - - start_cursor = self._start_cursor - if start_cursor is not None: - pb.start_cursor = base64.urlsafe_b64decode(start_cursor) - - end_cursor = self._end_cursor - if end_cursor is not None: - pb.end_cursor = base64.urlsafe_b64decode(end_cursor) - - if self._limit is not None: - pb.limit.value = self._limit - - pb.offset = self._offset - - transaction = self._client.current_transaction - - query_results = self._client.connection.run_query( - query_pb=pb, - project=self._query.project, - namespace=self._query.namespace, - transaction_id=transaction and transaction.id, - ) - # NOTE: `query_results` contains an extra value that we don't use, - # namely `skipped_results`. 
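A minimal sketch of both levels of the API described above: iterating ``fetch`` results directly, and calling ``next_page`` for explicit page-at-a-time control (assuming a configured client; the filter value is illustrative)::

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> query = client.query(kind='Person')
    >>> query.add_filter('name', '=', 'Sally')
    >>> for person in query.fetch(limit=20):   # the Iterator yields Entity objects
    ...     print(person['name'])
    >>> iterator = query.fetch(limit=20)       # low-level paging
    >>> entities, more_results, cursor = iterator.next_page()
    >>> if more_results:
    ...     entities, more_results, cursor = iterator.next_page()  # resumes from the cursor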
- # - # NOTE: The value of `more_results` is not currently useful because - # the back-end always returns an enum - # value of MORE_RESULTS_AFTER_LIMIT even if there are no more - # results. See - # https://github.com/GoogleCloudPlatform/gcloud-python/issues/280 - # for discussion. - entity_pbs, cursor_as_bytes, more_results_enum = query_results[:3] - - if cursor_as_bytes == b'': - self._start_cursor = None - else: - self._start_cursor = base64.urlsafe_b64encode(cursor_as_bytes) - self._end_cursor = None - - if more_results_enum == self._NOT_FINISHED: - self._more_results = True - elif more_results_enum in self._FINISHED: - self._more_results = False - else: - raise ValueError('Unexpected value returned for `more_results`.') - - self._page = [ - helpers.entity_from_protobuf(entity) - for entity in entity_pbs] - return self._page, self._more_results, self._start_cursor - - def __iter__(self): - """Generator yielding all results matching our query. - - :rtype: sequence of :class:`gcloud.datastore.entity.Entity` - """ - self.next_page() - while True: - for entity in self._page: - yield entity - if not self._more_results: - break - self.next_page() - - -def _pb_from_query(query): - """Convert a Query instance to the corresponding protobuf. - - :type query: :class:`Query` - :param query: The source query. - - :rtype: :class:`gcloud.datastore._generated.query_pb2.Query` - :returns: A protobuf that can be sent to the protobuf API. N.b. that - it does not contain "in-flight" fields for ongoing query - executions (cursors, offset, limit). - """ - pb = _query_pb2.Query() - - for projection_name in query.projection: - pb.projection.add().property.name = projection_name - - if query.kind: - pb.kind.add().name = query.kind - - composite_filter = pb.filter.composite_filter - composite_filter.op = _query_pb2.CompositeFilter.AND - - if query.ancestor: - ancestor_pb = query.ancestor.to_protobuf() - - # Filter on __key__ HAS_ANCESTOR == ancestor. - ancestor_filter = composite_filter.filters.add().property_filter - ancestor_filter.property.name = '__key__' - ancestor_filter.op = _query_pb2.PropertyFilter.HAS_ANCESTOR - ancestor_filter.value.key_value.CopyFrom(ancestor_pb) - - for property_name, operator, value in query.filters: - pb_op_enum = query.OPERATORS.get(operator) - - # Add the specific filter - property_filter = composite_filter.filters.add().property_filter - property_filter.property.name = property_name - property_filter.op = pb_op_enum - - # Set the value to filter on based on the type. - if property_name == '__key__': - key_pb = value.to_protobuf() - property_filter.value.key_value.CopyFrom(key_pb) - else: - helpers._set_protobuf_value(property_filter.value, value) - - if not composite_filter.filters: - pb.ClearField('filter') - - for prop in query.order: - property_order = pb.order.add() - - if prop.startswith('-'): - property_order.property.name = prop[1:] - property_order.direction = property_order.DESCENDING - else: - property_order.property.name = prop - property_order.direction = property_order.ASCENDING - - for distinct_on_name in query.distinct_on: - pb.distinct_on.add().name = distinct_on_name - - return pb diff --git a/gcloud/datastore/test_batch.py b/gcloud/datastore/test_batch.py deleted file mode 100644 index 4636f275979f..000000000000 --- a/gcloud/datastore/test_batch.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestBatch(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.datastore.batch import Batch - - return Batch - - def _makeOne(self, client): - return self._getTargetClass()(client) - - def test_ctor(self): - from gcloud.datastore._generated import datastore_pb2 - _PROJECT = 'PROJECT' - _NAMESPACE = 'NAMESPACE' - connection = _Connection() - client = _Client(_PROJECT, connection, _NAMESPACE) - batch = self._makeOne(client) - - self.assertEqual(batch.project, _PROJECT) - self.assertEqual(batch.connection, connection) - self.assertEqual(batch.namespace, _NAMESPACE) - self.assertTrue(batch._id is None) - self.assertEqual(batch._status, batch._INITIAL) - self.assertTrue(isinstance(batch._commit_request, - datastore_pb2.CommitRequest)) - self.assertTrue(batch.mutations is batch._commit_request.mutations) - self.assertEqual(batch._partial_key_entities, []) - - def test_current(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch1 = self._makeOne(client) - batch2 = self._makeOne(client) - self.assertTrue(batch1.current() is None) - self.assertTrue(batch2.current() is None) - with batch1: - self.assertTrue(batch1.current() is batch1) - self.assertTrue(batch2.current() is batch1) - with batch2: - self.assertTrue(batch1.current() is batch2) - self.assertTrue(batch2.current() is batch2) - self.assertTrue(batch1.current() is batch1) - self.assertTrue(batch2.current() is batch1) - self.assertTrue(batch1.current() is None) - self.assertTrue(batch2.current() is None) - - def test_put_entity_wo_key(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - - self.assertRaises(ValueError, batch.put, _Entity()) - - def test_put_entity_w_key_wrong_project(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - entity = _Entity() - entity.key = _Key('OTHER') - - self.assertRaises(ValueError, batch.put, entity) - - def test_put_entity_w_partial_key(self): - _PROJECT = 'PROJECT' - _PROPERTIES = {'foo': 'bar'} - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - entity = _Entity(_PROPERTIES) - key = entity.key = _Key(_PROJECT) - key._id = None - - batch.put(entity) - - mutated_entity = _mutated_pb(self, batch.mutations, 'insert') - self.assertEqual(mutated_entity.key, key._key) - self.assertEqual(batch._partial_key_entities, [entity]) - - def test_put_entity_w_completed_key(self): - from gcloud.datastore.helpers import _property_tuples - - _PROJECT = 'PROJECT' - _PROPERTIES = { - 'foo': 'bar', - 'baz': 'qux', - 'spam': [1, 2, 3], - 'frotz': [], # will be ignored - } - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - entity = _Entity(_PROPERTIES) - entity.exclude_from_indexes = ('baz', 'spam') - key = entity.key = 
_Key(_PROJECT) - - batch.put(entity) - - mutated_entity = _mutated_pb(self, batch.mutations, 'upsert') - self.assertEqual(mutated_entity.key, key._key) - - prop_dict = dict(_property_tuples(mutated_entity)) - self.assertEqual(len(prop_dict), 3) - self.assertFalse(prop_dict['foo'].exclude_from_indexes) - self.assertTrue(prop_dict['baz'].exclude_from_indexes) - self.assertFalse(prop_dict['spam'].exclude_from_indexes) - spam_values = prop_dict['spam'].array_value.values - self.assertTrue(spam_values[0].exclude_from_indexes) - self.assertTrue(spam_values[1].exclude_from_indexes) - self.assertTrue(spam_values[2].exclude_from_indexes) - self.assertFalse('frotz' in prop_dict) - - def test_delete_w_partial_key(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - key = _Key(_PROJECT) - key._id = None - - self.assertRaises(ValueError, batch.delete, key) - - def test_delete_w_key_wrong_project(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - key = _Key('OTHER') - - self.assertRaises(ValueError, batch.delete, key) - - def test_delete_w_completed_key(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - key = _Key(_PROJECT) - - batch.delete(key) - - mutated_key = _mutated_pb(self, batch.mutations, 'delete') - self.assertEqual(mutated_key, key._key) - - def test_begin(self): - _PROJECT = 'PROJECT' - client = _Client(_PROJECT, None) - batch = self._makeOne(client) - self.assertEqual(batch._status, batch._INITIAL) - batch.begin() - self.assertEqual(batch._status, batch._IN_PROGRESS) - - def test_begin_fail(self): - _PROJECT = 'PROJECT' - client = _Client(_PROJECT, None) - batch = self._makeOne(client) - batch._status = batch._IN_PROGRESS - with self.assertRaises(ValueError): - batch.begin() - - def test_rollback(self): - _PROJECT = 'PROJECT' - client = _Client(_PROJECT, None) - batch = self._makeOne(client) - self.assertEqual(batch._status, batch._INITIAL) - batch.rollback() - self.assertEqual(batch._status, batch._ABORTED) - - def test_commit(self): - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - - self.assertEqual(batch._status, batch._INITIAL) - batch.commit() - self.assertEqual(batch._status, batch._FINISHED) - - self.assertEqual(connection._committed, - [(_PROJECT, batch._commit_request, None)]) - - def test_commit_w_partial_key_entities(self): - _PROJECT = 'PROJECT' - _NEW_ID = 1234 - connection = _Connection(_NEW_ID) - client = _Client(_PROJECT, connection) - batch = self._makeOne(client) - entity = _Entity({}) - key = entity.key = _Key(_PROJECT) - key._id = None - batch._partial_key_entities.append(entity) - - self.assertEqual(batch._status, batch._INITIAL) - batch.commit() - self.assertEqual(batch._status, batch._FINISHED) - - self.assertEqual(connection._committed, - [(_PROJECT, batch._commit_request, None)]) - self.assertFalse(entity.key.is_partial) - self.assertEqual(entity.key._id, _NEW_ID) - - def test_as_context_mgr_wo_error(self): - _PROJECT = 'PROJECT' - _PROPERTIES = {'foo': 'bar'} - connection = _Connection() - entity = _Entity(_PROPERTIES) - key = entity.key = _Key(_PROJECT) - - client = _Client(_PROJECT, connection) - self.assertEqual(list(client._batches), []) - - with self._makeOne(client) as batch: - self.assertEqual(list(client._batches), [batch]) - 
batch.put(entity) - - self.assertEqual(list(client._batches), []) - - mutated_entity = _mutated_pb(self, batch.mutations, 'upsert') - self.assertEqual(mutated_entity.key, key._key) - self.assertEqual(connection._committed, - [(_PROJECT, batch._commit_request, None)]) - - def test_as_context_mgr_nested(self): - _PROJECT = 'PROJECT' - _PROPERTIES = {'foo': 'bar'} - connection = _Connection() - entity1 = _Entity(_PROPERTIES) - key1 = entity1.key = _Key(_PROJECT) - entity2 = _Entity(_PROPERTIES) - key2 = entity2.key = _Key(_PROJECT) - - client = _Client(_PROJECT, connection) - self.assertEqual(list(client._batches), []) - - with self._makeOne(client) as batch1: - self.assertEqual(list(client._batches), [batch1]) - batch1.put(entity1) - with self._makeOne(client) as batch2: - self.assertEqual(list(client._batches), [batch2, batch1]) - batch2.put(entity2) - - self.assertEqual(list(client._batches), [batch1]) - - self.assertEqual(list(client._batches), []) - - mutated_entity1 = _mutated_pb(self, batch1.mutations, 'upsert') - self.assertEqual(mutated_entity1.key, key1._key) - - mutated_entity2 = _mutated_pb(self, batch2.mutations, 'upsert') - self.assertEqual(mutated_entity2.key, key2._key) - - self.assertEqual(connection._committed, - [(_PROJECT, batch2._commit_request, None), - (_PROJECT, batch1._commit_request, None)]) - - def test_as_context_mgr_w_error(self): - _PROJECT = 'PROJECT' - _PROPERTIES = {'foo': 'bar'} - connection = _Connection() - entity = _Entity(_PROPERTIES) - key = entity.key = _Key(_PROJECT) - - client = _Client(_PROJECT, connection) - self.assertEqual(list(client._batches), []) - - try: - with self._makeOne(client) as batch: - self.assertEqual(list(client._batches), [batch]) - batch.put(entity) - raise ValueError("testing") - except ValueError: - pass - - self.assertEqual(list(client._batches), []) - - mutated_entity = _mutated_pb(self, batch.mutations, 'upsert') - self.assertEqual(mutated_entity.key, key._key) - self.assertEqual(connection._committed, []) - - -class _PathElementPB(object): - - def __init__(self, id): - self.id = id - - -class _KeyPB(object): - - def __init__(self, id): - self.path = [_PathElementPB(id)] - - -class _Connection(object): - _marker = object() - _save_result = (False, None) - - def __init__(self, *new_keys): - self._completed_keys = [_KeyPB(key) for key in new_keys] - self._committed = [] - self._index_updates = 0 - - def commit(self, project, commit_request, transaction_id): - self._committed.append((project, commit_request, transaction_id)) - return self._index_updates, self._completed_keys - - -class _Entity(dict): - key = None - exclude_from_indexes = () - _meanings = {} - - -class _Key(object): - _MARKER = object() - _kind = 'KIND' - _key = 'KEY' - _path = None - _id = 1234 - _stored = None - - def __init__(self, project): - self.project = project - - @property - def is_partial(self): - return self._id is None - - def to_protobuf(self): - from gcloud.datastore._generated import entity_pb2 - key = self._key = entity_pb2.Key() - # Don't assign it, because it will just get ripped out - # key.partition_id.project_id = self.project - - element = key.path.add() - element.kind = self._kind - if self._id is not None: - element.id = self._id - - return key - - def completed_key(self, new_id): - assert self.is_partial - new_key = self.__class__(self.project) - new_key._id = new_id - return new_key - - -class _Client(object): - - def __init__(self, project, connection, namespace=None): - self.project = project - self.connection = connection - 
self.namespace = namespace - self._batches = [] - - def _push_batch(self, batch): - self._batches.insert(0, batch) - - def _pop_batch(self): - return self._batches.pop(0) - - @property - def current_batch(self): - if self._batches: - return self._batches[0] - - -def _assert_num_mutations(test_case, mutation_pb_list, num_mutations): - test_case.assertEqual(len(mutation_pb_list), num_mutations) - - -def _mutated_pb(test_case, mutation_pb_list, mutation_type): - # Make sure there is only one mutation. - _assert_num_mutations(test_case, mutation_pb_list, 1) - - # We grab the only mutation. - mutated_pb = mutation_pb_list[0] - # Then check if it is the correct type. - test_case.assertEqual(mutated_pb.WhichOneof('operation'), - mutation_type) - - return getattr(mutated_pb, mutation_type) diff --git a/gcloud/datastore/test_client.py b/gcloud/datastore/test_client.py deleted file mode 100644 index a5e4acad608d..000000000000 --- a/gcloud/datastore/test_client.py +++ /dev/null @@ -1,969 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -def _make_entity_pb(project, kind, integer_id, name=None, str_val=None): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - entity_pb = entity_pb2.Entity() - entity_pb.key.partition_id.project_id = project - path_element = entity_pb.key.path.add() - path_element.kind = kind - path_element.id = integer_id - if name is not None and str_val is not None: - value_pb = _new_value_pb(entity_pb, name) - value_pb.string_value = str_val - - return entity_pb - - -class Test__get_gcd_project(unittest2.TestCase): - - def _callFUT(self): - from gcloud.datastore.client import _get_gcd_project - return _get_gcd_project() - - def test_no_value(self): - import os - from gcloud._testing import _Monkey - - environ = {} - with _Monkey(os, getenv=environ.get): - project = self._callFUT() - self.assertEqual(project, None) - - def test_value_set(self): - import os - from gcloud._testing import _Monkey - from gcloud.datastore.client import GCD_DATASET - - MOCK_PROJECT = object() - environ = {GCD_DATASET: MOCK_PROJECT} - with _Monkey(os, getenv=environ.get): - project = self._callFUT() - self.assertEqual(project, MOCK_PROJECT) - - -class Test__determine_default_project(unittest2.TestCase): - - def _callFUT(self, project=None): - from gcloud.datastore.client import ( - _determine_default_project) - return _determine_default_project(project=project) - - def _determine_default_helper(self, gcd=None, fallback=None, - project_called=None): - from gcloud._testing import _Monkey - from gcloud.datastore import client - - _callers = [] - - def gcd_mock(): - _callers.append('gcd_mock') - return gcd - - def fallback_mock(project=None): - _callers.append(('fallback_mock', project)) - return fallback - - patched_methods = { - '_get_gcd_project': gcd_mock, - '_base_default_project': fallback_mock, - } - - with _Monkey(client, **patched_methods): - returned_project = 
self._callFUT(project_called) - - return returned_project, _callers - - def test_no_value(self): - project, callers = self._determine_default_helper() - self.assertEqual(project, None) - self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)]) - - def test_explicit(self): - PROJECT = object() - project, callers = self._determine_default_helper( - project_called=PROJECT) - self.assertEqual(project, PROJECT) - self.assertEqual(callers, []) - - def test_gcd(self): - PROJECT = object() - project, callers = self._determine_default_helper(gcd=PROJECT) - self.assertEqual(project, PROJECT) - self.assertEqual(callers, ['gcd_mock']) - - def test_fallback(self): - PROJECT = object() - project, callers = self._determine_default_helper(fallback=PROJECT) - self.assertEqual(project, PROJECT) - self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)]) - - -class TestClient(unittest2.TestCase): - - PROJECT = 'PROJECT' - - def setUp(self): - KLASS = self._getTargetClass() - self.original_cnxn_class = KLASS._connection_class - KLASS._connection_class = _MockConnection - - def tearDown(self): - KLASS = self._getTargetClass() - KLASS._connection_class = self.original_cnxn_class - - def _getTargetClass(self): - from gcloud.datastore.client import Client - return Client - - def _makeOne(self, project=PROJECT, namespace=None, - credentials=None, http=None): - return self._getTargetClass()(project=project, - namespace=namespace, - credentials=credentials, - http=http) - - def test_ctor_w_project_no_environ(self): - from gcloud._testing import _Monkey - from gcloud.datastore import client as _MUT - - # Some environments (e.g. AppVeyor CI) run in GCE, so - # this test would fail artificially. - with _Monkey(_MUT, _base_default_project=lambda project: None): - self.assertRaises(EnvironmentError, self._makeOne, None) - - def test_ctor_w_implicit_inputs(self): - from gcloud._testing import _Monkey - from gcloud.datastore import client as _MUT - from gcloud import client as _base_client - - OTHER = 'other' - creds = object() - default_called = [] - - def fallback_mock(project): - default_called.append(project) - return project or OTHER - - klass = self._getTargetClass() - with _Monkey(_MUT, - _determine_default_project=fallback_mock): - with _Monkey(_base_client, - get_credentials=lambda: creds): - client = klass() - self.assertEqual(client.project, OTHER) - self.assertEqual(client.namespace, None) - self.assertTrue(isinstance(client.connection, _MockConnection)) - self.assertTrue(client.connection.credentials is creds) - self.assertTrue(client.connection.http is None) - self.assertTrue(client.current_batch is None) - self.assertTrue(client.current_transaction is None) - self.assertEqual(default_called, [None]) - - def test_ctor_w_explicit_inputs(self): - OTHER = 'other' - NAMESPACE = 'namespace' - creds = object() - http = object() - client = self._makeOne(project=OTHER, - namespace=NAMESPACE, - credentials=creds, - http=http) - self.assertEqual(client.project, OTHER) - self.assertEqual(client.namespace, NAMESPACE) - self.assertTrue(isinstance(client.connection, _MockConnection)) - self.assertTrue(client.connection.credentials is creds) - self.assertTrue(client.connection.http is http) - self.assertTrue(client.current_batch is None) - self.assertEqual(list(client._batch_stack), []) - - def test__push_batch_and__pop_batch(self): - creds = object() - client = self._makeOne(credentials=creds) - batch = client.batch() - xact = client.transaction() - client._push_batch(batch) - 
self.assertEqual(list(client._batch_stack), [batch]) - self.assertTrue(client.current_batch is batch) - self.assertTrue(client.current_transaction is None) - client._push_batch(xact) - self.assertTrue(client.current_batch is xact) - self.assertTrue(client.current_transaction is xact) - # list(_LocalStack) returns in reverse order. - self.assertEqual(list(client._batch_stack), [xact, batch]) - self.assertTrue(client._pop_batch() is xact) - self.assertEqual(list(client._batch_stack), [batch]) - self.assertTrue(client._pop_batch() is batch) - self.assertEqual(list(client._batch_stack), []) - - def test_get_miss(self): - _called_with = [] - - def _get_multi(*args, **kw): - _called_with.append((args, kw)) - return [] - - creds = object() - client = self._makeOne(credentials=creds) - client.get_multi = _get_multi - - key = object() - - self.assertTrue(client.get(key) is None) - - self.assertEqual(_called_with[0][0], ()) - self.assertEqual(_called_with[0][1]['keys'], [key]) - self.assertTrue(_called_with[0][1]['missing'] is None) - self.assertTrue(_called_with[0][1]['deferred'] is None) - - def test_get_hit(self): - _called_with = [] - _entity = object() - - def _get_multi(*args, **kw): - _called_with.append((args, kw)) - return [_entity] - - creds = object() - client = self._makeOne(credentials=creds) - client.get_multi = _get_multi - - key, missing, deferred = object(), [], [] - - self.assertTrue(client.get(key, missing, deferred) is _entity) - - self.assertEqual(_called_with[0][0], ()) - self.assertEqual(_called_with[0][1]['keys'], [key]) - self.assertTrue(_called_with[0][1]['missing'] is missing) - self.assertTrue(_called_with[0][1]['deferred'] is deferred) - - def test_get_multi_no_keys(self): - creds = object() - client = self._makeOne(credentials=creds) - results = client.get_multi([]) - self.assertEqual(results, []) - - def test_get_multi_miss(self): - from gcloud.datastore.key import Key - - creds = object() - client = self._makeOne(credentials=creds) - client.connection._add_lookup_result() - key = Key('Kind', 1234, project=self.PROJECT) - results = client.get_multi([key]) - self.assertEqual(results, []) - - def test_get_multi_miss_w_missing(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.key import Key - - KIND = 'Kind' - ID = 1234 - - # Make a missing entity pb to be returned from mock backend. - missed = entity_pb2.Entity() - missed.key.partition_id.project_id = self.PROJECT - path_element = missed.key.path.add() - path_element.kind = KIND - path_element.id = ID - - creds = object() - client = self._makeOne(credentials=creds) - # Set missing entity on mock connection. 
- client.connection._add_lookup_result(missing=[missed]) - - key = Key(KIND, ID, project=self.PROJECT) - missing = [] - entities = client.get_multi([key], missing=missing) - self.assertEqual(entities, []) - self.assertEqual([missed.key.to_protobuf() for missed in missing], - [key.to_protobuf()]) - - def test_get_multi_w_missing_non_empty(self): - from gcloud.datastore.key import Key - - creds = object() - client = self._makeOne(credentials=creds) - key = Key('Kind', 1234, project=self.PROJECT) - - missing = ['this', 'list', 'is', 'not', 'empty'] - self.assertRaises(ValueError, client.get_multi, - [key], missing=missing) - - def test_get_multi_w_deferred_non_empty(self): - from gcloud.datastore.key import Key - - creds = object() - client = self._makeOne(credentials=creds) - key = Key('Kind', 1234, project=self.PROJECT) - - deferred = ['this', 'list', 'is', 'not', 'empty'] - self.assertRaises(ValueError, client.get_multi, - [key], deferred=deferred) - - def test_get_multi_miss_w_deferred(self): - from gcloud.datastore.key import Key - - key = Key('Kind', 1234, project=self.PROJECT) - - # Set deferred entity on mock connection. - creds = object() - client = self._makeOne(credentials=creds) - client.connection._add_lookup_result(deferred=[key.to_protobuf()]) - - deferred = [] - entities = client.get_multi([key], deferred=deferred) - self.assertEqual(entities, []) - self.assertEqual([def_key.to_protobuf() for def_key in deferred], - [key.to_protobuf()]) - - def test_get_multi_w_deferred_from_backend_but_not_passed(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - from gcloud.datastore.key import Key - - key1 = Key('Kind', project=self.PROJECT) - key1_pb = key1.to_protobuf() - key2 = Key('Kind', 2345, project=self.PROJECT) - key2_pb = key2.to_protobuf() - - entity1_pb = entity_pb2.Entity() - entity1_pb.key.CopyFrom(key1_pb) - entity2_pb = entity_pb2.Entity() - entity2_pb.key.CopyFrom(key2_pb) - - creds = object() - client = self._makeOne(credentials=creds) - # mock up two separate requests - client.connection._add_lookup_result([entity1_pb], deferred=[key2_pb]) - client.connection._add_lookup_result([entity2_pb]) - - missing = [] - found = client.get_multi([key1, key2], missing=missing) - self.assertEqual(len(found), 2) - self.assertEqual(len(missing), 0) - - # Check the actual contents on the response. - self.assertTrue(isinstance(found[0], Entity)) - self.assertEqual(found[0].key.path, key1.path) - self.assertEqual(found[0].key.project, key1.project) - - self.assertTrue(isinstance(found[1], Entity)) - self.assertEqual(found[1].key.path, key2.path) - self.assertEqual(found[1].key.project, key2.project) - - cw = client.connection._lookup_cw - self.assertEqual(len(cw), 2) - - ds_id, k_pbs, eventual, tid = cw[0] - self.assertEqual(ds_id, self.PROJECT) - self.assertEqual(len(k_pbs), 2) - self.assertEqual(key1_pb, k_pbs[0]) - self.assertEqual(key2_pb, k_pbs[1]) - self.assertFalse(eventual) - self.assertTrue(tid is None) - - ds_id, k_pbs, eventual, tid = cw[1] - self.assertEqual(ds_id, self.PROJECT) - self.assertEqual(len(k_pbs), 1) - self.assertEqual(key2_pb, k_pbs[0]) - self.assertFalse(eventual) - self.assertTrue(tid is None) - - def test_get_multi_hit(self): - from gcloud.datastore.key import Key - - KIND = 'Kind' - ID = 1234 - PATH = [{'kind': KIND, 'id': ID}] - - # Make a found entity pb to be returned from mock backend. - entity_pb = _make_entity_pb(self.PROJECT, KIND, ID, 'foo', 'Foo') - - # Make a connection to return the entity pb. 
- creds = object() - client = self._makeOne(credentials=creds) - client.connection._add_lookup_result([entity_pb]) - - key = Key(KIND, ID, project=self.PROJECT) - result, = client.get_multi([key]) - new_key = result.key - - # Check the returned value is as expected. - self.assertFalse(new_key is key) - self.assertEqual(new_key.project, self.PROJECT) - self.assertEqual(new_key.path, PATH) - self.assertEqual(list(result), ['foo']) - self.assertEqual(result['foo'], 'Foo') - - def test_get_multi_hit_multiple_keys_same_project(self): - from gcloud.datastore.key import Key - - KIND = 'Kind' - ID1 = 1234 - ID2 = 2345 - - # Make a found entity pb to be returned from mock backend. - entity_pb1 = _make_entity_pb(self.PROJECT, KIND, ID1) - entity_pb2 = _make_entity_pb(self.PROJECT, KIND, ID2) - - # Make a connection to return the entity pbs. - creds = object() - client = self._makeOne(credentials=creds) - client.connection._add_lookup_result([entity_pb1, entity_pb2]) - - key1 = Key(KIND, ID1, project=self.PROJECT) - key2 = Key(KIND, ID2, project=self.PROJECT) - retrieved1, retrieved2 = client.get_multi([key1, key2]) - - # Check values match. - self.assertEqual(retrieved1.key.path, key1.path) - self.assertEqual(dict(retrieved1), {}) - self.assertEqual(retrieved2.key.path, key2.path) - self.assertEqual(dict(retrieved2), {}) - - def test_get_multi_hit_multiple_keys_different_project(self): - from gcloud.datastore.key import Key - - PROJECT1 = 'PROJECT' - PROJECT2 = 'PROJECT-ALT' - - # Make sure our IDs are actually different. - self.assertNotEqual(PROJECT1, PROJECT2) - - key1 = Key('KIND', 1234, project=PROJECT1) - key2 = Key('KIND', 1234, project=PROJECT2) - - creds = object() - client = self._makeOne(credentials=creds) - - with self.assertRaises(ValueError): - client.get_multi([key1, key2]) - - def test_get_multi_max_loops(self): - from gcloud._testing import _Monkey - from gcloud.datastore import client as _MUT - from gcloud.datastore.key import Key - - KIND = 'Kind' - ID = 1234 - - # Make a found entity pb to be returned from mock backend. - entity_pb = _make_entity_pb(self.PROJECT, KIND, ID, 'foo', 'Foo') - - # Make a connection to return the entity pb. - creds = object() - client = self._makeOne(credentials=creds) - client.connection._add_lookup_result([entity_pb]) - - key = Key(KIND, ID, project=self.PROJECT) - deferred = [] - missing = [] - with _Monkey(_MUT, _MAX_LOOPS=-1): - result = client.get_multi([key], missing=missing, - deferred=deferred) - - # Make sure we have no results, even though the connection has been - # set up as in `test_hit` to return a single result. 
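``missing`` and ``deferred``, exercised by the surrounding tests, are output arguments that the caller supplies as empty lists; a hedged usage sketch against a live client (the ``Person`` kind and ids are illustrative)::

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> keys = [client.key('Person', 1234), client.key('Person', 2345)]
    >>> missing = []    # receives entities the backend reports as absent
    >>> deferred = []   # receives keys the backend asks the caller to retry
    >>> found = client.get_multi(keys, missing=missing, deferred=deferred)
    >>> first = client.get(keys[0])  # single-key variant: returns the entity or None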
- self.assertEqual(result, []) - self.assertEqual(missing, []) - self.assertEqual(deferred, []) - - def test_put(self): - _called_with = [] - - def _put_multi(*args, **kw): - _called_with.append((args, kw)) - - creds = object() - client = self._makeOne(credentials=creds) - client.put_multi = _put_multi - entity = object() - - client.put(entity) - - self.assertEqual(_called_with[0][0], ()) - self.assertEqual(_called_with[0][1]['entities'], [entity]) - - def test_put_multi_no_entities(self): - creds = object() - client = self._makeOne(credentials=creds) - self.assertEqual(client.put_multi([]), None) - - def test_put_multi_w_single_empty_entity(self): - # https://github.com/GoogleCloudPlatform/gcloud-python/issues/649 - from gcloud.datastore.entity import Entity - - creds = object() - client = self._makeOne(credentials=creds) - self.assertRaises(ValueError, client.put_multi, Entity()) - - def test_put_multi_no_batch_w_partial_key(self): - from gcloud.datastore.helpers import _property_tuples - from gcloud.datastore.test_batch import _Entity - from gcloud.datastore.test_batch import _Key - from gcloud.datastore.test_batch import _KeyPB - from gcloud.datastore.test_batch import _mutated_pb - - entity = _Entity(foo=u'bar') - key = entity.key = _Key(self.PROJECT) - key._id = None - - creds = object() - client = self._makeOne(credentials=creds) - client.connection._commit.append([_KeyPB(key)]) - - result = client.put_multi([entity]) - self.assertTrue(result is None) - - self.assertEqual(len(client.connection._commit_cw), 1) - (project, - commit_req, transaction_id) = client.connection._commit_cw[0] - self.assertEqual(project, self.PROJECT) - - mutated_entity = _mutated_pb(self, commit_req.mutations, 'insert') - self.assertEqual(mutated_entity.key, key.to_protobuf()) - - prop_list = list(_property_tuples(mutated_entity)) - self.assertTrue(len(prop_list), 1) - name, value_pb = prop_list[0] - self.assertEqual(name, 'foo') - self.assertEqual(value_pb.string_value, u'bar') - - self.assertTrue(transaction_id is None) - - def test_put_multi_existing_batch_w_completed_key(self): - from gcloud.datastore.helpers import _property_tuples - from gcloud.datastore.test_batch import _Entity - from gcloud.datastore.test_batch import _Key - from gcloud.datastore.test_batch import _mutated_pb - - creds = object() - client = self._makeOne(credentials=creds) - entity = _Entity(foo=u'bar') - key = entity.key = _Key(self.PROJECT) - - with _NoCommitBatch(client) as CURR_BATCH: - result = client.put_multi([entity]) - - self.assertEqual(result, None) - mutated_entity = _mutated_pb(self, CURR_BATCH.mutations, 'upsert') - self.assertEqual(mutated_entity.key, key.to_protobuf()) - - prop_list = list(_property_tuples(mutated_entity)) - self.assertTrue(len(prop_list), 1) - name, value_pb = prop_list[0] - self.assertEqual(name, 'foo') - self.assertEqual(value_pb.string_value, u'bar') - - def test_delete(self): - _called_with = [] - - def _delete_multi(*args, **kw): - _called_with.append((args, kw)) - - creds = object() - client = self._makeOne(credentials=creds) - client.delete_multi = _delete_multi - key = object() - - client.delete(key) - - self.assertEqual(_called_with[0][0], ()) - self.assertEqual(_called_with[0][1]['keys'], [key]) - - def test_delete_multi_no_keys(self): - creds = object() - client = self._makeOne(credentials=creds) - result = client.delete_multi([]) - self.assertEqual(result, None) - self.assertEqual(len(client.connection._commit_cw), 0) - - def test_delete_multi_no_batch(self): - from 
gcloud.datastore.test_batch import _Key - from gcloud.datastore.test_batch import _mutated_pb - - key = _Key(self.PROJECT) - - creds = object() - client = self._makeOne(credentials=creds) - client.connection._commit.append([]) - - result = client.delete_multi([key]) - self.assertEqual(result, None) - self.assertEqual(len(client.connection._commit_cw), 1) - (project, - commit_req, transaction_id) = client.connection._commit_cw[0] - self.assertEqual(project, self.PROJECT) - - mutated_key = _mutated_pb(self, commit_req.mutations, 'delete') - self.assertEqual(mutated_key, key.to_protobuf()) - self.assertTrue(transaction_id is None) - - def test_delete_multi_w_existing_batch(self): - from gcloud.datastore.test_batch import _Key - from gcloud.datastore.test_batch import _mutated_pb - - creds = object() - client = self._makeOne(credentials=creds) - key = _Key(self.PROJECT) - - with _NoCommitBatch(client) as CURR_BATCH: - result = client.delete_multi([key]) - - self.assertEqual(result, None) - mutated_key = _mutated_pb(self, CURR_BATCH.mutations, 'delete') - self.assertEqual(mutated_key, key._key) - self.assertEqual(len(client.connection._commit_cw), 0) - - def test_delete_multi_w_existing_transaction(self): - from gcloud.datastore.test_batch import _Key - from gcloud.datastore.test_batch import _mutated_pb - - creds = object() - client = self._makeOne(credentials=creds) - key = _Key(self.PROJECT) - - with _NoCommitTransaction(client) as CURR_XACT: - result = client.delete_multi([key]) - - self.assertEqual(result, None) - mutated_key = _mutated_pb(self, CURR_XACT.mutations, 'delete') - self.assertEqual(mutated_key, key._key) - self.assertEqual(len(client.connection._commit_cw), 0) - - def test_allocate_ids_w_partial_key(self): - from gcloud.datastore.test_batch import _Key - - NUM_IDS = 2 - - INCOMPLETE_KEY = _Key(self.PROJECT) - INCOMPLETE_KEY._id = None - - creds = object() - client = self._makeOne(credentials=creds) - - result = client.allocate_ids(INCOMPLETE_KEY, NUM_IDS) - - # Check the IDs returned. 
- self.assertEqual([key._id for key in result], list(range(NUM_IDS))) - - def test_allocate_ids_with_completed_key(self): - from gcloud.datastore.test_batch import _Key - - creds = object() - client = self._makeOne(credentials=creds) - - COMPLETE_KEY = _Key(self.PROJECT) - self.assertRaises(ValueError, client.allocate_ids, COMPLETE_KEY, 2) - - def test_key_w_project(self): - KIND = 'KIND' - ID = 1234 - - creds = object() - client = self._makeOne(credentials=creds) - - self.assertRaises(TypeError, - client.key, KIND, ID, project=self.PROJECT) - - def test_key_wo_project(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - ID = 1234 - - creds = object() - client = self._makeOne(credentials=creds) - - with _Monkey(MUT, Key=_Dummy): - key = client.key(KIND, ID) - - self.assertTrue(isinstance(key, _Dummy)) - self.assertEqual(key.args, (KIND, ID)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': None, - } - self.assertEqual(key.kwargs, expected_kwargs) - - def test_key_w_namespace(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - ID = 1234 - NAMESPACE = object() - - creds = object() - client = self._makeOne(namespace=NAMESPACE, credentials=creds) - - with _Monkey(MUT, Key=_Dummy): - key = client.key(KIND, ID) - - self.assertTrue(isinstance(key, _Dummy)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': NAMESPACE, - } - self.assertEqual(key.kwargs, expected_kwargs) - - def test_key_w_namespace_collision(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - ID = 1234 - NAMESPACE1 = object() - NAMESPACE2 = object() - - creds = object() - client = self._makeOne(namespace=NAMESPACE1, credentials=creds) - - with _Monkey(MUT, Key=_Dummy): - key = client.key(KIND, ID, namespace=NAMESPACE2) - - self.assertTrue(isinstance(key, _Dummy)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': NAMESPACE2, - } - self.assertEqual(key.kwargs, expected_kwargs) - - def test_batch(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - creds = object() - client = self._makeOne(credentials=creds) - - with _Monkey(MUT, Batch=_Dummy): - batch = client.batch() - - self.assertTrue(isinstance(batch, _Dummy)) - self.assertEqual(batch.args, (client,)) - self.assertEqual(batch.kwargs, {}) - - def test_transaction_defaults(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - creds = object() - client = self._makeOne(credentials=creds) - - with _Monkey(MUT, Transaction=_Dummy): - xact = client.transaction() - - self.assertTrue(isinstance(xact, _Dummy)) - self.assertEqual(xact.args, (client,)) - self.assertEqual(xact.kwargs, {}) - - def test_query_w_client(self): - KIND = 'KIND' - - creds = object() - client = self._makeOne(credentials=creds) - other = self._makeOne(credentials=object()) - - self.assertRaises(TypeError, client.query, kind=KIND, client=other) - - def test_query_w_project(self): - KIND = 'KIND' - - creds = object() - client = self._makeOne(credentials=creds) - - self.assertRaises(TypeError, - client.query, kind=KIND, project=self.PROJECT) - - def test_query_w_defaults(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - creds = object() - client = self._makeOne(credentials=creds) - - with _Monkey(MUT, Query=_Dummy): - query = client.query() - - self.assertTrue(isinstance(query, _Dummy)) 
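The factory helpers exercised above (``key``, ``allocate_ids``, ``batch``) are typically combined when writing many entities at once; a hedged sketch (assuming a configured client; the kind and property name are illustrative)::

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> incomplete = client.key('Person')            # no id yet: a partial key
    >>> keys = client.allocate_ids(incomplete, 10)   # ten completed keys from the backend
    >>> with client.batch() as batch:                # mutations are committed on exit
    ...     for key in keys:
    ...         entity = datastore.Entity(key=key)
    ...         entity['imported'] = True
    ...         batch.put(entity)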
- self.assertEqual(query.args, (client,)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': None, - } - self.assertEqual(query.kwargs, expected_kwargs) - - def test_query_explicit(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - NAMESPACE = 'NAMESPACE' - ANCESTOR = object() - FILTERS = [('PROPERTY', '==', 'VALUE')] - PROJECTION = ['__key__'] - ORDER = ['PROPERTY'] - DISTINCT_ON = ['DISTINCT_ON'] - - creds = object() - client = self._makeOne(credentials=creds) - - with _Monkey(MUT, Query=_Dummy): - query = client.query( - kind=KIND, - namespace=NAMESPACE, - ancestor=ANCESTOR, - filters=FILTERS, - projection=PROJECTION, - order=ORDER, - distinct_on=DISTINCT_ON, - ) - - self.assertTrue(isinstance(query, _Dummy)) - self.assertEqual(query.args, (client,)) - kwargs = { - 'project': self.PROJECT, - 'kind': KIND, - 'namespace': NAMESPACE, - 'ancestor': ANCESTOR, - 'filters': FILTERS, - 'projection': PROJECTION, - 'order': ORDER, - 'distinct_on': DISTINCT_ON, - } - self.assertEqual(query.kwargs, kwargs) - - def test_query_w_namespace(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - NAMESPACE = object() - - creds = object() - client = self._makeOne(namespace=NAMESPACE, credentials=creds) - - with _Monkey(MUT, Query=_Dummy): - query = client.query(kind=KIND) - - self.assertTrue(isinstance(query, _Dummy)) - self.assertEqual(query.args, (client,)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': NAMESPACE, - 'kind': KIND, - } - self.assertEqual(query.kwargs, expected_kwargs) - - def test_query_w_namespace_collision(self): - from gcloud.datastore import client as MUT - from gcloud._testing import _Monkey - - KIND = 'KIND' - NAMESPACE1 = object() - NAMESPACE2 = object() - - creds = object() - client = self._makeOne(namespace=NAMESPACE1, credentials=creds) - - with _Monkey(MUT, Query=_Dummy): - query = client.query(kind=KIND, namespace=NAMESPACE2) - - self.assertTrue(isinstance(query, _Dummy)) - self.assertEqual(query.args, (client,)) - expected_kwargs = { - 'project': self.PROJECT, - 'namespace': NAMESPACE2, - 'kind': KIND, - } - self.assertEqual(query.kwargs, expected_kwargs) - - -class _Dummy(object): - - def __init__(self, *args, **kwargs): - self.args = args - self.kwargs = kwargs - - -class _MockConnection(object): - - def __init__(self, credentials=None, http=None): - self.credentials = credentials - self.http = http - self._lookup_cw = [] - self._lookup = [] - self._commit_cw = [] - self._commit = [] - self._alloc_cw = [] - self._alloc = [] - self._index_updates = 0 - - def _add_lookup_result(self, results=(), missing=(), deferred=()): - self._lookup.append((list(results), list(missing), list(deferred))) - - def lookup(self, project, key_pbs, eventual=False, transaction_id=None): - self._lookup_cw.append((project, key_pbs, eventual, transaction_id)) - triple, self._lookup = self._lookup[0], self._lookup[1:] - results, missing, deferred = triple - return results, missing, deferred - - def commit(self, project, commit_request, transaction_id): - self._commit_cw.append((project, commit_request, transaction_id)) - response, self._commit = self._commit[0], self._commit[1:] - return self._index_updates, response - - def allocate_ids(self, project, key_pbs): - from gcloud.datastore.test_connection import _KeyProto - self._alloc_cw.append((project, key_pbs)) - num_pbs = len(key_pbs) - return [_KeyProto(i) for i in list(range(num_pbs))] - - -class 
_NoCommitBatch(object): - - def __init__(self, client): - from gcloud.datastore.batch import Batch - self._client = client - self._batch = Batch(client) - - def __enter__(self): - self._client._push_batch(self._batch) - return self._batch - - def __exit__(self, *args): - self._client._pop_batch() - - -class _NoCommitTransaction(object): - - def __init__(self, client, transaction_id='TRANSACTION'): - from gcloud.datastore.transaction import Transaction - self._client = client - xact = self._transaction = Transaction(client) - xact._id = transaction_id - - def __enter__(self): - self._client._push_batch(self._transaction) - return self._transaction - - def __exit__(self, *args): - self._client._pop_batch() diff --git a/gcloud/datastore/test_connection.py b/gcloud/datastore/test_connection.py deleted file mode 100644 index f7a036bd4796..000000000000 --- a/gcloud/datastore/test_connection.py +++ /dev/null @@ -1,873 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.datastore.connection import Connection - - return Connection - - def _make_key_pb(self, project, id=1234): - from gcloud.datastore.key import Key - path_args = ('Kind',) - if id is not None: - path_args += (id,) - return Key(*path_args, project=project).to_protobuf() - - def _make_query_pb(self, kind): - from gcloud.datastore._generated import query_pb2 - pb = query_pb2.Query() - pb.kind.add().name = kind - return pb - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _verifyProtobufCall(self, called_with, URI, conn): - self.assertEqual(called_with['uri'], URI) - self.assertEqual(called_with['method'], 'POST') - self.assertEqual(called_with['headers']['Content-Type'], - 'application/x-protobuf') - self.assertEqual(called_with['headers']['User-Agent'], - conn.USER_AGENT) - - def test_default_url(self): - klass = self._getTargetClass() - conn = self._makeOne() - self.assertEqual(conn.api_base_url, klass.API_BASE_URL) - - def test_custom_url_from_env(self): - import os - from gcloud._testing import _Monkey - from gcloud.connection import API_BASE_URL - from gcloud.environment_vars import GCD_HOST - - HOST = 'CURR_HOST' - fake_environ = {GCD_HOST: HOST} - - with _Monkey(os, environ=fake_environ): - conn = self._makeOne() - - self.assertNotEqual(conn.api_base_url, API_BASE_URL) - self.assertEqual(conn.api_base_url, HOST + '/datastore') - - def test_custom_url_from_constructor(self): - from gcloud.connection import API_BASE_URL - - HOST = object() - conn = self._makeOne(api_base_url=HOST) - self.assertNotEqual(conn.api_base_url, API_BASE_URL) - self.assertEqual(conn.api_base_url, HOST) - - def test_custom_url_constructor_and_env(self): - import os - from gcloud._testing import _Monkey - from gcloud.connection import API_BASE_URL - from gcloud.environment_vars import GCD_HOST - - HOST1 = object() - HOST2 = object() - fake_environ = 
{GCD_HOST: HOST1} - - with _Monkey(os, environ=fake_environ): - conn = self._makeOne(api_base_url=HOST2) - - self.assertNotEqual(conn.api_base_url, API_BASE_URL) - self.assertNotEqual(conn.api_base_url, HOST1) - self.assertEqual(conn.api_base_url, HOST2) - - def test_ctor_defaults(self): - conn = self._makeOne() - self.assertEqual(conn.credentials, None) - - def test_ctor_explicit(self): - class Creds(object): - - def create_scoped_required(self): - return False - - creds = Creds() - conn = self._makeOne(creds) - self.assertTrue(conn.credentials is creds) - - def test_http_w_existing(self): - conn = self._makeOne() - conn._http = http = object() - self.assertTrue(conn.http is http) - - def test_http_wo_creds(self): - import httplib2 - - conn = self._makeOne() - self.assertTrue(isinstance(conn.http, httplib2.Http)) - - def test_http_w_creds(self): - import httplib2 - - authorized = object() - - class Creds(object): - - def authorize(self, http): - self._called_with = http - return authorized - - def create_scoped_required(self): - return False - - creds = Creds() - conn = self._makeOne(creds) - self.assertTrue(conn.http is authorized) - self.assertTrue(isinstance(creds._called_with, httplib2.Http)) - - def test__request_w_200(self): - PROJECT = 'PROJECT' - METHOD = 'METHOD' - DATA = b'DATA' - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':' + METHOD, - ]) - http = conn._http = Http({'status': '200'}, 'CONTENT') - self.assertEqual(conn._request(PROJECT, METHOD, DATA), 'CONTENT') - self._verifyProtobufCall(http._called_with, URI, conn) - self.assertEqual(http._called_with['body'], DATA) - - def test__request_not_200(self): - from gcloud.exceptions import BadRequest - from google.rpc import status_pb2 - - error = status_pb2.Status() - error.message = 'Entity value is indexed.' - error.code = 9 # FAILED_PRECONDITION - - PROJECT = 'PROJECT' - METHOD = 'METHOD' - DATA = 'DATA' - conn = self._makeOne() - conn._http = Http({'status': '400'}, error.SerializeToString()) - with self.assertRaises(BadRequest) as e: - conn._request(PROJECT, METHOD, DATA) - expected_message = '400 Entity value is indexed.' 
- self.assertEqual(str(e.exception), expected_message) - - def test__rpc(self): - - class ReqPB(object): - - def SerializeToString(self): - return REQPB - - class RspPB(object): - - def __init__(self, pb): - self._pb = pb - - @classmethod - def FromString(cls, pb): - return cls(pb) - - REQPB = b'REQPB' - PROJECT = 'PROJECT' - METHOD = 'METHOD' - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':' + METHOD, - ]) - http = conn._http = Http({'status': '200'}, 'CONTENT') - response = conn._rpc(PROJECT, METHOD, ReqPB(), RspPB) - self.assertTrue(isinstance(response, RspPB)) - self.assertEqual(response._pb, 'CONTENT') - self._verifyProtobufCall(http._called_with, URI, conn) - self.assertEqual(http._called_with['body'], REQPB) - - def test_build_api_url_w_default_base_version(self): - PROJECT = 'PROJECT' - METHOD = 'METHOD' - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':' + METHOD, - ]) - self.assertEqual(conn.build_api_url(PROJECT, METHOD), URI) - - def test_build_api_url_w_explicit_base_version(self): - BASE = 'http://example.com/' - VER = '3.1415926' - PROJECT = 'PROJECT' - METHOD = 'METHOD' - conn = self._makeOne() - URI = '/'.join([ - BASE, - VER, - 'projects', - PROJECT + ':' + METHOD, - ]) - self.assertEqual(conn.build_api_url(PROJECT, METHOD, BASE, VER), - URI) - - def test_lookup_single_key_empty_response(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.LookupResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - found, missing, deferred = conn.lookup(PROJECT, [key_pb]) - self.assertEqual(len(found), 0) - self.assertEqual(len(missing), 0) - self.assertEqual(len(deferred), 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 1) - self.assertEqual(key_pb, keys[0]) - - def test_lookup_single_key_empty_response_w_eventual(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.LookupResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - found, missing, deferred = conn.lookup(PROJECT, [key_pb], - eventual=True) - self.assertEqual(len(found), 0) - self.assertEqual(len(missing), 0) - self.assertEqual(len(deferred), 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 1) - self.assertEqual(key_pb, keys[0]) - self.assertEqual(request.read_options.read_consistency, - datastore_pb2.ReadOptions.EVENTUAL) - self.assertEqual(request.read_options.transaction, b'') - - def test_lookup_single_key_empty_response_w_eventual_and_transaction(self): - PROJECT = 'PROJECT' - TRANSACTION = b'TRANSACTION' - key_pb = self._make_key_pb(PROJECT) - conn = self._makeOne() - self.assertRaises(ValueError, conn.lookup, PROJECT, key_pb, - 
eventual=True, transaction_id=TRANSACTION) - - def test_lookup_single_key_empty_response_w_transaction(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - TRANSACTION = b'TRANSACTION' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.LookupResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - found, missing, deferred = conn.lookup(PROJECT, [key_pb], - transaction_id=TRANSACTION) - self.assertEqual(len(found), 0) - self.assertEqual(len(missing), 0) - self.assertEqual(len(deferred), 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 1) - self.assertEqual(key_pb, keys[0]) - self.assertEqual(request.read_options.transaction, TRANSACTION) - - def test_lookup_single_key_nonempty_response(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import entity_pb2 - - PROJECT = 'PROJECT' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.LookupResponse() - entity = entity_pb2.Entity() - entity.key.CopyFrom(key_pb) - rsp_pb.found.add(entity=entity) - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - (found,), missing, deferred = conn.lookup(PROJECT, [key_pb]) - self.assertEqual(len(missing), 0) - self.assertEqual(len(deferred), 0) - self.assertEqual(found.key.path[0].kind, 'Kind') - self.assertEqual(found.key.path[0].id, 1234) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 1) - self.assertEqual(key_pb, keys[0]) - - def test_lookup_multiple_keys_empty_response(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - key_pb1 = self._make_key_pb(PROJECT) - key_pb2 = self._make_key_pb(PROJECT, id=2345) - rsp_pb = datastore_pb2.LookupResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - found, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2]) - self.assertEqual(len(found), 0) - self.assertEqual(len(missing), 0) - self.assertEqual(len(deferred), 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 2) - self.assertEqual(key_pb1, keys[0]) - self.assertEqual(key_pb2, keys[1]) - - def test_lookup_multiple_keys_w_missing(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - key_pb1 = self._make_key_pb(PROJECT) - key_pb2 = self._make_key_pb(PROJECT, id=2345) - rsp_pb = datastore_pb2.LookupResponse() - er_1 = rsp_pb.missing.add() - er_1.entity.key.CopyFrom(key_pb1) - er_2 = rsp_pb.missing.add() - er_2.entity.key.CopyFrom(key_pb2) - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + 
':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - result, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2]) - self.assertEqual(result, []) - self.assertEqual(len(deferred), 0) - self.assertEqual([missed.key for missed in missing], - [key_pb1, key_pb2]) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 2) - self.assertEqual(key_pb1, keys[0]) - self.assertEqual(key_pb2, keys[1]) - - def test_lookup_multiple_keys_w_deferred(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - key_pb1 = self._make_key_pb(PROJECT) - key_pb2 = self._make_key_pb(PROJECT, id=2345) - rsp_pb = datastore_pb2.LookupResponse() - rsp_pb.deferred.add().CopyFrom(key_pb1) - rsp_pb.deferred.add().CopyFrom(key_pb2) - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':lookup', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - result, missing, deferred = conn.lookup(PROJECT, [key_pb1, key_pb2]) - self.assertEqual(result, []) - self.assertEqual(len(missing), 0) - self.assertEqual([def_key for def_key in deferred], [key_pb1, key_pb2]) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - self.assertEqual(cw['uri'], URI) - self.assertEqual(cw['method'], 'POST') - self.assertEqual(cw['headers']['Content-Type'], - 'application/x-protobuf') - self.assertEqual(cw['headers']['User-Agent'], conn.USER_AGENT) - rq_class = datastore_pb2.LookupRequest - request = rq_class() - request.ParseFromString(cw['body']) - keys = list(request.keys) - self.assertEqual(len(keys), 2) - self.assertEqual(key_pb1, keys[0]) - self.assertEqual(key_pb2, keys[1]) - - def test_run_query_w_eventual_no_transaction(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import query_pb2 - - PROJECT = 'PROJECT' - KIND = 'Nonesuch' - CURSOR = b'\x00' - q_pb = self._make_query_pb(KIND) - rsp_pb = datastore_pb2.RunQueryResponse() - rsp_pb.batch.end_cursor = CURSOR - no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS - rsp_pb.batch.more_results = no_more - rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':runQuery', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - pbs, end, more, skipped = conn.run_query(PROJECT, q_pb, - eventual=True) - self.assertEqual(pbs, []) - self.assertEqual(end, CURSOR) - self.assertTrue(more) - self.assertEqual(skipped, 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.RunQueryRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.partition_id.namespace_id, '') - self.assertEqual(request.query, q_pb) - self.assertEqual(request.read_options.read_consistency, - datastore_pb2.ReadOptions.EVENTUAL) - self.assertEqual(request.read_options.transaction, b'') - - def test_run_query_wo_eventual_w_transaction(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import query_pb2 - - PROJECT = 'PROJECT' - KIND = 'Nonesuch' - CURSOR = b'\x00' - TRANSACTION = b'TRANSACTION' - q_pb = self._make_query_pb(KIND) - rsp_pb = datastore_pb2.RunQueryResponse() - 
rsp_pb.batch.end_cursor = CURSOR - no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS - rsp_pb.batch.more_results = no_more - rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':runQuery', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - pbs, end, more, skipped = conn.run_query( - PROJECT, q_pb, transaction_id=TRANSACTION) - self.assertEqual(pbs, []) - self.assertEqual(end, CURSOR) - self.assertTrue(more) - self.assertEqual(skipped, 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.RunQueryRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.partition_id.namespace_id, '') - self.assertEqual(request.query, q_pb) - self.assertEqual( - request.read_options.read_consistency, - datastore_pb2.ReadOptions.READ_CONSISTENCY_UNSPECIFIED) - self.assertEqual(request.read_options.transaction, TRANSACTION) - - def test_run_query_w_eventual_and_transaction(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import query_pb2 - - PROJECT = 'PROJECT' - KIND = 'Nonesuch' - CURSOR = b'\x00' - TRANSACTION = b'TRANSACTION' - q_pb = self._make_query_pb(KIND) - rsp_pb = datastore_pb2.RunQueryResponse() - rsp_pb.batch.end_cursor = CURSOR - no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS - rsp_pb.batch.more_results = no_more - rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL - conn = self._makeOne() - self.assertRaises(ValueError, conn.run_query, PROJECT, q_pb, - eventual=True, transaction_id=TRANSACTION) - - def test_run_query_wo_namespace_empty_result(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import query_pb2 - - PROJECT = 'PROJECT' - KIND = 'Nonesuch' - CURSOR = b'\x00' - q_pb = self._make_query_pb(KIND) - rsp_pb = datastore_pb2.RunQueryResponse() - rsp_pb.batch.end_cursor = CURSOR - no_more = query_pb2.QueryResultBatch.NO_MORE_RESULTS - rsp_pb.batch.more_results = no_more - rsp_pb.batch.entity_result_type = query_pb2.EntityResult.FULL - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':runQuery', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - pbs, end, more, skipped = conn.run_query(PROJECT, q_pb) - self.assertEqual(pbs, []) - self.assertEqual(end, CURSOR) - self.assertTrue(more) - self.assertEqual(skipped, 0) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.RunQueryRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.partition_id.namespace_id, '') - self.assertEqual(request.query, q_pb) - - def test_run_query_w_namespace_nonempty_result(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import entity_pb2 - - PROJECT = 'PROJECT' - KIND = 'Kind' - entity = entity_pb2.Entity() - q_pb = self._make_query_pb(KIND) - rsp_pb = datastore_pb2.RunQueryResponse() - rsp_pb.batch.entity_results.add(entity=entity) - rsp_pb.batch.entity_result_type = 1 # FULL - rsp_pb.batch.more_results = 3 # NO_MORE_RESULTS - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':runQuery', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - pbs = conn.run_query(PROJECT, 
q_pb, 'NS')[0] - self.assertEqual(len(pbs), 1) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.RunQueryRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.partition_id.namespace_id, 'NS') - self.assertEqual(request.query, q_pb) - - def test_begin_transaction(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - TRANSACTION = b'TRANSACTION' - rsp_pb = datastore_pb2.BeginTransactionResponse() - rsp_pb.transaction = TRANSACTION - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':beginTransaction', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - self.assertEqual(conn.begin_transaction(PROJECT), TRANSACTION) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.BeginTransactionRequest - request = rq_class() - request.ParseFromString(cw['body']) - - def test_commit_wo_transaction(self): - from gcloud._testing import _Monkey - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore import connection as MUT - from gcloud.datastore.helpers import _new_value_pb - - PROJECT = 'PROJECT' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.CommitResponse() - req_pb = datastore_pb2.CommitRequest() - mutation = req_pb.mutations.add() - insert = mutation.upsert - insert.key.CopyFrom(key_pb) - value_pb = _new_value_pb(insert, 'foo') - value_pb.string_value = u'Foo' - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':commit', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - - # Set up mock for parsing the response. - expected_result = object() - _parsed = [] - - def mock_parse(response): - _parsed.append(response) - return expected_result - - with _Monkey(MUT, _parse_commit_response=mock_parse): - result = conn.commit(PROJECT, req_pb, None) - - self.assertTrue(result is expected_result) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.CommitRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.transaction, b'') - self.assertEqual(list(request.mutations), [mutation]) - self.assertEqual(request.mode, rq_class.NON_TRANSACTIONAL) - self.assertEqual(_parsed, [rsp_pb]) - - def test_commit_w_transaction(self): - from gcloud._testing import _Monkey - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore import connection as MUT - from gcloud.datastore.helpers import _new_value_pb - - PROJECT = 'PROJECT' - key_pb = self._make_key_pb(PROJECT) - rsp_pb = datastore_pb2.CommitResponse() - req_pb = datastore_pb2.CommitRequest() - mutation = req_pb.mutations.add() - insert = mutation.upsert - insert.key.CopyFrom(key_pb) - value_pb = _new_value_pb(insert, 'foo') - value_pb.string_value = u'Foo' - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':commit', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - - # Set up mock for parsing the response. 
- expected_result = object() - _parsed = [] - - def mock_parse(response): - _parsed.append(response) - return expected_result - - with _Monkey(MUT, _parse_commit_response=mock_parse): - result = conn.commit(PROJECT, req_pb, b'xact') - - self.assertTrue(result is expected_result) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.CommitRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.transaction, b'xact') - self.assertEqual(list(request.mutations), [mutation]) - self.assertEqual(request.mode, rq_class.TRANSACTIONAL) - self.assertEqual(_parsed, [rsp_pb]) - - def test_rollback_ok(self): - from gcloud.datastore._generated import datastore_pb2 - PROJECT = 'PROJECT' - TRANSACTION = b'xact' - - rsp_pb = datastore_pb2.RollbackResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':rollback', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - self.assertEqual(conn.rollback(PROJECT, TRANSACTION), None) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.RollbackRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(request.transaction, TRANSACTION) - - def test_allocate_ids_empty(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - rsp_pb = datastore_pb2.AllocateIdsResponse() - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':allocateIds', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - self.assertEqual(conn.allocate_ids(PROJECT, []), []) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.AllocateIdsRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(list(request.keys), []) - - def test_allocate_ids_non_empty(self): - from gcloud.datastore._generated import datastore_pb2 - - PROJECT = 'PROJECT' - before_key_pbs = [ - self._make_key_pb(PROJECT, id=None), - self._make_key_pb(PROJECT, id=None), - ] - after_key_pbs = [ - self._make_key_pb(PROJECT), - self._make_key_pb(PROJECT, id=2345), - ] - rsp_pb = datastore_pb2.AllocateIdsResponse() - rsp_pb.keys.add().CopyFrom(after_key_pbs[0]) - rsp_pb.keys.add().CopyFrom(after_key_pbs[1]) - conn = self._makeOne() - URI = '/'.join([ - conn.api_base_url, - conn.API_VERSION, - 'projects', - PROJECT + ':allocateIds', - ]) - http = conn._http = Http({'status': '200'}, rsp_pb.SerializeToString()) - self.assertEqual(conn.allocate_ids(PROJECT, before_key_pbs), - after_key_pbs) - cw = http._called_with - self._verifyProtobufCall(cw, URI, conn) - rq_class = datastore_pb2.AllocateIdsRequest - request = rq_class() - request.ParseFromString(cw['body']) - self.assertEqual(len(request.keys), len(before_key_pbs)) - for key_before, key_after in zip(before_key_pbs, request.keys): - self.assertEqual(key_before, key_after) - - -class Test__parse_commit_response(unittest2.TestCase): - - def _callFUT(self, commit_response_pb): - from gcloud.datastore.connection import _parse_commit_response - return _parse_commit_response(commit_response_pb) - - def test_it(self): - from gcloud.datastore._generated import datastore_pb2 - from gcloud.datastore._generated import entity_pb2 - - index_updates = 1337 - keys = [ - entity_pb2.Key( - path=[ - entity_pb2.Key.PathElement( - kind='Foo', - id=1234, - ), - ], - ), - entity_pb2.Key( - 
path=[ - entity_pb2.Key.PathElement( - kind='Bar', - name='baz', - ), - ], - ), - ] - response = datastore_pb2.CommitResponse( - mutation_results=[ - datastore_pb2.MutationResult(key=key) for key in keys - ], - index_updates=index_updates, - ) - result = self._callFUT(response) - self.assertEqual(result, (index_updates, keys)) - - -class Http(object): - - _called_with = None - - def __init__(self, headers, content): - from httplib2 import Response - self._response = Response(headers) - self._content = content - - def request(self, **kw): - self._called_with = kw - return self._response, self._content - - -class _PathElementProto(object): - - def __init__(self, _id): - self.id = _id - - -class _KeyProto(object): - - def __init__(self, id_): - self.path = [_PathElementProto(id_)] diff --git a/gcloud/datastore/test_entity.py b/gcloud/datastore/test_entity.py deleted file mode 100644 index ce9e635f0d75..000000000000 --- a/gcloud/datastore/test_entity.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - -_PROJECT = 'PROJECT' -_KIND = 'KIND' -_ID = 1234 - - -class TestEntity(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.datastore.entity import Entity - return Entity - - def _makeOne(self, key=None, exclude_from_indexes=()): - klass = self._getTargetClass() - return klass(key=key, exclude_from_indexes=exclude_from_indexes) - - def test_ctor_defaults(self): - klass = self._getTargetClass() - entity = klass() - self.assertEqual(entity.key, None) - self.assertEqual(entity.kind, None) - self.assertEqual(sorted(entity.exclude_from_indexes), []) - - def test_ctor_explicit(self): - _EXCLUDE_FROM_INDEXES = ['foo', 'bar'] - key = _Key() - entity = self._makeOne( - key=key, exclude_from_indexes=_EXCLUDE_FROM_INDEXES) - self.assertEqual(sorted(entity.exclude_from_indexes), - sorted(_EXCLUDE_FROM_INDEXES)) - - def test_ctor_bad_exclude_from_indexes(self): - BAD_EXCLUDE_FROM_INDEXES = object() - key = _Key() - self.assertRaises(TypeError, self._makeOne, key=key, - exclude_from_indexes=BAD_EXCLUDE_FROM_INDEXES) - - def test___eq_____ne___w_non_entity(self): - from gcloud.datastore.key import Key - key = Key(_KIND, _ID, project=_PROJECT) - entity = self._makeOne(key=key) - self.assertFalse(entity == object()) - self.assertTrue(entity != object()) - - def test___eq_____ne___w_different_keys(self): - from gcloud.datastore.key import Key - _ID1 = 1234 - _ID2 = 2345 - key1 = Key(_KIND, _ID1, project=_PROJECT) - entity1 = self._makeOne(key=key1) - key2 = Key(_KIND, _ID2, project=_PROJECT) - entity2 = self._makeOne(key=key2) - self.assertFalse(entity1 == entity2) - self.assertTrue(entity1 != entity2) - - def test___eq_____ne___w_same_keys(self): - from gcloud.datastore.key import Key - - name = 'foo' - value = 42 - meaning = 9 - - key1 = Key(_KIND, _ID, project=_PROJECT) - entity1 = self._makeOne(key=key1, exclude_from_indexes=(name,)) - entity1[name] = value - entity1._meanings[name] = (meaning, 
value) - - key2 = Key(_KIND, _ID, project=_PROJECT) - entity2 = self._makeOne(key=key2, exclude_from_indexes=(name,)) - entity2[name] = value - entity2._meanings[name] = (meaning, value) - - self.assertTrue(entity1 == entity2) - self.assertFalse(entity1 != entity2) - - def test___eq_____ne___w_same_keys_different_props(self): - from gcloud.datastore.key import Key - key1 = Key(_KIND, _ID, project=_PROJECT) - entity1 = self._makeOne(key=key1) - entity1['foo'] = 'Foo' - key2 = Key(_KIND, _ID, project=_PROJECT) - entity2 = self._makeOne(key=key2) - entity1['bar'] = 'Bar' - self.assertFalse(entity1 == entity2) - self.assertTrue(entity1 != entity2) - - def test___eq_____ne___w_same_keys_props_w_equiv_keys_as_value(self): - from gcloud.datastore.key import Key - key1 = Key(_KIND, _ID, project=_PROJECT) - key2 = Key(_KIND, _ID, project=_PROJECT) - entity1 = self._makeOne(key=key1) - entity1['some_key'] = key1 - entity2 = self._makeOne(key=key1) - entity2['some_key'] = key2 - self.assertTrue(entity1 == entity2) - self.assertFalse(entity1 != entity2) - - def test___eq_____ne___w_same_keys_props_w_diff_keys_as_value(self): - from gcloud.datastore.key import Key - _ID1 = 1234 - _ID2 = 2345 - key1 = Key(_KIND, _ID1, project=_PROJECT) - key2 = Key(_KIND, _ID2, project=_PROJECT) - entity1 = self._makeOne(key=key1) - entity1['some_key'] = key1 - entity2 = self._makeOne(key=key1) - entity2['some_key'] = key2 - self.assertFalse(entity1 == entity2) - self.assertTrue(entity1 != entity2) - - def test___eq_____ne___w_same_keys_props_w_equiv_entities_as_value(self): - from gcloud.datastore.key import Key - key = Key(_KIND, _ID, project=_PROJECT) - entity1 = self._makeOne(key=key) - sub1 = self._makeOne() - sub1.update({'foo': 'Foo'}) - entity1['some_entity'] = sub1 - entity2 = self._makeOne(key=key) - sub2 = self._makeOne() - sub2.update({'foo': 'Foo'}) - entity2['some_entity'] = sub2 - self.assertTrue(entity1 == entity2) - self.assertFalse(entity1 != entity2) - - def test___eq_____ne___w_same_keys_props_w_diff_entities_as_value(self): - from gcloud.datastore.key import Key - key = Key(_KIND, _ID, project=_PROJECT) - entity1 = self._makeOne(key=key) - sub1 = self._makeOne() - sub1.update({'foo': 'Foo'}) - entity1['some_entity'] = sub1 - entity2 = self._makeOne(key=key) - sub2 = self._makeOne() - sub2.update({'foo': 'Bar'}) - entity2['some_entity'] = sub2 - self.assertFalse(entity1 == entity2) - self.assertTrue(entity1 != entity2) - - def test__eq__same_value_different_exclude(self): - from gcloud.datastore.key import Key - - name = 'foo' - value = 42 - key = Key(_KIND, _ID, project=_PROJECT) - - entity1 = self._makeOne(key=key, exclude_from_indexes=(name,)) - entity1[name] = value - - entity2 = self._makeOne(key=key, exclude_from_indexes=()) - entity2[name] = value - - self.assertFalse(entity1 == entity2) - - def test__eq__same_value_different_meanings(self): - from gcloud.datastore.key import Key - - name = 'foo' - value = 42 - meaning = 9 - key = Key(_KIND, _ID, project=_PROJECT) - - entity1 = self._makeOne(key=key, exclude_from_indexes=(name,)) - entity1[name] = value - - entity2 = self._makeOne(key=key, exclude_from_indexes=(name,)) - entity2[name] = value - entity2._meanings[name] = (meaning, value) - - self.assertFalse(entity1 == entity2) - - def test___repr___no_key_empty(self): - entity = self._makeOne() - self.assertEqual(repr(entity), '<Entity {}>') - - def test___repr___w_key_non_empty(self): - key = _Key() - key._path = '/bar/baz' - entity = self._makeOne(key=key) - entity['foo'] = 'Foo' -
self.assertEqual(repr(entity), "<Entity/bar/baz {'foo': 'Foo'}>") - - -class _Key(object): - _MARKER = object() - _key = 'KEY' - _partial = False - _path = None - _id = None - _stored = None - - def __init__(self, project=_PROJECT): - self.project = project - - @property - def path(self): - return self._path diff --git a/gcloud/datastore/test_helpers.py b/gcloud/datastore/test_helpers.py deleted file mode 100644 index caa5e9cec2b1..000000000000 --- a/gcloud/datastore/test_helpers.py +++ /dev/null @@ -1,900 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test__new_value_pb(unittest2.TestCase): - - def _callFUT(self, entity_pb, name): - from gcloud.datastore.helpers import _new_value_pb - return _new_value_pb(entity_pb, name) - - def test_it(self): - from gcloud.datastore._generated import entity_pb2 - - entity_pb = entity_pb2.Entity() - name = 'foo' - result = self._callFUT(entity_pb, name) - - self.assertTrue(isinstance(result, entity_pb2.Value)) - self.assertEqual(len(entity_pb.properties), 1) - self.assertEqual(entity_pb.properties[name], result) - - -class Test__property_tuples(unittest2.TestCase): - - def _callFUT(self, entity_pb): - from gcloud.datastore.helpers import _property_tuples - return _property_tuples(entity_pb) - - def test_it(self): - import types - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - entity_pb = entity_pb2.Entity() - name1 = 'foo' - name2 = 'bar' - val_pb1 = _new_value_pb(entity_pb, name1) - val_pb2 = _new_value_pb(entity_pb, name2) - - result = self._callFUT(entity_pb) - self.assertTrue(isinstance(result, types.GeneratorType)) - self.assertEqual(sorted(result), - sorted([(name1, val_pb1), (name2, val_pb2)])) - - -class Test_entity_from_protobuf(unittest2.TestCase): - - def _callFUT(self, val): - from gcloud.datastore.helpers import entity_from_protobuf - return entity_from_protobuf(val) - - def test_it(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - entity_pb = entity_pb2.Entity() - entity_pb.key.partition_id.project_id = _PROJECT - entity_pb.key.path.add(kind=_KIND, id=_ID) - - value_pb = _new_value_pb(entity_pb, 'foo') - value_pb.string_value = 'Foo' - - unindexed_val_pb = _new_value_pb(entity_pb, 'bar') - unindexed_val_pb.integer_value = 10 - unindexed_val_pb.exclude_from_indexes = True - - array_val_pb1 = _new_value_pb(entity_pb, 'baz') - array_pb1 = array_val_pb1.array_value.values - - unindexed_array_val_pb = array_pb1.add() - unindexed_array_val_pb.integer_value = 11 - unindexed_array_val_pb.exclude_from_indexes = True - - array_val_pb2 = _new_value_pb(entity_pb, 'qux') - array_pb2 = array_val_pb2.array_value.values - - indexed_array_val_pb = array_pb2.add() - indexed_array_val_pb.integer_value = 12 - - entity = self._callFUT(entity_pb) - self.assertEqual(entity.kind, _KIND) -
self.assertEqual(entity.exclude_from_indexes, - frozenset(['bar', 'baz'])) - entity_props = dict(entity) - self.assertEqual(entity_props, - {'foo': 'Foo', 'bar': 10, 'baz': [11], 'qux': [12]}) - - # Also check the key. - key = entity.key - self.assertEqual(key.project, _PROJECT) - self.assertEqual(key.namespace, None) - self.assertEqual(key.kind, _KIND) - self.assertEqual(key.id, _ID) - - def test_mismatched_value_indexed(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - entity_pb = entity_pb2.Entity() - entity_pb.key.partition_id.project_id = _PROJECT - entity_pb.key.path.add(kind=_KIND, id=_ID) - - array_val_pb = _new_value_pb(entity_pb, 'baz') - array_pb = array_val_pb.array_value.values - - unindexed_value_pb1 = array_pb.add() - unindexed_value_pb1.integer_value = 10 - unindexed_value_pb1.exclude_from_indexes = True - - unindexed_value_pb2 = array_pb.add() - unindexed_value_pb2.integer_value = 11 - - with self.assertRaises(ValueError): - self._callFUT(entity_pb) - - def test_entity_no_key(self): - from gcloud.datastore._generated import entity_pb2 - - entity_pb = entity_pb2.Entity() - entity = self._callFUT(entity_pb) - - self.assertEqual(entity.key, None) - self.assertEqual(dict(entity), {}) - - def test_entity_with_meaning(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - entity_pb = entity_pb2.Entity() - name = 'hello' - value_pb = _new_value_pb(entity_pb, name) - value_pb.meaning = meaning = 9 - value_pb.string_value = val = u'something' - - entity = self._callFUT(entity_pb) - self.assertEqual(entity.key, None) - self.assertEqual(dict(entity), {name: val}) - self.assertEqual(entity._meanings, {name: (meaning, val)}) - - def test_nested_entity_no_key(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - - PROJECT = 'FOO' - KIND = 'KIND' - INSIDE_NAME = 'IFOO' - OUTSIDE_NAME = 'OBAR' - INSIDE_VALUE = 1337 - - entity_inside = entity_pb2.Entity() - inside_val_pb = _new_value_pb(entity_inside, INSIDE_NAME) - inside_val_pb.integer_value = INSIDE_VALUE - - entity_pb = entity_pb2.Entity() - entity_pb.key.partition_id.project_id = PROJECT - element = entity_pb.key.path.add() - element.kind = KIND - - outside_val_pb = _new_value_pb(entity_pb, OUTSIDE_NAME) - outside_val_pb.entity_value.CopyFrom(entity_inside) - - entity = self._callFUT(entity_pb) - self.assertEqual(entity.key.project, PROJECT) - self.assertEqual(entity.key.flat_path, (KIND,)) - self.assertEqual(len(entity), 1) - - inside_entity = entity[OUTSIDE_NAME] - self.assertEqual(inside_entity.key, None) - self.assertEqual(len(inside_entity), 1) - self.assertEqual(inside_entity[INSIDE_NAME], INSIDE_VALUE) - - -class Test_entity_to_protobuf(unittest2.TestCase): - - def _callFUT(self, entity): - from gcloud.datastore.helpers import entity_to_protobuf - return entity_to_protobuf(entity) - - def _compareEntityProto(self, entity_pb1, entity_pb2): - from gcloud.datastore.helpers import _property_tuples - - self.assertEqual(entity_pb1.key, entity_pb2.key) - value_list1 = sorted(_property_tuples(entity_pb1)) - value_list2 = sorted(_property_tuples(entity_pb2)) - self.assertEqual(len(value_list1), len(value_list2)) - for pair1, pair2 in zip(value_list1, value_list2): - name1, val1 = pair1 - name2, val2 = pair2 - self.assertEqual(name1, name2) - if val1.HasField('entity_value'): # Message field 
(Entity) - self.assertEqual(val1.meaning, val2.meaning) - self._compareEntityProto(val1.entity_value, - val2.entity_value) - else: - self.assertEqual(val1, val2) - - def test_empty(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - - entity = Entity() - entity_pb = self._callFUT(entity) - self._compareEntityProto(entity_pb, entity_pb2.Entity()) - - def test_key_only(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - from gcloud.datastore.key import Key - - kind, name = 'PATH', 'NAME' - project = 'PROJECT' - key = Key(kind, name, project=project) - entity = Entity(key=key) - entity_pb = self._callFUT(entity) - - expected_pb = entity_pb2.Entity() - expected_pb.key.partition_id.project_id = project - path_elt = expected_pb.key.path.add() - path_elt.kind = kind - path_elt.name = name - - self._compareEntityProto(entity_pb, expected_pb) - - def test_simple_fields(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - from gcloud.datastore.helpers import _new_value_pb - - entity = Entity() - name1 = 'foo' - entity[name1] = value1 = 42 - name2 = 'bar' - entity[name2] = value2 = u'some-string' - entity_pb = self._callFUT(entity) - - expected_pb = entity_pb2.Entity() - val_pb1 = _new_value_pb(expected_pb, name1) - val_pb1.integer_value = value1 - val_pb2 = _new_value_pb(expected_pb, name2) - val_pb2.string_value = value2 - - self._compareEntityProto(entity_pb, expected_pb) - - def test_with_empty_list(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - - entity = Entity() - entity['foo'] = [] - entity_pb = self._callFUT(entity) - - self._compareEntityProto(entity_pb, entity_pb2.Entity()) - - def test_inverts_to_protobuf(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import _new_value_pb - from gcloud.datastore.helpers import entity_from_protobuf - - original_pb = entity_pb2.Entity() - # Add a key. - original_pb.key.partition_id.project_id = project = 'PROJECT' - elem1 = original_pb.key.path.add() - elem1.kind = 'Family' - elem1.id = 1234 - elem2 = original_pb.key.path.add() - elem2.kind = 'King' - elem2.name = 'Spades' - - # Add an integer property. - val_pb1 = _new_value_pb(original_pb, 'foo') - val_pb1.integer_value = 1337 - val_pb1.exclude_from_indexes = True - # Add a string property. - val_pb2 = _new_value_pb(original_pb, 'bar') - val_pb2.string_value = u'hello' - - # Add a nested (entity) property. - val_pb3 = _new_value_pb(original_pb, 'entity-baz') - sub_pb = entity_pb2.Entity() - sub_val_pb1 = _new_value_pb(sub_pb, 'x') - sub_val_pb1.double_value = 3.14 - sub_val_pb2 = _new_value_pb(sub_pb, 'y') - sub_val_pb2.double_value = 2.718281828 - val_pb3.meaning = 9 - val_pb3.entity_value.CopyFrom(sub_pb) - - # Add a list property. - val_pb4 = _new_value_pb(original_pb, 'list-quux') - array_val1 = val_pb4.array_value.values.add() - array_val1.exclude_from_indexes = False - array_val1.meaning = meaning = 22 - array_val1.blob_value = b'\xe2\x98\x83' - array_val2 = val_pb4.array_value.values.add() - array_val2.exclude_from_indexes = False - array_val2.meaning = meaning - array_val2.blob_value = b'\xe2\x98\x85' - - # Convert to the user-space Entity. - entity = entity_from_protobuf(original_pb) - # Convert the user-space Entity back to a protobuf. 
- new_pb = self._callFUT(entity) - - # NOTE: entity_to_protobuf() strips the project so we "cheat". - new_pb.key.partition_id.project_id = project - self._compareEntityProto(original_pb, new_pb) - - def test_meaning_with_change(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - from gcloud.datastore.helpers import _new_value_pb - - entity = Entity() - name = 'foo' - entity[name] = value = 42 - entity._meanings[name] = (9, 1337) - entity_pb = self._callFUT(entity) - - expected_pb = entity_pb2.Entity() - value_pb = _new_value_pb(expected_pb, name) - value_pb.integer_value = value - # NOTE: No meaning is used since the value differs from the - # value stored. - self._compareEntityProto(entity_pb, expected_pb) - - -class Test_key_from_protobuf(unittest2.TestCase): - - def _callFUT(self, val): - from gcloud.datastore.helpers import key_from_protobuf - - return key_from_protobuf(val) - - def _makePB(self, project=None, namespace=None, path=()): - from gcloud.datastore._generated import entity_pb2 - pb = entity_pb2.Key() - if project is not None: - pb.partition_id.project_id = project - if namespace is not None: - pb.partition_id.namespace_id = namespace - for elem in path: - added = pb.path.add() - added.kind = elem['kind'] - if 'id' in elem: - added.id = elem['id'] - if 'name' in elem: - added.name = elem['name'] - return pb - - def test_wo_namespace_in_pb(self): - _PROJECT = 'PROJECT' - pb = self._makePB(path=[{'kind': 'KIND'}], project=_PROJECT) - key = self._callFUT(pb) - self.assertEqual(key.project, _PROJECT) - self.assertEqual(key.namespace, None) - - def test_w_namespace_in_pb(self): - _PROJECT = 'PROJECT' - _NAMESPACE = 'NAMESPACE' - pb = self._makePB(path=[{'kind': 'KIND'}], namespace=_NAMESPACE, - project=_PROJECT) - key = self._callFUT(pb) - self.assertEqual(key.project, _PROJECT) - self.assertEqual(key.namespace, _NAMESPACE) - - def test_w_nested_path_in_pb(self): - _PATH = [ - {'kind': 'PARENT', 'name': 'NAME'}, - {'kind': 'CHILD', 'id': 1234}, - {'kind': 'GRANDCHILD', 'id': 5678}, - ] - pb = self._makePB(path=_PATH, project='PROJECT') - key = self._callFUT(pb) - self.assertEqual(key.path, _PATH) - - def test_w_nothing_in_pb(self): - pb = self._makePB() - self.assertRaises(ValueError, self._callFUT, pb) - - -class Test__pb_attr_value(unittest2.TestCase): - - def _callFUT(self, val): - from gcloud.datastore.helpers import _pb_attr_value - - return _pb_attr_value(val) - - def test_datetime_naive(self): - import calendar - import datetime - from gcloud._helpers import UTC - - micros = 4375 - naive = datetime.datetime(2014, 9, 16, 10, 19, 32, micros) # No zone. 
- utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC) - name, value = self._callFUT(naive) - self.assertEqual(name, 'timestamp_value') - self.assertEqual(value.seconds, calendar.timegm(utc.timetuple())) - self.assertEqual(value.nanos, 1000 * micros) - - def test_datetime_w_zone(self): - import calendar - import datetime - from gcloud._helpers import UTC - - micros = 4375 - utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC) - name, value = self._callFUT(utc) - self.assertEqual(name, 'timestamp_value') - self.assertEqual(value.seconds, calendar.timegm(utc.timetuple())) - self.assertEqual(value.nanos, 1000 * micros) - - def test_key(self): - from gcloud.datastore.key import Key - - key = Key('PATH', 1234, project='PROJECT') - name, value = self._callFUT(key) - self.assertEqual(name, 'key_value') - self.assertEqual(value, key.to_protobuf()) - - def test_bool(self): - name, value = self._callFUT(False) - self.assertEqual(name, 'boolean_value') - self.assertEqual(value, False) - - def test_float(self): - name, value = self._callFUT(3.1415926) - self.assertEqual(name, 'double_value') - self.assertEqual(value, 3.1415926) - - def test_int(self): - name, value = self._callFUT(42) - self.assertEqual(name, 'integer_value') - self.assertEqual(value, 42) - - def test_long(self): - must_be_long = (1 << 63) - 1 - name, value = self._callFUT(must_be_long) - self.assertEqual(name, 'integer_value') - self.assertEqual(value, must_be_long) - - def test_native_str(self): - import six - name, value = self._callFUT('str') - if six.PY2: - self.assertEqual(name, 'blob_value') - else: # pragma: NO COVER - self.assertEqual(name, 'string_value') - self.assertEqual(value, 'str') - - def test_bytes(self): - name, value = self._callFUT(b'bytes') - self.assertEqual(name, 'blob_value') - self.assertEqual(value, b'bytes') - - def test_unicode(self): - name, value = self._callFUT(u'str') - self.assertEqual(name, 'string_value') - self.assertEqual(value, u'str') - - def test_entity(self): - from gcloud.datastore.entity import Entity - entity = Entity() - name, value = self._callFUT(entity) - self.assertEqual(name, 'entity_value') - self.assertTrue(value is entity) - - def test_array(self): - values = ['a', 0, 3.14] - name, value = self._callFUT(values) - self.assertEqual(name, 'array_value') - self.assertTrue(value is values) - - def test_geo_point(self): - from google.type import latlng_pb2 - from gcloud.datastore.helpers import GeoPoint - - lat = 42.42 - lng = 99.0007 - geo_pt = GeoPoint(latitude=lat, longitude=lng) - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - name, value = self._callFUT(geo_pt) - self.assertEqual(name, 'geo_point_value') - self.assertEqual(value, geo_pt_pb) - - def test_null(self): - from google.protobuf import struct_pb2 - - name, value = self._callFUT(None) - self.assertEqual(name, 'null_value') - self.assertEqual(value, struct_pb2.NULL_VALUE) - - def test_object(self): - self.assertRaises(ValueError, self._callFUT, object()) - - -class Test__get_value_from_value_pb(unittest2.TestCase): - - def _callFUT(self, pb): - from gcloud.datastore.helpers import _get_value_from_value_pb - - return _get_value_from_value_pb(pb) - - def _makePB(self, attr_name, value): - from gcloud.datastore._generated import entity_pb2 - - pb = entity_pb2.Value() - setattr(pb, attr_name, value) - return pb - - def test_datetime(self): - import calendar - import datetime - from gcloud._helpers import UTC - from gcloud.datastore._generated import entity_pb2 - - micros = 4375 - utc = 
datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC) - pb = entity_pb2.Value() - pb.timestamp_value.seconds = calendar.timegm(utc.timetuple()) - pb.timestamp_value.nanos = 1000 * micros - self.assertEqual(self._callFUT(pb), utc) - - def test_key(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.key import Key - - pb = entity_pb2.Value() - expected = Key('KIND', 1234, project='PROJECT').to_protobuf() - pb.key_value.CopyFrom(expected) - found = self._callFUT(pb) - self.assertEqual(found.to_protobuf(), expected) - - def test_bool(self): - pb = self._makePB('boolean_value', False) - self.assertEqual(self._callFUT(pb), False) - - def test_float(self): - pb = self._makePB('double_value', 3.1415926) - self.assertEqual(self._callFUT(pb), 3.1415926) - - def test_int(self): - pb = self._makePB('integer_value', 42) - self.assertEqual(self._callFUT(pb), 42) - - def test_bytes(self): - pb = self._makePB('blob_value', b'str') - self.assertEqual(self._callFUT(pb), b'str') - - def test_unicode(self): - pb = self._makePB('string_value', u'str') - self.assertEqual(self._callFUT(pb), u'str') - - def test_entity(self): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.entity import Entity - from gcloud.datastore.helpers import _new_value_pb - - pb = entity_pb2.Value() - entity_pb = pb.entity_value - entity_pb.key.path.add(kind='KIND') - entity_pb.key.partition_id.project_id = 'PROJECT' - - value_pb = _new_value_pb(entity_pb, 'foo') - value_pb.string_value = 'Foo' - entity = self._callFUT(pb) - self.assertTrue(isinstance(entity, Entity)) - self.assertEqual(entity['foo'], 'Foo') - - def test_array(self): - from gcloud.datastore._generated import entity_pb2 - - pb = entity_pb2.Value() - array_pb = pb.array_value.values - item_pb = array_pb.add() - item_pb.string_value = 'Foo' - item_pb = array_pb.add() - item_pb.string_value = 'Bar' - items = self._callFUT(pb) - self.assertEqual(items, ['Foo', 'Bar']) - - def test_geo_point(self): - from google.type import latlng_pb2 - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore.helpers import GeoPoint - - lat = -3.14 - lng = 13.37 - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - pb = entity_pb2.Value(geo_point_value=geo_pt_pb) - result = self._callFUT(pb) - self.assertIsInstance(result, GeoPoint) - self.assertEqual(result.latitude, lat) - self.assertEqual(result.longitude, lng) - - def test_null(self): - from google.protobuf import struct_pb2 - from gcloud.datastore._generated import entity_pb2 - - pb = entity_pb2.Value(null_value=struct_pb2.NULL_VALUE) - result = self._callFUT(pb) - self.assertIsNone(result) - - def test_unknown(self): - from gcloud.datastore._generated import entity_pb2 - - pb = entity_pb2.Value() - with self.assertRaises(ValueError): - self._callFUT(pb) - - -class Test_set_protobuf_value(unittest2.TestCase): - - def _callFUT(self, value_pb, val): - from gcloud.datastore.helpers import _set_protobuf_value - - return _set_protobuf_value(value_pb, val) - - def _makePB(self): - from gcloud.datastore._generated import entity_pb2 - return entity_pb2.Value() - - def test_datetime(self): - import calendar - import datetime - from gcloud._helpers import UTC - - pb = self._makePB() - micros = 4375 - utc = datetime.datetime(2014, 9, 16, 10, 19, 32, micros, UTC) - self._callFUT(pb, utc) - value = pb.timestamp_value - self.assertEqual(value.seconds, calendar.timegm(utc.timetuple())) - self.assertEqual(value.nanos, 1000 * micros) - - def test_key(self): - from 
gcloud.datastore.key import Key - - pb = self._makePB() - key = Key('KIND', 1234, project='PROJECT') - self._callFUT(pb, key) - value = pb.key_value - self.assertEqual(value, key.to_protobuf()) - - def test_none(self): - pb = self._makePB() - self._callFUT(pb, None) - self.assertEqual(pb.WhichOneof('value_type'), 'null_value') - - def test_bool(self): - pb = self._makePB() - self._callFUT(pb, False) - value = pb.boolean_value - self.assertEqual(value, False) - - def test_float(self): - pb = self._makePB() - self._callFUT(pb, 3.1415926) - value = pb.double_value - self.assertEqual(value, 3.1415926) - - def test_int(self): - pb = self._makePB() - self._callFUT(pb, 42) - value = pb.integer_value - self.assertEqual(value, 42) - - def test_long(self): - pb = self._makePB() - must_be_long = (1 << 63) - 1 - self._callFUT(pb, must_be_long) - value = pb.integer_value - self.assertEqual(value, must_be_long) - - def test_native_str(self): - import six - pb = self._makePB() - self._callFUT(pb, 'str') - if six.PY2: - value = pb.blob_value - else: # pragma: NO COVER - value = pb.string_value - self.assertEqual(value, 'str') - - def test_bytes(self): - pb = self._makePB() - self._callFUT(pb, b'str') - value = pb.blob_value - self.assertEqual(value, b'str') - - def test_unicode(self): - pb = self._makePB() - self._callFUT(pb, u'str') - value = pb.string_value - self.assertEqual(value, u'str') - - def test_entity_empty_wo_key(self): - from gcloud.datastore.entity import Entity - from gcloud.datastore.helpers import _property_tuples - - pb = self._makePB() - entity = Entity() - self._callFUT(pb, entity) - value = pb.entity_value - self.assertEqual(value.key.SerializeToString(), b'') - self.assertEqual(len(list(_property_tuples(value))), 0) - - def test_entity_w_key(self): - from gcloud.datastore.entity import Entity - from gcloud.datastore.helpers import _property_tuples - from gcloud.datastore.key import Key - - name = 'foo' - value = u'Foo' - pb = self._makePB() - key = Key('KIND', 123, project='PROJECT') - entity = Entity(key=key) - entity[name] = value - self._callFUT(pb, entity) - entity_pb = pb.entity_value - self.assertEqual(entity_pb.key, key.to_protobuf()) - - prop_dict = dict(_property_tuples(entity_pb)) - self.assertEqual(len(prop_dict), 1) - self.assertEqual(list(prop_dict.keys()), [name]) - self.assertEqual(prop_dict[name].string_value, value) - - def test_array(self): - pb = self._makePB() - values = [u'a', 0, 3.14] - self._callFUT(pb, values) - marshalled = pb.array_value.values - self.assertEqual(len(marshalled), len(values)) - self.assertEqual(marshalled[0].string_value, values[0]) - self.assertEqual(marshalled[1].integer_value, values[1]) - self.assertEqual(marshalled[2].double_value, values[2]) - - def test_geo_point(self): - from google.type import latlng_pb2 - from gcloud.datastore.helpers import GeoPoint - - pb = self._makePB() - lat = 9.11 - lng = 3.337 - geo_pt = GeoPoint(latitude=lat, longitude=lng) - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - self._callFUT(pb, geo_pt) - self.assertEqual(pb.geo_point_value, geo_pt_pb) - - -class Test__get_meaning(unittest2.TestCase): - - def _callFUT(self, *args, **kwargs): - from gcloud.datastore.helpers import _get_meaning - return _get_meaning(*args, **kwargs) - - def test_no_meaning(self): - from gcloud.datastore._generated import entity_pb2 - - value_pb = entity_pb2.Value() - result = self._callFUT(value_pb) - self.assertEqual(result, None) - - def test_single(self): - from gcloud.datastore._generated import entity_pb2 - - 
value_pb = entity_pb2.Value() - value_pb.meaning = meaning = 22 - value_pb.string_value = u'hi' - result = self._callFUT(value_pb) - self.assertEqual(meaning, result) - - def test_empty_array_value(self): - from gcloud.datastore._generated import entity_pb2 - - value_pb = entity_pb2.Value() - value_pb.array_value.values.add() - value_pb.array_value.values.pop() - - result = self._callFUT(value_pb, is_list=True) - self.assertEqual(None, result) - - def test_array_value(self): - from gcloud.datastore._generated import entity_pb2 - - value_pb = entity_pb2.Value() - meaning = 9 - sub_value_pb1 = value_pb.array_value.values.add() - sub_value_pb2 = value_pb.array_value.values.add() - - sub_value_pb1.meaning = sub_value_pb2.meaning = meaning - sub_value_pb1.string_value = u'hi' - sub_value_pb2.string_value = u'bye' - - result = self._callFUT(value_pb, is_list=True) - self.assertEqual(meaning, result) - - def test_array_value_disagreeing(self): - from gcloud.datastore._generated import entity_pb2 - - value_pb = entity_pb2.Value() - meaning1 = 9 - meaning2 = 10 - sub_value_pb1 = value_pb.array_value.values.add() - sub_value_pb2 = value_pb.array_value.values.add() - - sub_value_pb1.meaning = meaning1 - sub_value_pb2.meaning = meaning2 - sub_value_pb1.string_value = u'hi' - sub_value_pb2.string_value = u'bye' - - with self.assertRaises(ValueError): - self._callFUT(value_pb, is_list=True) - - def test_array_value_partially_unset(self): - from gcloud.datastore._generated import entity_pb2 - - value_pb = entity_pb2.Value() - meaning1 = 9 - sub_value_pb1 = value_pb.array_value.values.add() - sub_value_pb2 = value_pb.array_value.values.add() - - sub_value_pb1.meaning = meaning1 - sub_value_pb1.string_value = u'hi' - sub_value_pb2.string_value = u'bye' - - with self.assertRaises(ValueError): - self._callFUT(value_pb, is_list=True) - - -class TestGeoPoint(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.datastore.helpers import GeoPoint - return GeoPoint - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_constructor(self): - lat = 81.2 - lng = 359.9999 - geo_pt = self._makeOne(lat, lng) - self.assertEqual(geo_pt.latitude, lat) - self.assertEqual(geo_pt.longitude, lng) - - def test_to_protobuf(self): - from google.type import latlng_pb2 - - lat = 0.0001 - lng = 20.03 - geo_pt = self._makeOne(lat, lng) - result = geo_pt.to_protobuf() - geo_pt_pb = latlng_pb2.LatLng(latitude=lat, longitude=lng) - self.assertEqual(result, geo_pt_pb) - - def test___eq__(self): - lat = 0.0001 - lng = 20.03 - geo_pt1 = self._makeOne(lat, lng) - geo_pt2 = self._makeOne(lat, lng) - self.assertEqual(geo_pt1, geo_pt2) - - def test___eq__type_differ(self): - lat = 0.0001 - lng = 20.03 - geo_pt1 = self._makeOne(lat, lng) - geo_pt2 = object() - self.assertNotEqual(geo_pt1, geo_pt2) - - def test___ne__same_value(self): - lat = 0.0001 - lng = 20.03 - geo_pt1 = self._makeOne(lat, lng) - geo_pt2 = self._makeOne(lat, lng) - comparison_val = (geo_pt1 != geo_pt2) - self.assertFalse(comparison_val) - - def test___ne__(self): - geo_pt1 = self._makeOne(0.0, 1.0) - geo_pt2 = self._makeOne(2.0, 3.0) - self.assertNotEqual(geo_pt1, geo_pt2) diff --git a/gcloud/datastore/test_key.py b/gcloud/datastore/test_key.py deleted file mode 100644 index ce214e418f4a..000000000000 --- a/gcloud/datastore/test_key.py +++ /dev/null @@ -1,431 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestKey(unittest2.TestCase): - - _DEFAULT_PROJECT = 'PROJECT' - - def _getTargetClass(self): - from gcloud.datastore.key import Key - return Key - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_empty(self): - self.assertRaises(ValueError, self._makeOne) - - def test_ctor_no_project(self): - klass = self._getTargetClass() - self.assertRaises(ValueError, klass, 'KIND') - - def test_ctor_w_explicit_project_empty_path(self): - _PROJECT = 'PROJECT' - self.assertRaises(ValueError, self._makeOne, project=_PROJECT) - - def test_ctor_parent(self): - _PARENT_KIND = 'KIND1' - _PARENT_ID = 1234 - _PARENT_PROJECT = 'PROJECT-ALT' - _PARENT_NAMESPACE = 'NAMESPACE' - _CHILD_KIND = 'KIND2' - _CHILD_ID = 2345 - _PATH = [ - {'kind': _PARENT_KIND, 'id': _PARENT_ID}, - {'kind': _CHILD_KIND, 'id': _CHILD_ID}, - ] - parent_key = self._makeOne(_PARENT_KIND, _PARENT_ID, - project=_PARENT_PROJECT, - namespace=_PARENT_NAMESPACE) - key = self._makeOne(_CHILD_KIND, _CHILD_ID, parent=parent_key) - self.assertEqual(key.project, parent_key.project) - self.assertEqual(key.namespace, parent_key.namespace) - self.assertEqual(key.kind, _CHILD_KIND) - self.assertEqual(key.path, _PATH) - self.assertTrue(key.parent is parent_key) - - def test_ctor_partial_parent(self): - parent_key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - with self.assertRaises(ValueError): - self._makeOne('KIND2', 1234, parent=parent_key) - - def test_ctor_parent_bad_type(self): - with self.assertRaises(AttributeError): - self._makeOne('KIND2', 1234, parent=('KIND1', 1234), - project=self._DEFAULT_PROJECT) - - def test_ctor_parent_bad_namespace(self): - parent_key = self._makeOne('KIND', 1234, namespace='FOO', - project=self._DEFAULT_PROJECT) - with self.assertRaises(ValueError): - self._makeOne('KIND2', 1234, namespace='BAR', parent=parent_key, - project=self._DEFAULT_PROJECT) - - def test_ctor_parent_bad_project(self): - parent_key = self._makeOne('KIND', 1234, project='FOO') - with self.assertRaises(ValueError): - self._makeOne('KIND2', 1234, parent=parent_key, - project='BAR') - - def test_ctor_parent_empty_path(self): - parent_key = self._makeOne('KIND', 1234, - project=self._DEFAULT_PROJECT) - with self.assertRaises(ValueError): - self._makeOne(parent=parent_key) - - def test_ctor_explicit(self): - _PROJECT = 'PROJECT-ALT' - _NAMESPACE = 'NAMESPACE' - _KIND = 'KIND' - _ID = 1234 - _PATH = [{'kind': _KIND, 'id': _ID}] - key = self._makeOne(_KIND, _ID, namespace=_NAMESPACE, - project=_PROJECT) - self.assertEqual(key.project, _PROJECT) - self.assertEqual(key.namespace, _NAMESPACE) - self.assertEqual(key.kind, _KIND) - self.assertEqual(key.path, _PATH) - - def test_ctor_bad_kind(self): - self.assertRaises(ValueError, self._makeOne, object(), - project=self._DEFAULT_PROJECT) - - def test_ctor_bad_id_or_name(self): - self.assertRaises(ValueError, self._makeOne, 'KIND', object(), - project=self._DEFAULT_PROJECT) - 
self.assertRaises(ValueError, self._makeOne, 'KIND', None, - project=self._DEFAULT_PROJECT) - self.assertRaises(ValueError, self._makeOne, 'KIND', 10, 'KIND2', None, - project=self._DEFAULT_PROJECT) - - def test__clone(self): - _PROJECT = 'PROJECT-ALT' - _NAMESPACE = 'NAMESPACE' - _KIND = 'KIND' - _ID = 1234 - _PATH = [{'kind': _KIND, 'id': _ID}] - key = self._makeOne(_KIND, _ID, namespace=_NAMESPACE, - project=_PROJECT) - clone = key._clone() - self.assertEqual(clone.project, _PROJECT) - self.assertEqual(clone.namespace, _NAMESPACE) - self.assertEqual(clone.kind, _KIND) - self.assertEqual(clone.path, _PATH) - - def test__clone_with_parent(self): - _PROJECT = 'PROJECT-ALT' - _NAMESPACE = 'NAMESPACE' - _KIND1 = 'PARENT' - _KIND2 = 'KIND' - _ID1 = 1234 - _ID2 = 2345 - _PATH = [{'kind': _KIND1, 'id': _ID1}, {'kind': _KIND2, 'id': _ID2}] - - parent = self._makeOne(_KIND1, _ID1, namespace=_NAMESPACE, - project=_PROJECT) - key = self._makeOne(_KIND2, _ID2, parent=parent) - self.assertTrue(key.parent is parent) - clone = key._clone() - self.assertTrue(clone.parent is key.parent) - self.assertEqual(clone.project, _PROJECT) - self.assertEqual(clone.namespace, _NAMESPACE) - self.assertEqual(clone.path, _PATH) - - def test___eq_____ne___w_non_key(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _NAME = 'one' - key = self._makeOne(_KIND, _NAME, project=_PROJECT) - self.assertFalse(key == object()) - self.assertTrue(key != object()) - - def test___eq_____ne___two_incomplete_keys_same_kind(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - key1 = self._makeOne(_KIND, project=_PROJECT) - key2 = self._makeOne(_KIND, project=_PROJECT) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___incomplete_key_w_complete_key_same_kind(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - key1 = self._makeOne(_KIND, project=_PROJECT) - key2 = self._makeOne(_KIND, _ID, project=_PROJECT) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___complete_key_w_incomplete_key_same_kind(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - key1 = self._makeOne(_KIND, _ID, project=_PROJECT) - key2 = self._makeOne(_KIND, project=_PROJECT) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_different_ids(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID1 = 1234 - _ID2 = 2345 - key1 = self._makeOne(_KIND, _ID1, project=_PROJECT) - key2 = self._makeOne(_KIND, _ID2, project=_PROJECT) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_and_id(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - key1 = self._makeOne(_KIND, _ID, project=_PROJECT) - key2 = self._makeOne(_KIND, _ID, project=_PROJECT) - self.assertTrue(key1 == key2) - self.assertFalse(key1 != key2) - - def test___eq_____ne___same_kind_and_id_different_project(self): - _PROJECT1 = 'PROJECT1' - _PROJECT2 = 'PROJECT2' - _KIND = 'KIND' - _ID = 1234 - key1 = self._makeOne(_KIND, _ID, project=_PROJECT1) - key2 = self._makeOne(_KIND, _ID, project=_PROJECT2) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_and_id_different_namespace(self): - _PROJECT = 'PROJECT' - _NAMESPACE1 = 'NAMESPACE1' - _NAMESPACE2 = 'NAMESPACE2' - _KIND = 'KIND' - _ID = 1234 - key1 = self._makeOne(_KIND, _ID, project=_PROJECT, - namespace=_NAMESPACE1) - key2 = self._makeOne(_KIND, _ID, project=_PROJECT, - namespace=_NAMESPACE2) - self.assertFalse(key1 
== key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_different_names(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _NAME1 = 'one' - _NAME2 = 'two' - key1 = self._makeOne(_KIND, _NAME1, project=_PROJECT) - key2 = self._makeOne(_KIND, _NAME2, project=_PROJECT) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_and_name(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _NAME = 'one' - key1 = self._makeOne(_KIND, _NAME, project=_PROJECT) - key2 = self._makeOne(_KIND, _NAME, project=_PROJECT) - self.assertTrue(key1 == key2) - self.assertFalse(key1 != key2) - - def test___eq_____ne___same_kind_and_name_different_project(self): - _PROJECT1 = 'PROJECT1' - _PROJECT2 = 'PROJECT2' - _KIND = 'KIND' - _NAME = 'one' - key1 = self._makeOne(_KIND, _NAME, project=_PROJECT1) - key2 = self._makeOne(_KIND, _NAME, project=_PROJECT2) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___eq_____ne___same_kind_and_name_different_namespace(self): - _PROJECT = 'PROJECT' - _NAMESPACE1 = 'NAMESPACE1' - _NAMESPACE2 = 'NAMESPACE2' - _KIND = 'KIND' - _NAME = 'one' - key1 = self._makeOne(_KIND, _NAME, project=_PROJECT, - namespace=_NAMESPACE1) - key2 = self._makeOne(_KIND, _NAME, project=_PROJECT, - namespace=_NAMESPACE2) - self.assertFalse(key1 == key2) - self.assertTrue(key1 != key2) - - def test___hash___incomplete(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - key = self._makeOne(_KIND, project=_PROJECT) - self.assertNotEqual(hash(key), - hash(_KIND) + hash(_PROJECT) + hash(None)) - - def test___hash___completed_w_id(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 1234 - key = self._makeOne(_KIND, _ID, project=_PROJECT) - self.assertNotEqual(hash(key), - hash(_KIND) + hash(_ID) + - hash(_PROJECT) + hash(None)) - - def test___hash___completed_w_name(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _NAME = 'NAME' - key = self._makeOne(_KIND, _NAME, project=_PROJECT) - self.assertNotEqual(hash(key), - hash(_KIND) + hash(_NAME) + - hash(_PROJECT) + hash(None)) - - def test_completed_key_on_partial_w_id(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - _ID = 1234 - new_key = key.completed_key(_ID) - self.assertFalse(key is new_key) - self.assertEqual(new_key.id, _ID) - self.assertEqual(new_key.name, None) - - def test_completed_key_on_partial_w_name(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - _NAME = 'NAME' - new_key = key.completed_key(_NAME) - self.assertFalse(key is new_key) - self.assertEqual(new_key.id, None) - self.assertEqual(new_key.name, _NAME) - - def test_completed_key_on_partial_w_invalid(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - self.assertRaises(ValueError, key.completed_key, object()) - - def test_completed_key_on_complete(self): - key = self._makeOne('KIND', 1234, project=self._DEFAULT_PROJECT) - self.assertRaises(ValueError, key.completed_key, 5678) - - def test_to_protobuf_defaults(self): - from gcloud.datastore._generated import entity_pb2 - - _KIND = 'KIND' - key = self._makeOne(_KIND, project=self._DEFAULT_PROJECT) - pb = key.to_protobuf() - self.assertTrue(isinstance(pb, entity_pb2.Key)) - - # Check partition ID. - self.assertEqual(pb.partition_id.project_id, self._DEFAULT_PROJECT) - # Unset values are False-y. - self.assertEqual(pb.partition_id.namespace_id, '') - - # Check the element PB matches the partial key and kind. - elem, = list(pb.path) - self.assertEqual(elem.kind, _KIND) - # Unset values are False-y. 
- self.assertEqual(elem.name, '') - # Unset values are False-y. - self.assertEqual(elem.id, 0) - - def test_to_protobuf_w_explicit_project(self): - _PROJECT = 'PROJECT-ALT' - key = self._makeOne('KIND', project=_PROJECT) - pb = key.to_protobuf() - self.assertEqual(pb.partition_id.project_id, _PROJECT) - - def test_to_protobuf_w_explicit_namespace(self): - _NAMESPACE = 'NAMESPACE' - key = self._makeOne('KIND', namespace=_NAMESPACE, - project=self._DEFAULT_PROJECT) - pb = key.to_protobuf() - self.assertEqual(pb.partition_id.namespace_id, _NAMESPACE) - - def test_to_protobuf_w_explicit_path(self): - _PARENT = 'PARENT' - _CHILD = 'CHILD' - _ID = 1234 - _NAME = 'NAME' - key = self._makeOne(_PARENT, _NAME, _CHILD, _ID, - project=self._DEFAULT_PROJECT) - pb = key.to_protobuf() - elems = list(pb.path) - self.assertEqual(len(elems), 2) - self.assertEqual(elems[0].kind, _PARENT) - self.assertEqual(elems[0].name, _NAME) - self.assertEqual(elems[1].kind, _CHILD) - self.assertEqual(elems[1].id, _ID) - - def test_to_protobuf_w_no_kind(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - # Force the 'kind' to be unset. Maybe `to_protobuf` should fail - # on this? The backend certainly will. - key._path[-1].pop('kind') - pb = key.to_protobuf() - # Unset values are False-y. - self.assertEqual(pb.path[0].kind, '') - - def test_is_partial_no_name_or_id(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - self.assertTrue(key.is_partial) - - def test_is_partial_w_id(self): - _ID = 1234 - key = self._makeOne('KIND', _ID, project=self._DEFAULT_PROJECT) - self.assertFalse(key.is_partial) - - def test_is_partial_w_name(self): - _NAME = 'NAME' - key = self._makeOne('KIND', _NAME, project=self._DEFAULT_PROJECT) - self.assertFalse(key.is_partial) - - def test_id_or_name_no_name_or_id(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - self.assertEqual(key.id_or_name, None) - - def test_id_or_name_no_name_or_id_child(self): - key = self._makeOne('KIND1', 1234, 'KIND2', - project=self._DEFAULT_PROJECT) - self.assertEqual(key.id_or_name, None) - - def test_id_or_name_w_id_only(self): - _ID = 1234 - key = self._makeOne('KIND', _ID, project=self._DEFAULT_PROJECT) - self.assertEqual(key.id_or_name, _ID) - - def test_id_or_name_w_name_only(self): - _NAME = 'NAME' - key = self._makeOne('KIND', _NAME, project=self._DEFAULT_PROJECT) - self.assertEqual(key.id_or_name, _NAME) - - def test_parent_default(self): - key = self._makeOne('KIND', project=self._DEFAULT_PROJECT) - self.assertEqual(key.parent, None) - - def test_parent_explicit_top_level(self): - key = self._makeOne('KIND', 1234, project=self._DEFAULT_PROJECT) - self.assertEqual(key.parent, None) - - def test_parent_explicit_nested(self): - _PARENT_KIND = 'KIND1' - _PARENT_ID = 1234 - _PARENT_PATH = [{'kind': _PARENT_KIND, 'id': _PARENT_ID}] - key = self._makeOne(_PARENT_KIND, _PARENT_ID, 'KIND2', - project=self._DEFAULT_PROJECT) - self.assertEqual(key.parent.path, _PARENT_PATH) - - def test_parent_multiple_calls(self): - _PARENT_KIND = 'KIND1' - _PARENT_ID = 1234 - _PARENT_PATH = [{'kind': _PARENT_KIND, 'id': _PARENT_ID}] - key = self._makeOne(_PARENT_KIND, _PARENT_ID, 'KIND2', - project=self._DEFAULT_PROJECT) - parent = key.parent - self.assertEqual(parent.path, _PARENT_PATH) - new_parent = key.parent - self.assertTrue(parent is new_parent) diff --git a/gcloud/datastore/test_query.py b/gcloud/datastore/test_query.py deleted file mode 100644 index 4108de7acd2e..000000000000 --- a/gcloud/datastore/test_query.py +++ 
/dev/null @@ -1,692 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestQuery(unittest2.TestCase): - - _PROJECT = 'PROJECT' - - def _getTargetClass(self): - from gcloud.datastore.query import Query - return Query - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _makeClient(self, connection=None): - if connection is None: - connection = _Connection() - return _Client(self._PROJECT, connection) - - def test_ctor_defaults(self): - client = self._makeClient() - query = self._makeOne(client) - self.assertTrue(query._client is client) - self.assertEqual(query.project, client.project) - self.assertEqual(query.kind, None) - self.assertEqual(query.namespace, client.namespace) - self.assertEqual(query.ancestor, None) - self.assertEqual(query.filters, []) - self.assertEqual(query.projection, []) - self.assertEqual(query.order, []) - self.assertEqual(query.distinct_on, []) - - def test_ctor_explicit(self): - from gcloud.datastore.key import Key - _PROJECT = 'OTHER_PROJECT' - _KIND = 'KIND' - _NAMESPACE = 'OTHER_NAMESPACE' - client = self._makeClient() - ancestor = Key('ANCESTOR', 123, project=_PROJECT) - FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)] - PROJECTION = ['foo', 'bar', 'baz'] - ORDER = ['foo', 'bar'] - DISTINCT_ON = ['foo'] - query = self._makeOne( - client, - kind=_KIND, - project=_PROJECT, - namespace=_NAMESPACE, - ancestor=ancestor, - filters=FILTERS, - projection=PROJECTION, - order=ORDER, - distinct_on=DISTINCT_ON, - ) - self.assertTrue(query._client is client) - self.assertEqual(query.project, _PROJECT) - self.assertEqual(query.kind, _KIND) - self.assertEqual(query.namespace, _NAMESPACE) - self.assertEqual(query.ancestor.path, ancestor.path) - self.assertEqual(query.filters, FILTERS) - self.assertEqual(query.projection, PROJECTION) - self.assertEqual(query.order, ORDER) - self.assertEqual(query.distinct_on, DISTINCT_ON) - - def test_ctor_bad_projection(self): - BAD_PROJECTION = object() - self.assertRaises(TypeError, self._makeOne, self._makeClient(), - projection=BAD_PROJECTION) - - def test_ctor_bad_order(self): - BAD_ORDER = object() - self.assertRaises(TypeError, self._makeOne, self._makeClient(), - order=BAD_ORDER) - - def test_ctor_bad_distinct_on(self): - BAD_DISTINCT_ON = object() - self.assertRaises(TypeError, self._makeOne, self._makeClient(), - distinct_on=BAD_DISTINCT_ON) - - def test_ctor_bad_filters(self): - FILTERS_CANT_UNPACK = [('one', 'two')] - self.assertRaises(ValueError, self._makeOne, self._makeClient(), - filters=FILTERS_CANT_UNPACK) - - def test_namespace_setter_w_non_string(self): - query = self._makeOne(self._makeClient()) - - def _assign(val): - query.namespace = val - - self.assertRaises(ValueError, _assign, object()) - - def test_namespace_setter(self): - _NAMESPACE = 'OTHER_NAMESPACE' - query = self._makeOne(self._makeClient()) - query.namespace = _NAMESPACE - self.assertEqual(query.namespace, _NAMESPACE) - - def 
test_kind_setter_w_non_string(self): - query = self._makeOne(self._makeClient()) - - def _assign(val): - query.kind = val - - self.assertRaises(TypeError, _assign, object()) - - def test_kind_setter_wo_existing(self): - _KIND = 'KIND' - query = self._makeOne(self._makeClient()) - query.kind = _KIND - self.assertEqual(query.kind, _KIND) - - def test_kind_setter_w_existing(self): - _KIND_BEFORE = 'KIND_BEFORE' - _KIND_AFTER = 'KIND_AFTER' - query = self._makeOne(self._makeClient(), kind=_KIND_BEFORE) - self.assertEqual(query.kind, _KIND_BEFORE) - query.kind = _KIND_AFTER - self.assertEqual(query.project, self._PROJECT) - self.assertEqual(query.kind, _KIND_AFTER) - - def test_ancestor_setter_w_non_key(self): - query = self._makeOne(self._makeClient()) - - def _assign(val): - query.ancestor = val - - self.assertRaises(TypeError, _assign, object()) - self.assertRaises(TypeError, _assign, ['KIND', 'NAME']) - - def test_ancestor_setter_w_key(self): - from gcloud.datastore.key import Key - _NAME = u'NAME' - key = Key('KIND', 123, project=self._PROJECT) - query = self._makeOne(self._makeClient()) - query.add_filter('name', '=', _NAME) - query.ancestor = key - self.assertEqual(query.ancestor.path, key.path) - - def test_ancestor_deleter_w_key(self): - from gcloud.datastore.key import Key - key = Key('KIND', 123, project=self._PROJECT) - query = self._makeOne(client=self._makeClient(), ancestor=key) - del query.ancestor - self.assertTrue(query.ancestor is None) - - def test_add_filter_setter_w_unknown_operator(self): - query = self._makeOne(self._makeClient()) - self.assertRaises(ValueError, query.add_filter, - 'firstname', '~~', 'John') - - def test_add_filter_w_known_operator(self): - query = self._makeOne(self._makeClient()) - query.add_filter('firstname', '=', u'John') - self.assertEqual(query.filters, [('firstname', '=', u'John')]) - - def test_add_filter_w_all_operators(self): - query = self._makeOne(self._makeClient()) - query.add_filter('leq_prop', '<=', u'val1') - query.add_filter('geq_prop', '>=', u'val2') - query.add_filter('lt_prop', '<', u'val3') - query.add_filter('gt_prop', '>', u'val4') - query.add_filter('eq_prop', '=', u'val5') - self.assertEqual(len(query.filters), 5) - self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1')) - self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2')) - self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3')) - self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4')) - self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5')) - - def test_add_filter_w_known_operator_and_entity(self): - from gcloud.datastore.entity import Entity - query = self._makeOne(self._makeClient()) - other = Entity() - other['firstname'] = u'John' - other['lastname'] = u'Smith' - query.add_filter('other', '=', other) - self.assertEqual(query.filters, [('other', '=', other)]) - - def test_add_filter_w_whitespace_property_name(self): - query = self._makeOne(self._makeClient()) - PROPERTY_NAME = ' property with lots of space ' - query.add_filter(PROPERTY_NAME, '=', u'John') - self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')]) - - def test_add_filter___key__valid_key(self): - from gcloud.datastore.key import Key - query = self._makeOne(self._makeClient()) - key = Key('Foo', project=self._PROJECT) - query.add_filter('__key__', '=', key) - self.assertEqual(query.filters, [('__key__', '=', key)]) - - def test_filter___key__not_equal_operator(self): - from gcloud.datastore.key import Key - key = Key('Foo', project=self._PROJECT) - query = 
self._makeOne(self._makeClient()) - query.add_filter('__key__', '<', key) - self.assertEqual(query.filters, [('__key__', '<', key)]) - - def test_filter___key__invalid_value(self): - query = self._makeOne(self._makeClient()) - self.assertRaises(ValueError, query.add_filter, '__key__', '=', None) - - def test_projection_setter_empty(self): - query = self._makeOne(self._makeClient()) - query.projection = [] - self.assertEqual(query.projection, []) - - def test_projection_setter_string(self): - query = self._makeOne(self._makeClient()) - query.projection = 'field1' - self.assertEqual(query.projection, ['field1']) - - def test_projection_setter_non_empty(self): - query = self._makeOne(self._makeClient()) - query.projection = ['field1', 'field2'] - self.assertEqual(query.projection, ['field1', 'field2']) - - def test_projection_setter_multiple_calls(self): - _PROJECTION1 = ['field1', 'field2'] - _PROJECTION2 = ['field3'] - query = self._makeOne(self._makeClient()) - query.projection = _PROJECTION1 - self.assertEqual(query.projection, _PROJECTION1) - query.projection = _PROJECTION2 - self.assertEqual(query.projection, _PROJECTION2) - - def test_keys_only(self): - query = self._makeOne(self._makeClient()) - query.keys_only() - self.assertEqual(query.projection, ['__key__']) - - def test_key_filter_defaults(self): - from gcloud.datastore.key import Key - - client = self._makeClient() - query = self._makeOne(client) - self.assertEqual(query.filters, []) - key = Key('Kind', 1234, project='project') - query.key_filter(key) - self.assertEqual(query.filters, [('__key__', '=', key)]) - - def test_key_filter_explicit(self): - from gcloud.datastore.key import Key - - client = self._makeClient() - query = self._makeOne(client) - self.assertEqual(query.filters, []) - key = Key('Kind', 1234, project='project') - query.key_filter(key, operator='>') - self.assertEqual(query.filters, [('__key__', '>', key)]) - - def test_order_setter_empty(self): - query = self._makeOne(self._makeClient(), order=['foo', '-bar']) - query.order = [] - self.assertEqual(query.order, []) - - def test_order_setter_string(self): - query = self._makeOne(self._makeClient()) - query.order = 'field' - self.assertEqual(query.order, ['field']) - - def test_order_setter_single_item_list_desc(self): - query = self._makeOne(self._makeClient()) - query.order = ['-field'] - self.assertEqual(query.order, ['-field']) - - def test_order_setter_multiple(self): - query = self._makeOne(self._makeClient()) - query.order = ['foo', '-bar'] - self.assertEqual(query.order, ['foo', '-bar']) - - def test_distinct_on_setter_empty(self): - query = self._makeOne(self._makeClient(), distinct_on=['foo', 'bar']) - query.distinct_on = [] - self.assertEqual(query.distinct_on, []) - - def test_distinct_on_setter_string(self): - query = self._makeOne(self._makeClient()) - query.distinct_on = 'field1' - self.assertEqual(query.distinct_on, ['field1']) - - def test_distinct_on_setter_non_empty(self): - query = self._makeOne(self._makeClient()) - query.distinct_on = ['field1', 'field2'] - self.assertEqual(query.distinct_on, ['field1', 'field2']) - - def test_distinct_on_multiple_calls(self): - _DISTINCT_ON1 = ['field1', 'field2'] - _DISTINCT_ON2 = ['field3'] - query = self._makeOne(self._makeClient()) - query.distinct_on = _DISTINCT_ON1 - self.assertEqual(query.distinct_on, _DISTINCT_ON1) - query.distinct_on = _DISTINCT_ON2 - self.assertEqual(query.distinct_on, _DISTINCT_ON2) - - def test_fetch_defaults_w_client_attr(self): - connection = _Connection() - client = 
self._makeClient(connection) - query = self._makeOne(client) - iterator = query.fetch() - self.assertTrue(iterator._query is query) - self.assertTrue(iterator._client is client) - self.assertEqual(iterator._limit, None) - self.assertEqual(iterator._offset, 0) - - def test_fetch_w_explicit_client(self): - connection = _Connection() - client = self._makeClient(connection) - other_client = self._makeClient(connection) - query = self._makeOne(client) - iterator = query.fetch(limit=7, offset=8, client=other_client) - self.assertTrue(iterator._query is query) - self.assertTrue(iterator._client is other_client) - self.assertEqual(iterator._limit, 7) - self.assertEqual(iterator._offset, 8) - - -class TestIterator(unittest2.TestCase): - _PROJECT = 'PROJECT' - _NAMESPACE = 'NAMESPACE' - _KIND = 'KIND' - _ID = 123 - _START = b'\x00' - _END = b'\xFF' - - def _getTargetClass(self): - from gcloud.datastore.query import Iterator - return Iterator - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _addQueryResults(self, connection, cursor=_END, more=False): - from gcloud.datastore._generated import entity_pb2 - from gcloud.datastore._generated import query_pb2 - from gcloud.datastore.helpers import _new_value_pb - - MORE = query_pb2.QueryResultBatch.NOT_FINISHED - NO_MORE = query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT - _ID = 123 - entity_pb = entity_pb2.Entity() - entity_pb.key.partition_id.project_id = self._PROJECT - path_element = entity_pb.key.path.add() - path_element.kind = self._KIND - path_element.id = _ID - value_pb = _new_value_pb(entity_pb, 'foo') - value_pb.string_value = u'Foo' - connection._results.append( - ([entity_pb], cursor, MORE if more else NO_MORE)) - - def _makeClient(self, connection=None): - if connection is None: - connection = _Connection() - return _Client(self._PROJECT, connection) - - def test_ctor_defaults(self): - connection = _Connection() - query = object() - iterator = self._makeOne(query, connection) - self.assertTrue(iterator._query is query) - self.assertEqual(iterator._limit, None) - self.assertEqual(iterator._offset, 0) - - def test_ctor_explicit(self): - client = self._makeClient() - query = _Query(client) - iterator = self._makeOne(query, client, 13, 29) - self.assertTrue(iterator._query is query) - self.assertEqual(iterator._limit, 13) - self.assertEqual(iterator._offset, 29) - - def test_next_page_no_cursors_no_more(self): - from gcloud.datastore.query import _pb_from_query - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - self._addQueryResults(connection, cursor=b'') - iterator = self._makeOne(query, client) - entities, more_results, cursor = iterator.next_page() - - self.assertEqual(cursor, None) - self.assertFalse(more_results) - self.assertFalse(iterator._more_results) - self.assertEqual(len(entities), 1) - self.assertEqual(entities[0].key.path, - [{'kind': self._KIND, 'id': self._ID}]) - self.assertEqual(entities[0]['foo'], u'Foo') - qpb = _pb_from_query(query) - qpb.offset = 0 - EXPECTED = { - 'project': self._PROJECT, - 'query_pb': qpb, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - self.assertEqual(connection._called_with, [EXPECTED]) - - def test_next_page_no_cursors_no_more_w_offset_and_limit(self): - from gcloud.datastore.query import _pb_from_query - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - 
self._addQueryResults(connection, cursor=b'') - iterator = self._makeOne(query, client, 13, 29) - entities, more_results, cursor = iterator.next_page() - - self.assertEqual(cursor, None) - self.assertFalse(more_results) - self.assertFalse(iterator._more_results) - self.assertEqual(len(entities), 1) - self.assertEqual(entities[0].key.path, - [{'kind': self._KIND, 'id': self._ID}]) - self.assertEqual(entities[0]['foo'], u'Foo') - qpb = _pb_from_query(query) - qpb.limit.value = 13 - qpb.offset = 29 - EXPECTED = { - 'project': self._PROJECT, - 'query_pb': qpb, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - self.assertEqual(connection._called_with, [EXPECTED]) - - def test_next_page_w_cursors_w_more(self): - from base64 import urlsafe_b64decode - from base64 import urlsafe_b64encode - from gcloud.datastore.query import _pb_from_query - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - self._addQueryResults(connection, cursor=self._END, more=True) - iterator = self._makeOne(query, client) - iterator._start_cursor = self._START - iterator._end_cursor = self._END - entities, more_results, cursor = iterator.next_page() - - self.assertEqual(cursor, urlsafe_b64encode(self._END)) - self.assertTrue(more_results) - self.assertTrue(iterator._more_results) - self.assertEqual(iterator._end_cursor, None) - self.assertEqual(urlsafe_b64decode(iterator._start_cursor), self._END) - self.assertEqual(len(entities), 1) - self.assertEqual(entities[0].key.path, - [{'kind': self._KIND, 'id': self._ID}]) - self.assertEqual(entities[0]['foo'], u'Foo') - qpb = _pb_from_query(query) - qpb.offset = 0 - qpb.start_cursor = urlsafe_b64decode(self._START) - qpb.end_cursor = urlsafe_b64decode(self._END) - EXPECTED = { - 'project': self._PROJECT, - 'query_pb': qpb, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - self.assertEqual(connection._called_with, [EXPECTED]) - - def test_next_page_w_cursors_w_bogus_more(self): - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - self._addQueryResults(connection, cursor=self._END, more=True) - epb, cursor, _ = connection._results.pop() - connection._results.append((epb, cursor, 4)) # invalid enum - iterator = self._makeOne(query, client) - self.assertRaises(ValueError, iterator.next_page) - - def test___iter___no_more(self): - from gcloud.datastore.query import _pb_from_query - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - self._addQueryResults(connection) - iterator = self._makeOne(query, client) - entities = list(iterator) - - self.assertFalse(iterator._more_results) - self.assertEqual(len(entities), 1) - self.assertEqual(entities[0].key.path, - [{'kind': self._KIND, 'id': self._ID}]) - self.assertEqual(entities[0]['foo'], u'Foo') - qpb = _pb_from_query(query) - qpb.offset = 0 - EXPECTED = { - 'project': self._PROJECT, - 'query_pb': qpb, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - self.assertEqual(connection._called_with, [EXPECTED]) - - def test___iter___w_more(self): - from gcloud.datastore.query import _pb_from_query - connection = _Connection() - client = self._makeClient(connection) - query = _Query(client, self._KIND, self._PROJECT, self._NAMESPACE) - self._addQueryResults(connection, cursor=self._END, more=True) - self._addQueryResults(connection) - iterator = 
self._makeOne(query, client) - entities = list(iterator) - - self.assertFalse(iterator._more_results) - self.assertEqual(len(entities), 2) - for entity in entities: - self.assertEqual( - entity.key.path, - [{'kind': self._KIND, 'id': self._ID}]) - self.assertEqual(entities[1]['foo'], u'Foo') - qpb1 = _pb_from_query(query) - qpb1.offset = 0 - qpb2 = _pb_from_query(query) - qpb2.offset = 0 - qpb2.start_cursor = self._END - EXPECTED1 = { - 'project': self._PROJECT, - 'query_pb': qpb1, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - EXPECTED2 = { - 'project': self._PROJECT, - 'query_pb': qpb2, - 'namespace': self._NAMESPACE, - 'transaction_id': None, - } - self.assertEqual(len(connection._called_with), 2) - self.assertEqual(connection._called_with[0], EXPECTED1) - self.assertEqual(connection._called_with[1], EXPECTED2) - - -class Test__pb_from_query(unittest2.TestCase): - - def _callFUT(self, query): - from gcloud.datastore.query import _pb_from_query - return _pb_from_query(query) - - def test_empty(self): - from gcloud.datastore._generated import query_pb2 - - pb = self._callFUT(_Query()) - self.assertEqual(list(pb.projection), []) - self.assertEqual(list(pb.kind), []) - self.assertEqual(list(pb.order), []) - self.assertEqual(list(pb.distinct_on), []) - self.assertEqual(pb.filter.property_filter.property.name, '') - cfilter = pb.filter.composite_filter - self.assertEqual(cfilter.op, - query_pb2.CompositeFilter.OPERATOR_UNSPECIFIED) - self.assertEqual(list(cfilter.filters), []) - self.assertEqual(pb.start_cursor, b'') - self.assertEqual(pb.end_cursor, b'') - self.assertEqual(pb.limit.value, 0) - self.assertEqual(pb.offset, 0) - - def test_projection(self): - pb = self._callFUT(_Query(projection=['a', 'b', 'c'])) - self.assertEqual([item.property.name for item in pb.projection], - ['a', 'b', 'c']) - - def test_kind(self): - pb = self._callFUT(_Query(kind='KIND')) - self.assertEqual([item.name for item in pb.kind], ['KIND']) - - def test_ancestor(self): - from gcloud.datastore.key import Key - from gcloud.datastore._generated import query_pb2 - - ancestor = Key('Ancestor', 123, project='PROJECT') - pb = self._callFUT(_Query(ancestor=ancestor)) - cfilter = pb.filter.composite_filter - self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND) - self.assertEqual(len(cfilter.filters), 1) - pfilter = cfilter.filters[0].property_filter - self.assertEqual(pfilter.property.name, '__key__') - ancestor_pb = ancestor.to_protobuf() - self.assertEqual(pfilter.value.key_value, ancestor_pb) - - def test_filter(self): - from gcloud.datastore._generated import query_pb2 - - query = _Query(filters=[('name', '=', u'John')]) - query.OPERATORS = { - '=': query_pb2.PropertyFilter.EQUAL, - } - pb = self._callFUT(query) - cfilter = pb.filter.composite_filter - self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND) - self.assertEqual(len(cfilter.filters), 1) - pfilter = cfilter.filters[0].property_filter - self.assertEqual(pfilter.property.name, 'name') - self.assertEqual(pfilter.value.string_value, u'John') - - def test_filter_key(self): - from gcloud.datastore.key import Key - from gcloud.datastore._generated import query_pb2 - - key = Key('Kind', 123, project='PROJECT') - query = _Query(filters=[('__key__', '=', key)]) - query.OPERATORS = { - '=': query_pb2.PropertyFilter.EQUAL, - } - pb = self._callFUT(query) - cfilter = pb.filter.composite_filter - self.assertEqual(cfilter.op, query_pb2.CompositeFilter.AND) - self.assertEqual(len(cfilter.filters), 1) - pfilter = 
cfilter.filters[0].property_filter - self.assertEqual(pfilter.property.name, '__key__') - key_pb = key.to_protobuf() - self.assertEqual(pfilter.value.key_value, key_pb) - - def test_order(self): - from gcloud.datastore._generated import query_pb2 - - pb = self._callFUT(_Query(order=['a', '-b', 'c'])) - self.assertEqual([item.property.name for item in pb.order], - ['a', 'b', 'c']) - self.assertEqual([item.direction for item in pb.order], - [query_pb2.PropertyOrder.ASCENDING, - query_pb2.PropertyOrder.DESCENDING, - query_pb2.PropertyOrder.ASCENDING]) - - def test_distinct_on(self): - pb = self._callFUT(_Query(distinct_on=['a', 'b', 'c'])) - self.assertEqual([item.name for item in pb.distinct_on], - ['a', 'b', 'c']) - - -class _Query(object): - - def __init__(self, - client=object(), - kind=None, - project=None, - namespace=None, - ancestor=None, - filters=(), - projection=(), - order=(), - distinct_on=()): - self._client = client - self.kind = kind - self.project = project - self.namespace = namespace - self.ancestor = ancestor - self.filters = filters - self.projection = projection - self.order = order - self.distinct_on = distinct_on - - -class _Connection(object): - - _called_with = None - _cursor = b'\x00' - _skipped = 0 - - def __init__(self): - self._results = [] - self._called_with = [] - - def run_query(self, **kw): - self._called_with.append(kw) - result, self._results = self._results[0], self._results[1:] - return result - - -class _Client(object): - - def __init__(self, project, connection, namespace=None): - self.project = project - self.connection = connection - self.namespace = namespace - - @property - def current_transaction(self): - pass diff --git a/gcloud/datastore/test_transaction.py b/gcloud/datastore/test_transaction.py deleted file mode 100644 index 5f780f63388a..000000000000 --- a/gcloud/datastore/test_transaction.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
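The deleted query tests above exercise the public ``Query`` surface (filters, projection, ordering, key-only queries, fetching); the transaction tests below do the same for ``Transaction``. A doctest-style sketch of that query surface in ordinary use, assuming a working ``datastore.Client`` (the ``Person`` kind, property names, and limit are illustrative, not taken from the tests)::

    >>> from gcloud import datastore
    >>> client = datastore.Client()
    >>> query = client.query(kind='Person')   # client-bound Query factory
    >>> query.add_filter('age', '>=', 18)     # one of the tested operators
    >>> query.order = ['-age']                # descending order, as in the order tests
    >>> people = list(query.fetch(limit=10))  # Iterator pages through results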
- -import unittest2 - - -class TestTransaction(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.datastore.transaction import Transaction - return Transaction - - def _makeOne(self, client, **kw): - return self._getTargetClass()(client, **kw) - - def test_ctor_defaults(self): - from gcloud.datastore._generated import datastore_pb2 - - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - self.assertEqual(xact.project, _PROJECT) - self.assertEqual(xact.connection, connection) - self.assertEqual(xact.id, None) - self.assertEqual(xact._status, self._getTargetClass()._INITIAL) - self.assertTrue(isinstance(xact._commit_request, - datastore_pb2.CommitRequest)) - self.assertTrue(xact.mutations is xact._commit_request.mutations) - self.assertEqual(len(xact._partial_key_entities), 0) - - def test_current(self): - from gcloud.datastore.test_client import _NoCommitBatch - _PROJECT = 'PROJECT' - connection = _Connection() - client = _Client(_PROJECT, connection) - xact1 = self._makeOne(client) - xact2 = self._makeOne(client) - self.assertTrue(xact1.current() is None) - self.assertTrue(xact2.current() is None) - with xact1: - self.assertTrue(xact1.current() is xact1) - self.assertTrue(xact2.current() is xact1) - with _NoCommitBatch(client): - self.assertTrue(xact1.current() is None) - self.assertTrue(xact2.current() is None) - with xact2: - self.assertTrue(xact1.current() is xact2) - self.assertTrue(xact2.current() is xact2) - with _NoCommitBatch(client): - self.assertTrue(xact1.current() is None) - self.assertTrue(xact2.current() is None) - self.assertTrue(xact1.current() is xact1) - self.assertTrue(xact2.current() is xact1) - self.assertTrue(xact1.current() is None) - self.assertTrue(xact2.current() is None) - - def test_begin(self): - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact.begin() - self.assertEqual(xact.id, 234) - self.assertEqual(connection._begun, _PROJECT) - - def test_begin_tombstoned(self): - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact.begin() - self.assertEqual(xact.id, 234) - self.assertEqual(connection._begun, _PROJECT) - - xact.rollback() - self.assertEqual(xact.id, None) - - self.assertRaises(ValueError, xact.begin) - - def test_rollback(self): - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact.begin() - xact.rollback() - self.assertEqual(xact.id, None) - self.assertEqual(connection._rolled_back, (_PROJECT, 234)) - - def test_commit_no_partial_keys(self): - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact._commit_request = commit_request = object() - xact.begin() - xact.commit() - self.assertEqual(connection._committed, - (_PROJECT, commit_request, 234)) - self.assertEqual(xact.id, None) - - def test_commit_w_partial_keys(self): - _PROJECT = 'PROJECT' - _KIND = 'KIND' - _ID = 123 - connection = _Connection(234) - connection._completed_keys = [_make_key(_KIND, _ID, _PROJECT)] - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - entity = _Entity() - xact.put(entity) - xact._commit_request = commit_request = object() - xact.begin() - xact.commit() - self.assertEqual(connection._committed, - (_PROJECT, commit_request, 234)) - 
self.assertEqual(xact.id, None) - self.assertEqual(entity.key.path, [{'kind': _KIND, 'id': _ID}]) - - def test_context_manager_no_raise(self): - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact._commit_request = commit_request = object() - with xact: - self.assertEqual(xact.id, 234) - self.assertEqual(connection._begun, _PROJECT) - self.assertEqual(connection._committed, - (_PROJECT, commit_request, 234)) - self.assertEqual(xact.id, None) - - def test_context_manager_w_raise(self): - - class Foo(Exception): - pass - - _PROJECT = 'PROJECT' - connection = _Connection(234) - client = _Client(_PROJECT, connection) - xact = self._makeOne(client) - xact._mutation = object() - try: - with xact: - self.assertEqual(xact.id, 234) - self.assertEqual(connection._begun, _PROJECT) - raise Foo() - except Foo: - self.assertEqual(xact.id, None) - self.assertEqual(connection._rolled_back, (_PROJECT, 234)) - self.assertEqual(connection._committed, None) - self.assertEqual(xact.id, None) - - -def _make_key(kind, id_, project): - from gcloud.datastore._generated import entity_pb2 - - key = entity_pb2.Key() - key.partition_id.project_id = project - elem = key.path.add() - elem.kind = kind - elem.id = id_ - return key - - -class _Connection(object): - _marker = object() - _begun = _rolled_back = _committed = None - - def __init__(self, xact_id=123): - self._xact_id = xact_id - self._completed_keys = [] - self._index_updates = 0 - - def begin_transaction(self, project): - self._begun = project - return self._xact_id - - def rollback(self, project, transaction_id): - self._rolled_back = project, transaction_id - - def commit(self, project, commit_request, transaction_id): - self._committed = (project, commit_request, transaction_id) - return self._index_updates, self._completed_keys - - -class _Entity(dict): - - def __init__(self): - super(_Entity, self).__init__() - from gcloud.datastore.key import Key - self.key = Key('KIND', project='PROJECT') - - -class _Client(object): - - def __init__(self, project, connection, namespace=None): - self.project = project - self.connection = connection - self.namespace = namespace - self._batches = [] - - def _push_batch(self, batch): - self._batches.insert(0, batch) - - def _pop_batch(self): - return self._batches.pop(0) - - @property - def current_batch(self): - return self._batches and self._batches[0] or None diff --git a/gcloud/datastore/transaction.py b/gcloud/datastore/transaction.py deleted file mode 100644 index dc78c7ba99f9..000000000000 --- a/gcloud/datastore/transaction.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud datastore transactions.""" - -from gcloud.datastore.batch import Batch - - -class Transaction(Batch): - """An abstraction representing datastore Transactions. 
- - Transactions can be used to build up a bulk mutation and ensure all - or none succeed (transactionally). - - For example, the following snippet of code will put the two ``save`` - operations (either ``insert`` or ``upsert``) into the same - mutation, and execute those within a transaction:: - - >>> from gcloud import datastore - >>> client = datastore.Client() - >>> with client.transaction(): - ... client.put_multi([entity1, entity2]) - - Because it derives from :class:`Batch <.datastore.batch.Batch>`, - :class:`Transaction` also provides :meth:`put` and :meth:`delete` methods:: - - >>> with client.transaction() as xact: - ... xact.put(entity1) - ... xact.delete(entity2.key) - - By default, the transaction is rolled back if the transaction block - exits with an error:: - - >>> with client.transaction(): - ... do_some_work() - ... raise SomeException() # rolls back - - If the transaction block exists without an exception, it will commit - by default. - - .. warning:: Inside a transaction, automatically assigned IDs for - entities will not be available at save time! That means, if you - try:: - - >>> with client.transaction(): - ... entity = datastore.Entity(key=client.key('Thing')) - ... client.put(entity) - - ``entity`` won't have a complete key until the transaction is - committed. - - Once you exit the transaction (or call :meth:`commit`), the - automatically generated ID will be assigned to the entity:: - - >>> with client.transaction(): - ... entity = datastore.Entity(key=client.key('Thing')) - ... client.put(entity) - ... print(entity.key.is_partial) # There is no ID on this key. - ... - True - >>> print(entity.key.is_partial) # There *is* an ID. - False - - If you don't want to use the context manager you can initialize a - transaction manually:: - - >>> transaction = client.transaction() - >>> transaction.begin() - >>> - >>> entity = datastore.Entity(key=client.key('Thing')) - >>> transaction.put(entity) - >>> - >>> if error: - ... transaction.rollback() - ... else: - ... transaction.commit() - - :type client: :class:`gcloud.datastore.client.Client` - :param client: the client used to connect to datastore. - """ - - def __init__(self, client): - super(Transaction, self).__init__(client) - self._id = None - - @property - def id(self): - """Getter for the transaction ID. - - :rtype: string - :returns: The ID of the current transaction. - """ - return self._id - - def current(self): - """Return the topmost transaction. - - .. note:: - - If the topmost element on the stack is not a transaction, - returns None. - - :rtype: :class:`gcloud.datastore.transaction.Transaction` or None - """ - top = super(Transaction, self).current() - if isinstance(top, Transaction): - return top - - def begin(self): - """Begins a transaction. - - This method is called automatically when entering a with - statement, however it can be called explicitly if you don't want - to use a context manager. - - :raises: :class:`ValueError` if the transaction has already begun. - """ - super(Transaction, self).begin() - self._id = self.connection.begin_transaction(self.project) - - def rollback(self): - """Rolls back the current transaction. - - This method has necessary side-effects: - - - Sets the current connection's transaction reference to None. - - Sets the current transaction's ID to None. - """ - try: - self.connection.rollback(self.project, self._id) - finally: - super(Transaction, self).rollback() - # Clear our own ID in case this gets accidentally reused. 
- self._id = None - - def commit(self): - """Commits the transaction. - - This is called automatically upon exiting a with statement, - however it can be called explicitly if you don't want to use a - context manager. - - This method has necessary side-effects: - - - Sets the current transaction's ID to None. - """ - try: - super(Transaction, self).commit() - finally: - # Clear our own ID in case this gets accidentally reused. - self._id = None diff --git a/gcloud/dns/__init__.py b/gcloud/dns/__init__.py deleted file mode 100644 index f92c143d0277..000000000000 --- a/gcloud/dns/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud DNS API wrapper. - -The main concepts with this API are: - -- :class:`gcloud.DNS.zone.ManagedZone` represents an collection of tables. -- :class:`gcloud.DNS.resource_record_set.ResourceRecordSet` represents a - single resource definition within a zone. -- :class:`gcloud.DNS.changes.Changes` represents a set of changes (adding/ - deleting resource record sets) to a zone. -""" - -from gcloud.dns.zone import Changes -from gcloud.dns.client import Client -from gcloud.dns.connection import Connection -from gcloud.dns.zone import ManagedZone -from gcloud.dns.resource_record_set import ResourceRecordSet - - -SCOPE = Connection.SCOPE diff --git a/gcloud/dns/changes.py b/gcloud/dns/changes.py deleted file mode 100644 index e3e05e723397..000000000000 --- a/gcloud/dns/changes.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API ResourceRecordSets.""" - -import six - -from gcloud._helpers import _rfc3339_to_datetime -from gcloud.exceptions import NotFound -from gcloud.dns.resource_record_set import ResourceRecordSet - - -class Changes(object): - """Changes are bundled additions / deletions of DNS resource records. - - Changes are owned by a :class:`gcloud.dns.zone.ManagedZone` instance. - - See: - https://cloud.google.com/dns/api/v1/changes - - :type zone: :class:`gcloud.dns.zone.ManagedZone` - :param zone: A zone which holds one or more record sets. 
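    A minimal usage sketch (doctest-style; the record name, type, TTL, and
    data are illustrative, and ``zone`` is assumed to be a
    :class:`gcloud.dns.zone.ManagedZone` already created via a client)::

        >>> from gcloud.dns.changes import Changes
        >>> from gcloud.dns.resource_record_set import ResourceRecordSet
        >>> changes = Changes(zone)
        >>> added = ResourceRecordSet('www.example.com.', 'A', 3600,
        ...                           ['10.0.0.1'], zone)
        >>> changes.add_record_set(added)
        >>> changes.create()  # API request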
- """ - - def __init__(self, zone): - self.zone = zone - self._properties = {} - self._additions = self._deletions = () - - @classmethod - def from_api_repr(cls, resource, zone): - """Factory: construct a change set given its API representation - - :type resource: dict - :param resource: change set representation returned from the API - - :type zone: :class:`gcloud.dns.zone.ManagedZone` - :param zone: A zone which holds zero or more change sets. - - :rtype: :class:`gcloud.dns.changes.Changes` - :returns: RRS parsed from ``resource``. - """ - changes = cls(zone=zone) - changes._set_properties(resource) - return changes - - def _set_properties(self, resource): - """Helper method for :meth:`from_api_repr`, :meth:`create`, etc. - - :type resource: dict - :param resource: change set representation returned from the API - """ - resource = resource.copy() - self._additions = tuple([ - ResourceRecordSet.from_api_repr(added_res, self.zone) - for added_res in resource.pop('additions', ())]) - self._deletions = tuple([ - ResourceRecordSet.from_api_repr(added_res, self.zone) - for added_res in resource.pop('deletions', ())]) - self._properties = resource - - @property - def path(self): - """URL path for change set APIs. - - :rtype: string - :returns: the path based on project, zone, and change set names. - """ - return '/projects/%s/managedZones/%s/changes/%s' % ( - self.zone.project, self.zone.name, self.name) - - @property - def name(self): - """Name of the change set. - - :rtype: string or ``NoneType`` - :returns: Name, as set by the back-end, or None. - """ - return self._properties.get('id') - - @name.setter - def name(self, value): - """Update name of the change set. - - :type value: string - :param value: New name for the changeset. - """ - if not isinstance(value, six.string_types): - raise ValueError("Pass a string") - self._properties['id'] = value - - @property - def status(self): - """Status of the change set. - - :rtype: string or ``NoneType`` - :returns: Status, as set by the back-end, or None. - """ - return self._properties.get('status') - - @property - def started(self): - """Time when the change set was started. - - :rtype: ``datetime.datetime`` or ``NoneType`` - :returns: Time, as set by the back-end, or None. - """ - stamp = self._properties.get('startTime') - if stamp is not None: - return _rfc3339_to_datetime(stamp) - - @property - def additions(self): - """Resource record sets to be added to the zone. - - :rtype: sequence of - :class:`gcloud.dns.resource_record_set.ResourceRecordSet`. - :returns: record sets appended via :meth:`add_record_set` - """ - return self._additions - - @property - def deletions(self): - """Resource record sets to be deleted from the zone. - - :rtype: sequence of - :class:`gcloud.dns.resource_record_set.ResourceRecordSet`. - :returns: record sets appended via :meth:`delete_record_set` - """ - return self._deletions - - def add_record_set(self, record_set): - """Append a record set to the 'additions' for the change set. - - :type record_set: - :class:`gcloud.dns.resource_record_set.ResourceRecordSet` - :param record_set: the record set to append - - :raises: ``ValueError`` if ``record_set`` is not of the required type. - """ - if not isinstance(record_set, ResourceRecordSet): - raise ValueError("Pass a ResourceRecordSet") - self._additions += (record_set,) - - def delete_record_set(self, record_set): - """Append a record set to the 'deletions' for the change set. 
- - :type record_set: - :class:`gcloud.dns.resource_record_set.ResourceRecordSet` - :param record_set: the record set to append - - :raises: ``ValueError`` if ``record_set`` is not of the required type. - """ - if not isinstance(record_set, ResourceRecordSet): - raise ValueError("Pass a ResourceRecordSet") - self._deletions += (record_set,) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - - :rtype: :class:`gcloud.dns.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self.zone._client - return client - - def _build_resource(self): - """Generate a resource for ``create``.""" - additions = [{ - 'name': added.name, - 'type': added.record_type, - 'ttl': str(added.ttl), - 'rrdatas': added.rrdatas, - } for added in self.additions] - - deletions = [{ - 'name': deleted.name, - 'type': deleted.record_type, - 'ttl': str(deleted.ttl), - 'rrdatas': deleted.rrdatas, - } for deleted in self.deletions] - - return { - 'additions': additions, - 'deletions': deletions, - } - - def create(self, client=None): - """API call: create the change set via a POST request - - See: - https://cloud.google.com/dns/api/v1/changes/create - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - if len(self.additions) == 0 and len(self.deletions) == 0: - raise ValueError("No record sets added or deleted") - client = self._require_client(client) - path = '/projects/%s/managedZones/%s/changes' % ( - self.zone.project, self.zone.name) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test for the existence of the change set via a GET request - - See - https://cloud.google.com/dns/api/v1/changes/get - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - client = self._require_client(client) - try: - client.connection.api_request(method='GET', path=self.path, - query_params={'fields': 'id'}) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: refresh zone properties via a GET request - - See - https://cloud.google.com/dns/api/v1/changes/get - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) diff --git a/gcloud/dns/client.py b/gcloud/dns/client.py deleted file mode 100644 index ddc4ded769be..000000000000 --- a/gcloud/dns/client.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud DNS API.""" - - -from gcloud.client import JSONClient -from gcloud.dns.connection import Connection -from gcloud.dns.zone import ManagedZone - - -class Client(JSONClient): - """Client to bundle configuration needed for API requests. - - :type project: string - :param project: the project which the client acts on behalf of. Will be - passed when creating a zone. If not passed, - falls back to the default inferred from the environment. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. - """ - - _connection_class = Connection - - def quotas(self): - """Return DNS quots for the project associated with this client. - - See: - https://cloud.google.com/dns/api/v1/projects/get - - :rtype: mapping - :returns: keys for the mapping correspond to those of the ``quota`` - sub-mapping of the project resource. - """ - path = '/projects/%s' % (self.project,) - resp = self.connection.api_request(method='GET', path=path) - - return dict([(key, int(value)) - for key, value in resp['quota'].items() if key != 'kind']) - - def list_zones(self, max_results=None, page_token=None): - """List zones for the project associated with this client. - - See: - https://cloud.google.com/dns/api/v1/managedZones/list - - :type max_results: int - :param max_results: maximum number of zones to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of zones. If - not passed, the API will return the first page of - zones. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.dns.zone.ManagedZone`, plus a - "next page token" string: if the token is not None, - indicates that more zones can be retrieved with another - call (pass that value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/managedZones' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - zones = [ManagedZone.from_api_repr(resource, self) - for resource in resp['managedZones']] - return zones, resp.get('nextPageToken') - - def zone(self, name, dns_name): - """Construct a zone bound to this client. - - :type name: string - :param name: Name of the zone. - - :type dns_name: string - :param dns_name: DNS name of the zone. 
- - :rtype: :class:`gcloud.dns.zone.ManagedZone` - :returns: a new ``ManagedZone`` instance - """ - return ManagedZone(name, dns_name, client=self) diff --git a/gcloud/dns/connection.py b/gcloud/dns/connection.py deleted file mode 100644 index e2b382fd9daa..000000000000 --- a/gcloud/dns/connection.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud dns connections.""" - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud DNS via the JSON REST API.""" - - API_BASE_URL = 'https://www.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v1' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/dns/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/ndev.clouddns.readwrite',) - """The scopes required for authenticating as a Cloud DNS consumer.""" diff --git a/gcloud/dns/resource_record_set.py b/gcloud/dns/resource_record_set.py deleted file mode 100644 index dbd95b3b17c5..000000000000 --- a/gcloud/dns/resource_record_set.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API ResourceRecordSets.""" - - -class ResourceRecordSet(object): - """ResourceRecordSets are DNS resource records. - - RRS are owned by a :class:`gcloud.dns.zone.ManagedZone` instance. - - See: - https://cloud.google.com/dns/api/v1/resourceRecordSets - - :type name: string - :param name: the name of the record set - - :type record_type: string - :param record_type: the RR type of the zone - - :type ttl: integer - :param ttl: TTL (in seconds) for caching the record sets - - :type rrdatas: list of string - :param rrdatas: one or more lines containing the resource data - - :type zone: :class:`gcloud.dns.zone.ManagedZone` - :param zone: A zone which holds one or more record sets. 
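    A minimal construction sketch (doctest-style; the record values are
    illustrative, and ``zone`` is assumed to be an existing
    :class:`gcloud.dns.zone.ManagedZone`)::

        >>> rrs = ResourceRecordSet('www.example.com.', 'A', 3600,
        ...                         ['10.0.0.1'], zone)
        >>> rrs.record_type, rrs.ttl
        ('A', 3600)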
- """ - - def __init__(self, name, record_type, ttl, rrdatas, zone): - self.name = name - self.record_type = record_type - self.ttl = ttl - self.rrdatas = rrdatas - self.zone = zone - - @classmethod - def from_api_repr(cls, resource, zone): - """Factory: construct a record set given its API representation - - :type resource: dict - :param resource: record sets representation returned from the API - - :type zone: :class:`gcloud.dns.zone.ManagedZone` - :param zone: A zone which holds one or more record sets. - - :rtype: :class:`gcloud.dns.zone.ResourceRecordSet` - :returns: RRS parsed from ``resource``. - """ - name = resource['name'] - record_type = resource['type'] - ttl = int(resource['ttl']) - rrdatas = resource['rrdatas'] - return cls(name, record_type, ttl, rrdatas, zone=zone) diff --git a/gcloud/dns/test_changes.py b/gcloud/dns/test_changes.py deleted file mode 100644 index f7902a106bc9..000000000000 --- a/gcloud/dns/test_changes.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestChanges(unittest2.TestCase): - PROJECT = 'project' - ZONE_NAME = 'example.com' - CHANGES_NAME = 'changeset_id' - - def _getTargetClass(self): - from gcloud.dns.changes import Changes - return Changes - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - from gcloud._helpers import UTC - from gcloud._helpers import _NOW - self.WHEN = _NOW().replace(tzinfo=UTC) - - def _makeResource(self): - from gcloud._helpers import _datetime_to_rfc3339 - when_str = _datetime_to_rfc3339(self.WHEN) - return { - 'kind': 'dns#change', - 'id': self.CHANGES_NAME, - 'startTime': when_str, - 'status': 'done', - 'additions': [ - {'name': 'test.example.com', - 'type': 'CNAME', - 'ttl': '3600', - 'rrdatas': ['www.example.com']}, - ], - 'deletions': [ - {'name': 'test.example.com', - 'type': 'CNAME', - 'ttl': '86400', - 'rrdatas': ['other.example.com']}, - ], - } - - def _verifyResourceProperties(self, changes, resource, zone): - from gcloud._helpers import _rfc3339_to_datetime - from gcloud._helpers import UTC - self.assertEqual(changes.name, resource['id']) - started = _rfc3339_to_datetime(resource['startTime']) - self.assertEqual(changes.started, started) - self.assertEqual(changes.status, resource['status']) - - r_additions = resource.get('additions', ()) - self.assertEqual(len(changes.additions), len(r_additions)) - for found, expected in zip(changes.additions, r_additions): - self.assertEqual(found.name, expected['name']) - self.assertEqual(found.record_type, expected['type']) - self.assertEqual(found.ttl, int(expected['ttl'])) - self.assertEqual(found.rrdatas, expected['rrdatas']) - self.assertTrue(found.zone is zone) - - r_deletions = resource.get('deletions', ()) - self.assertEqual(len(changes.deletions), len(r_deletions)) - for found, expected in zip(changes.deletions, r_deletions): - self.assertEqual(found.name, expected['name']) - 
self.assertEqual(found.record_type, expected['type']) - self.assertEqual(found.ttl, int(expected['ttl'])) - self.assertEqual(found.rrdatas, expected['rrdatas']) - self.assertTrue(found.zone is zone) - - def test_ctor(self): - zone = _Zone() - - changes = self._makeOne(zone) - - self.assertTrue(changes.zone is zone) - self.assertEqual(changes.name, None) - self.assertEqual(changes.status, None) - self.assertEqual(changes.started, None) - self.assertEqual(list(changes.additions), []) - self.assertEqual(list(changes.deletions), []) - - def test_from_api_repr_missing_additions_deletions(self): - self._setUpConstants() - RESOURCE = self._makeResource() - del RESOURCE['additions'] - del RESOURCE['deletions'] - zone = _Zone() - klass = self._getTargetClass() - - changes = klass.from_api_repr(RESOURCE, zone=zone) - - self._verifyResourceProperties(changes, RESOURCE, zone) - - def test_from_api_repr(self): - self._setUpConstants() - RESOURCE = self._makeResource() - zone = _Zone() - klass = self._getTargetClass() - - changes = klass.from_api_repr(RESOURCE, zone=zone) - - self._verifyResourceProperties(changes, RESOURCE, zone) - - def test_name_setter_bad_value(self): - zone = _Zone() - changes = self._makeOne(zone) - with self.assertRaises(ValueError): - changes.name = 12345 - - def test_name_setter(self): - zone = _Zone() - changes = self._makeOne(zone) - changes.name = 'NAME' - self.assertEqual(changes.name, 'NAME') - - def test_add_record_set_invalid_value(self): - zone = _Zone() - changes = self._makeOne(zone) - - with self.assertRaises(ValueError): - changes.add_record_set(object()) - - def test_add_record_set(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - zone = _Zone() - changes = self._makeOne(zone) - rrs = ResourceRecordSet('test.example.com', 'CNAME', 3600, - ['www.example.com'], zone) - changes.add_record_set(rrs) - self.assertEqual(list(changes.additions), [rrs]) - - def test_delete_record_set_invalid_value(self): - zone = _Zone() - changes = self._makeOne(zone) - - with self.assertRaises(ValueError): - changes.delete_record_set(object()) - - def test_delete_record_set(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - zone = _Zone() - changes = self._makeOne(zone) - rrs = ResourceRecordSet('test.example.com', 'CNAME', 3600, - ['www.example.com'], zone) - changes.delete_record_set(rrs) - self.assertEqual(list(changes.deletions), [rrs]) - - def test_create_wo_additions_or_deletions(self): - self._setUpConstants() - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = _Zone(client) - changes = self._makeOne(zone) - - with self.assertRaises(ValueError): - changes.create() - - self.assertEqual(len(conn._requested), 0) - - def test_create_w_bound_client(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - self._setUpConstants() - RESOURCE = self._makeResource() - PATH = 'projects/%s/managedZones/%s/changes' % ( - self.PROJECT, self.ZONE_NAME) - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = _Zone(client) - changes = self._makeOne(zone) - changes.add_record_set(ResourceRecordSet( - 'test.example.com', 'CNAME', 3600, ['www.example.com'], zone)) - changes.delete_record_set(ResourceRecordSet( - 'test.example.com', 'CNAME', 86400, ['other.example.com'], zone)) - - changes.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - 
self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'additions': RESOURCE['additions'], - 'deletions': RESOURCE['deletions'], - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(changes, RESOURCE, zone) - - def test_create_w_alternate_client(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - self._setUpConstants() - RESOURCE = self._makeResource() - PATH = 'projects/%s/managedZones/%s/changes' % ( - self.PROJECT, self.ZONE_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = _Zone(client1) - changes = self._makeOne(zone) - changes.add_record_set(ResourceRecordSet( - 'test.example.com', 'CNAME', 3600, ['www.example.com'], zone)) - changes.delete_record_set(ResourceRecordSet( - 'test.example.com', 'CNAME', 86400, ['other.example.com'], zone)) - - changes.create(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'additions': RESOURCE['additions'], - 'deletions': RESOURCE['deletions'], - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(changes, RESOURCE, zone) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/managedZones/%s/changes/%s' % ( - self.PROJECT, self.ZONE_NAME, self.CHANGES_NAME) - self._setUpConstants() - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - zone = _Zone(client) - changes = self._makeOne(zone) - changes.name = self.CHANGES_NAME - - self.assertFalse(changes.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/managedZones/%s/changes/%s' % ( - self.PROJECT, self.ZONE_NAME, self.CHANGES_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = _Zone(client1) - changes = self._makeOne(zone) - changes.name = self.CHANGES_NAME - - self.assertTrue(changes.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/managedZones/%s/changes/%s' % ( - self.PROJECT, self.ZONE_NAME, self.CHANGES_NAME) - self._setUpConstants() - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = _Zone(client) - changes = self._makeOne(zone) - changes.name = self.CHANGES_NAME - - changes.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(changes, RESOURCE, zone) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/managedZones/%s/changes/%s' % ( - self.PROJECT, self.ZONE_NAME, self.CHANGES_NAME) - self._setUpConstants() - RESOURCE = self._makeResource() - conn1 = 
_Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = _Zone(client1) - changes = self._makeOne(zone) - changes.name = self.CHANGES_NAME - - changes.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(changes, RESOURCE, zone) - - -class _Zone(object): - - def __init__(self, client=None, project=TestChanges.PROJECT, - name=TestChanges.ZONE_NAME): - self._client = client - self.project = project - self.name = name - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response diff --git a/gcloud/dns/test_client.py b/gcloud/dns/test_client.py deleted file mode 100644 index 44b168f82efe..000000000000 --- a/gcloud/dns/test_client.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
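The ``test_changes.py`` cases above exercise the change-set workflow end to end. Condensed into a usage sketch (zone and record data are hypothetical, the zone is assumed to exist already, and the polling loop is illustrative):

    import time

    from gcloud.dns.client import Client

    client = Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.')

    changes = zone.changes()
    changes.add_record_set(
        zone.resource_record_set('www.example.com.', 'A', 3600,
                                 ['203.0.113.10']))
    changes.create()  # POST .../managedZones/example-zone/changes

    while changes.status != 'done':  # poll until the change set is applied
        time.sleep(1)
        changes.reload()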
- -import unittest2 - - -class TestClient(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.dns.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - from gcloud.dns.connection import Connection - PROJECT = 'PROJECT' - creds = _Credentials() - http = object() - client = self._makeOne(project=PROJECT, credentials=creds, http=http) - self.assertTrue(isinstance(client.connection, Connection)) - self.assertTrue(client.connection.credentials is creds) - self.assertTrue(client.connection.http is http) - - def test_quotas_defaults(self): - PROJECT = 'PROJECT' - PATH = 'projects/%s' % PROJECT - MANAGED_ZONES = 1234 - RRS_PER_RRSET = 23 - RRSETS_PER_ZONE = 345 - RRSET_ADDITIONS = 456 - RRSET_DELETIONS = 567 - TOTAL_SIZE = 67890 - DATA = { - 'quota': { - 'managedZones': str(MANAGED_ZONES), - 'resourceRecordsPerRrset': str(RRS_PER_RRSET), - 'rrsetsPerManagedZone': str(RRSETS_PER_ZONE), - 'rrsetAdditionsPerChange': str(RRSET_ADDITIONS), - 'rrsetDeletionsPerChange': str(RRSET_DELETIONS), - 'totalRrdataSizePerChange': str(TOTAL_SIZE), - } - } - CONVERTED = dict([(key, int(value)) - for key, value in DATA['quota'].items()]) - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - quotas = client.quotas() - - self.assertEqual(quotas, CONVERTED) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_quotas_w_kind_key(self): - PROJECT = 'PROJECT' - PATH = 'projects/%s' % PROJECT - MANAGED_ZONES = 1234 - RRS_PER_RRSET = 23 - RRSETS_PER_ZONE = 345 - RRSET_ADDITIONS = 456 - RRSET_DELETIONS = 567 - TOTAL_SIZE = 67890 - DATA = { - 'quota': { - 'managedZones': str(MANAGED_ZONES), - 'resourceRecordsPerRrset': str(RRS_PER_RRSET), - 'rrsetsPerManagedZone': str(RRSETS_PER_ZONE), - 'rrsetAdditionsPerChange': str(RRSET_ADDITIONS), - 'rrsetDeletionsPerChange': str(RRSET_DELETIONS), - 'totalRrdataSizePerChange': str(TOTAL_SIZE), - } - } - CONVERTED = dict([(key, int(value)) - for key, value in DATA['quota'].items()]) - WITH_KIND = {'quota': DATA['quota'].copy()} - WITH_KIND['quota']['kind'] = 'dns#quota' - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(WITH_KIND) - - quotas = client.quotas() - - self.assertEqual(quotas, CONVERTED) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_zones_defaults(self): - from gcloud.dns.zone import ManagedZone - PROJECT = 'PROJECT' - ID_1 = '123' - ZONE_1 = 'zone_one' - DNS_1 = 'one.example.com' - ID_2 = '234' - ZONE_2 = 'zone_two' - DNS_2 = 'two.example.com' - PATH = 'projects/%s/managedZones' % PROJECT - TOKEN = 'TOKEN' - DATA = { - 'nextPageToken': TOKEN, - 'managedZones': [ - {'kind': 'dns#managedZone', - 'id': ID_1, - 'name': ZONE_1, - 'dnsName': DNS_1}, - {'kind': 'dns#managedZone', - 'id': ID_2, - 'name': ZONE_2, - 'dnsName': DNS_2}, - ] - } - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - zones, token = client.list_zones() - - self.assertEqual(len(zones), len(DATA['managedZones'])) - for found, expected in zip(zones, DATA['managedZones']): - self.assertTrue(isinstance(found, ManagedZone)) - self.assertEqual(found.zone_id, expected['id']) - 
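The quota tests earlier in this file pin down the conversion ``Client.quotas()`` applies: every entry of the ``quota`` sub-mapping except ``kind`` is cast to ``int``. For example, with illustrative values:

    resp = {'quota': {'kind': 'dns#quota',
                      'managedZones': '1234',
                      'rrsetsPerManagedZone': '345'}}
    quotas = dict([(key, int(value))
                   for key, value in resp['quota'].items()
                   if key != 'kind'])
    # quotas == {'managedZones': 1234, 'rrsetsPerManagedZone': 345}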
self.assertEqual(found.name, expected['name']) - self.assertEqual(found.dns_name, expected['dnsName']) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_zones_explicit(self): - from gcloud.dns.zone import ManagedZone - PROJECT = 'PROJECT' - ID_1 = '123' - ZONE_1 = 'zone_one' - DNS_1 = 'one.example.com' - ID_2 = '234' - ZONE_2 = 'zone_two' - DNS_2 = 'two.example.com' - PATH = 'projects/%s/managedZones' % PROJECT - TOKEN = 'TOKEN' - DATA = { - 'managedZones': [ - {'kind': 'dns#managedZone', - 'id': ID_1, - 'name': ZONE_1, - 'dnsName': DNS_1}, - {'kind': 'dns#managedZone', - 'id': ID_2, - 'name': ZONE_2, - 'dnsName': DNS_2}, - ] - } - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - conn = client.connection = _Connection(DATA) - - zones, token = client.list_zones(max_results=3, page_token=TOKEN) - - self.assertEqual(len(zones), len(DATA['managedZones'])) - for found, expected in zip(zones, DATA['managedZones']): - self.assertTrue(isinstance(found, ManagedZone)) - self.assertEqual(found.zone_id, expected['id']) - self.assertEqual(found.name, expected['name']) - self.assertEqual(found.dns_name, expected['dnsName']) - self.assertEqual(token, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': 3, 'pageToken': TOKEN}) - - def test_zone(self): - from gcloud.dns.zone import ManagedZone - PROJECT = 'PROJECT' - ZONE_NAME = 'zone-name' - DNS_NAME = 'test.example.com' - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - zone = client.zone(ZONE_NAME, DNS_NAME) - self.assertTrue(isinstance(zone, ManagedZone)) - self.assertEqual(zone.name, ZONE_NAME) - self.assertEqual(zone.dns_name, DNS_NAME) - self.assertTrue(zone._client is client) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/dns/test_connection.py b/gcloud/dns/test_connection.py deleted file mode 100644 index 1a3f777399f5..000000000000 --- a/gcloud/dns/test_connection.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
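The connection tests that follow show how ``build_api_url`` (inherited from the JSON connection base class) combines the constants from ``gcloud/dns/connection.py``. In sketch form, with a hypothetical path and query parameter:

    from gcloud.dns.connection import Connection

    conn = Connection()
    conn.build_api_url('/foo')
    # -> 'https://www.googleapis.com/dns/v1/foo'

    conn.build_api_url('/foo', {'bar': 'baz'})
    # -> the same URL with '?bar=baz' appended as a query string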
- -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.dns.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_build_api_url_no_extra_query_params(self): - conn = self._makeOne() - URI = '/'.join([ - conn.API_BASE_URL, - 'dns', - conn.API_VERSION, - 'foo', - ]) - self.assertEqual(conn.build_api_url('/foo'), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - conn = self._makeOne() - uri = conn.build_api_url('/foo', {'bar': 'baz'}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, - '/'.join(['', 'dns', conn.API_VERSION, 'foo'])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms['bar'], 'baz') diff --git a/gcloud/dns/test_resource_record_set.py b/gcloud/dns/test_resource_record_set.py deleted file mode 100644 index 8f4bc98bd61b..000000000000 --- a/gcloud/dns/test_resource_record_set.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestResourceRecordSet(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - return ResourceRecordSet - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - zone = _Zone() - - rrs = self._makeOne('test.example.com', 'CNAME', 3600, - ['www.example.com'], zone) - - self.assertEqual(rrs.name, 'test.example.com') - self.assertEqual(rrs.record_type, 'CNAME') - self.assertEqual(rrs.ttl, 3600) - self.assertEqual(rrs.rrdatas, ['www.example.com']) - self.assertTrue(rrs.zone is zone) - - def test_from_api_repr_missing_rrdatas(self): - zone = _Zone() - klass = self._getTargetClass() - - with self.assertRaises(KeyError): - klass.from_api_repr({'name': 'test.example.com', - 'type': 'CNAME', - 'ttl': 3600}, zone=zone) - - def test_from_api_repr_missing_ttl(self): - zone = _Zone() - klass = self._getTargetClass() - - with self.assertRaises(KeyError): - klass.from_api_repr({'name': 'test.example.com', - 'type': 'CNAME', - 'rrdatas': ['www.example.com']}, zone=zone) - - def test_from_api_repr_missing_type(self): - zone = _Zone() - klass = self._getTargetClass() - - with self.assertRaises(KeyError): - klass.from_api_repr({'name': 'test.example.com', - 'ttl': 3600, - 'rrdatas': ['www.example.com']}, zone=zone) - - def test_from_api_repr_missing_name(self): - zone = _Zone() - klass = self._getTargetClass() - - with self.assertRaises(KeyError): - klass.from_api_repr({'type': 'CNAME', - 'ttl': 3600, - 'rrdatas': ['www.example.com']}, zone=zone) - - def test_from_api_repr_bare(self): - zone = _Zone() - RESOURCE = { - 'kind': 'dns#resourceRecordSet', - 'name': 'test.example.com', - 'type': 'CNAME', - 'ttl': '3600', - 
'rrdatas': ['www.example.com'], - } - klass = self._getTargetClass() - rrs = klass.from_api_repr(RESOURCE, zone=zone) - self.assertEqual(rrs.name, 'test.example.com') - self.assertEqual(rrs.record_type, 'CNAME') - self.assertEqual(rrs.ttl, 3600) - self.assertEqual(rrs.rrdatas, ['www.example.com']) - self.assertTrue(rrs.zone is zone) - - -class _Zone(object): - pass diff --git a/gcloud/dns/test_zone.py b/gcloud/dns/test_zone.py deleted file mode 100644 index cec501cb4f29..000000000000 --- a/gcloud/dns/test_zone.py +++ /dev/null @@ -1,631 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestManagedZone(unittest2.TestCase): - PROJECT = 'project' - ZONE_NAME = 'zone-name' - DESCRIPTION = 'ZONE DESCRIPTION' - DNS_NAME = 'test.example.com' - - def _getTargetClass(self): - from gcloud.dns.zone import ManagedZone - return ManagedZone - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - import datetime - from gcloud._helpers import UTC - - year = 2015 - month = 7 - day = 24 - hour = 19 - minute = 53 - seconds = 19 - micros = 6000 - - self.WHEN_STR = '%d-%02d-%02dT%02d:%02d:%02d.%06dZ' % ( - year, month, day, hour, minute, seconds, micros) - self.WHEN = datetime.datetime( - year, month, day, hour, minute, seconds, micros, tzinfo=UTC) - self.ZONE_ID = 12345 - - def _makeResource(self): - self._setUpConstants() - return { - 'name': self.ZONE_NAME, - 'dnsName': self.DNS_NAME, - 'description': self.DESCRIPTION, - 'id': self.ZONE_ID, - 'creationTime': self.WHEN_STR, - 'nameServers': [ - 'ns-cloud1.googledomains.com', - 'ns-cloud2.googledomains.com', - ], - } - - def _verifyReadonlyResourceProperties(self, zone, resource): - - self.assertEqual(zone.zone_id, resource.get('id')) - - if 'creationTime' in resource: - self.assertEqual(zone.created, self.WHEN) - else: - self.assertEqual(zone.created, None) - - if 'nameServers' in resource: - self.assertEqual(zone.name_servers, resource['nameServers']) - else: - self.assertEqual(zone.name_servers, None) - - def _verifyResourceProperties(self, zone, resource): - - self._verifyReadonlyResourceProperties(zone, resource) - - self.assertEqual(zone.name, resource.get('name')) - self.assertEqual(zone.dns_name, resource.get('dnsName')) - self.assertEqual(zone.description, resource.get('description')) - self.assertEqual(zone.zone_id, resource.get('id')) - self.assertEqual(zone.name_server_set, resource.get('nameServerSet')) - - def test_ctor(self): - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - self.assertEqual(zone.name, self.ZONE_NAME) - self.assertEqual(zone.dns_name, self.DNS_NAME) - self.assertTrue(zone._client is client) - self.assertEqual(zone.project, client.project) - self.assertEqual( - zone.path, - '/projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME)) - - self.assertEqual(zone.zone_id, None) - self.assertEqual(zone.created, None) - - 
self.assertEqual(zone.description, None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'name': self.ZONE_NAME, - 'dnsName': self.DNS_NAME, - } - klass = self._getTargetClass() - zone = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(zone._client is client) - self._verifyResourceProperties(zone, RESOURCE) - - def test_from_api_repr_w_properties(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - zone = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(zone._client is client) - self._verifyResourceProperties(zone, RESOURCE) - - def test_description_setter_bad_value(self): - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - with self.assertRaises(ValueError): - zone.description = 12345 - - def test_description_setter(self): - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - zone.description = 'DESCRIPTION' - self.assertEqual(zone.description, 'DESCRIPTION') - - def test_name_server_set_setter_bad_value(self): - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - with self.assertRaises(ValueError): - zone.name_server_set = 12345 - - def test_name_server_set_setter(self): - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - zone.name_server_set = 'NAME_SERVER_SET' - self.assertEqual(zone.name_server_set, 'NAME_SERVER_SET') - - def test_resource_record_set(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - RRS_NAME = 'other.example.com' - RRS_TYPE = 'CNAME' - TTL = 3600 - RRDATAS = ['www.example.com'] - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - rrs = zone.resource_record_set(RRS_NAME, RRS_TYPE, TTL, RRDATAS) - self.assertTrue(isinstance(rrs, ResourceRecordSet)) - self.assertEqual(rrs.name, RRS_NAME) - self.assertEqual(rrs.record_type, RRS_TYPE) - self.assertEqual(rrs.ttl, TTL) - self.assertEqual(rrs.rrdatas, RRDATAS) - self.assertTrue(rrs.zone is zone) - - def test_changes(self): - from gcloud.dns.changes import Changes - client = _Client(self.PROJECT) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - changes = zone.changes() - self.assertTrue(isinstance(changes, Changes)) - self.assertTrue(changes.zone is zone) - - def test_create_w_bound_client(self): - PATH = 'projects/%s/managedZones' % self.PROJECT - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - zone.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'name': self.ZONE_NAME, - 'dnsName': self.DNS_NAME, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(zone, RESOURCE) - - def test_create_w_alternate_client(self): - PATH = 'projects/%s/managedZones' % self.PROJECT - DESCRIPTION = 'DESCRIPTION' - NAME_SERVER_SET = 'NAME_SERVER_SET' - RESOURCE = self._makeResource() - 
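The setter tests above and the create tests around this point cover the optional zone properties: ``description`` and ``name_server_set`` accept a string or ``None`` (anything else raises ``ValueError``) and, when set before ``create()``, are included in the request body. A sketch with hypothetical values:

    from gcloud.dns.client import Client

    client = Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.')
    zone.description = 'Zone for example.com'
    zone.name_server_set = 'NAME_SERVER_SET'  # rarely needed; usually left unset
    zone.create()  # body carries name, dnsName, description, nameServerSet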
RESOURCE['nameServerSet'] = NAME_SERVER_SET - RESOURCE['description'] = DESCRIPTION - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - zone.name_server_set = NAME_SERVER_SET - zone.description = DESCRIPTION - - zone.create(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'name': self.ZONE_NAME, - 'dnsName': self.DNS_NAME, - 'nameServerSet': NAME_SERVER_SET, - 'description': DESCRIPTION, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(zone, RESOURCE) - - def test_create_w_missing_output_properties(self): - # In the wild, the resource returned from 'zone.create' sometimes - # lacks 'creationTime' / 'lastModifiedTime' - PATH = 'projects/%s/managedZones' % (self.PROJECT,) - RESOURCE = self._makeResource() - del RESOURCE['creationTime'] - del RESOURCE['id'] - del RESOURCE['nameServers'] - self.WHEN = None - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - zone.create() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % PATH) - SENT = { - 'name': self.ZONE_NAME, - 'dnsName': self.DNS_NAME, - } - self.assertEqual(req['data'], SENT) - self._verifyResourceProperties(zone, RESOURCE) - - def test_exists_miss_w_bound_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - self.assertFalse(zone.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_exists_hit_w_alternate_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - - self.assertTrue(zone.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'fields': 'id'}) - - def test_reload_w_bound_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - RESOURCE = self._makeResource() - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - zone.reload() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(zone, RESOURCE) - - def test_reload_w_alternate_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - RESOURCE = 
self._makeResource() - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - - zone.reload(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self._verifyResourceProperties(zone, RESOURCE) - - def test_delete_w_bound_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - zone.delete() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_delete_w_alternate_client(self): - PATH = 'projects/%s/managedZones/%s' % (self.PROJECT, self.ZONE_NAME) - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - - zone.delete(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_resource_record_sets_defaults(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - PATH = 'projects/%s/managedZones/%s/rrsets' % ( - self.PROJECT, self.ZONE_NAME) - TOKEN = 'TOKEN' - NAME_1 = 'www.example.com' - TYPE_1 = 'A' - TTL_1 = '86400' - RRDATAS_1 = ['123.45.67.89'] - NAME_2 = 'alias.example.com' - TYPE_2 = 'CNAME' - TTL_2 = '3600' - RRDATAS_2 = ['www.example.com'] - DATA = { - 'nextPageToken': TOKEN, - 'rrsets': [ - {'kind': 'dns#resourceRecordSet', - 'name': NAME_1, - 'type': TYPE_1, - 'ttl': TTL_1, - 'rrdatas': RRDATAS_1}, - {'kind': 'dns#resourceRecordSet', - 'name': NAME_2, - 'type': TYPE_2, - 'ttl': TTL_2, - 'rrdatas': RRDATAS_2}, - ] - } - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - rrsets, token = zone.list_resource_record_sets() - - self.assertEqual(len(rrsets), len(DATA['rrsets'])) - for found, expected in zip(rrsets, DATA['rrsets']): - self.assertTrue(isinstance(found, ResourceRecordSet)) - self.assertEqual(found.name, expected['name']) - self.assertEqual(found.record_type, expected['type']) - self.assertEqual(found.ttl, int(expected['ttl'])) - self.assertEqual(found.rrdatas, expected['rrdatas']) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_resource_record_sets_explicit(self): - from gcloud.dns.resource_record_set import ResourceRecordSet - PATH = 'projects/%s/managedZones/%s/rrsets' % ( - self.PROJECT, self.ZONE_NAME) - TOKEN = 'TOKEN' - NAME_1 = 'www.example.com' - TYPE_1 = 'A' - TTL_1 = '86400' - RRDATAS_1 = ['123.45.67.89'] - NAME_2 = 'alias.example.com' - TYPE_2 = 'CNAME' - TTL_2 = '3600' - RRDATAS_2 = ['www.example.com'] - DATA = { - 'rrsets': [ - {'kind': 'dns#resourceRecordSet', - 'name': NAME_1, - 'type': 
TYPE_1, - 'ttl': TTL_1, - 'rrdatas': RRDATAS_1}, - {'kind': 'dns#resourceRecordSet', - 'name': NAME_2, - 'type': TYPE_2, - 'ttl': TTL_2, - 'rrdatas': RRDATAS_2}, - ] - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(DATA) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - - rrsets, token = zone.list_resource_record_sets( - max_results=3, page_token=TOKEN, client=client2) - - self.assertEqual(len(rrsets), len(DATA['rrsets'])) - for found, expected in zip(rrsets, DATA['rrsets']): - self.assertTrue(isinstance(found, ResourceRecordSet)) - self.assertEqual(found.name, expected['name']) - self.assertEqual(found.record_type, expected['type']) - self.assertEqual(found.ttl, int(expected['ttl'])) - self.assertEqual(found.rrdatas, expected['rrdatas']) - self.assertEqual(token, None) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': 3, 'pageToken': TOKEN}) - - def test_list_changes_defaults(self): - from gcloud._helpers import _datetime_to_rfc3339 - from gcloud.dns.changes import Changes - from gcloud.dns.resource_record_set import ResourceRecordSet - self._setUpConstants() - PATH = 'projects/%s/managedZones/%s/changes' % ( - self.PROJECT, self.ZONE_NAME) - TOKEN = 'TOKEN' - NAME_1 = 'www.example.com' - TYPE_1 = 'A' - TTL_1 = '86400' - RRDATAS_1 = ['123.45.67.89'] - NAME_2 = 'alias.example.com' - TYPE_2 = 'CNAME' - TTL_2 = '3600' - RRDATAS_2 = ['www.example.com'] - CHANGES_NAME = 'changeset_id' - DATA = { - 'nextPageToken': TOKEN, - 'changes': [{ - 'kind': 'dns#change', - 'id': CHANGES_NAME, - 'status': 'pending', - 'startTime': _datetime_to_rfc3339(self.WHEN), - 'additions': [ - {'kind': 'dns#resourceRecordSet', - 'name': NAME_1, - 'type': TYPE_1, - 'ttl': TTL_1, - 'rrdatas': RRDATAS_1}], - 'deletions': [ - {'kind': 'dns#change', - 'name': NAME_2, - 'type': TYPE_2, - 'ttl': TTL_2, - 'rrdatas': RRDATAS_2}], - }] - } - conn = _Connection(DATA) - client = _Client(project=self.PROJECT, connection=conn) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client) - - changes, token = zone.list_changes() - - self.assertEqual(len(changes), len(DATA['changes'])) - for found, expected in zip(changes, DATA['changes']): - self.assertTrue(isinstance(found, Changes)) - self.assertEqual(found.name, CHANGES_NAME) - self.assertEqual(found.status, 'pending') - self.assertEqual(found.started, self.WHEN) - - self.assertEqual(len(found.additions), len(expected['additions'])) - for found_rr, expected_rr in zip(found.additions, - expected['additions']): - self.assertTrue(isinstance(found_rr, ResourceRecordSet)) - self.assertEqual(found_rr.name, expected_rr['name']) - self.assertEqual(found_rr.record_type, expected_rr['type']) - self.assertEqual(found_rr.ttl, int(expected_rr['ttl'])) - self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas']) - - self.assertEqual(len(found.deletions), len(expected['deletions'])) - for found_rr, expected_rr in zip(found.deletions, - expected['deletions']): - self.assertTrue(isinstance(found_rr, ResourceRecordSet)) - self.assertEqual(found_rr.name, expected_rr['name']) - self.assertEqual(found_rr.record_type, expected_rr['type']) - self.assertEqual(found_rr.ttl, int(expected_rr['ttl'])) - self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas']) - 
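The listing tests in this area follow the same paging convention as the client: each call returns a list plus a ``nextPageToken``. A sketch of iterating both collections for a zone (names hypothetical):

    from gcloud.dns.client import Client

    client = Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.')

    rrsets, token = zone.list_resource_record_sets(max_results=100)
    for rrs in rrsets:
        print(rrs.name, rrs.record_type, rrs.ttl, rrs.rrdatas)

    changes, token = zone.list_changes(max_results=10)
    for change in changes:
        print(change.name, change.status, change.started)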
- self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_changes_explicit(self): - from gcloud._helpers import _datetime_to_rfc3339 - from gcloud.dns.changes import Changes - from gcloud.dns.resource_record_set import ResourceRecordSet - self._setUpConstants() - PATH = 'projects/%s/managedZones/%s/changes' % ( - self.PROJECT, self.ZONE_NAME) - TOKEN = 'TOKEN' - NAME_1 = 'www.example.com' - TYPE_1 = 'A' - TTL_1 = '86400' - RRDATAS_1 = ['123.45.67.89'] - NAME_2 = 'alias.example.com' - TYPE_2 = 'CNAME' - TTL_2 = '3600' - RRDATAS_2 = ['www.example.com'] - CHANGES_NAME = 'changeset_id' - DATA = { - 'changes': [{ - 'kind': 'dns#change', - 'id': CHANGES_NAME, - 'status': 'pending', - 'startTime': _datetime_to_rfc3339(self.WHEN), - 'additions': [ - {'kind': 'dns#resourceRecordSet', - 'name': NAME_1, - 'type': TYPE_1, - 'ttl': TTL_1, - 'rrdatas': RRDATAS_1}], - 'deletions': [ - {'kind': 'dns#change', - 'name': NAME_2, - 'type': TYPE_2, - 'ttl': TTL_2, - 'rrdatas': RRDATAS_2}], - }] - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(DATA) - client2 = _Client(project=self.PROJECT, connection=conn2) - zone = self._makeOne(self.ZONE_NAME, self.DNS_NAME, client1) - - changes, token = zone.list_changes( - max_results=3, page_token=TOKEN, client=client2) - - self.assertEqual(len(changes), len(DATA['changes'])) - for found, expected in zip(changes, DATA['changes']): - self.assertTrue(isinstance(found, Changes)) - self.assertEqual(found.name, CHANGES_NAME) - self.assertEqual(found.status, 'pending') - self.assertEqual(found.started, self.WHEN) - - self.assertEqual(len(found.additions), len(expected['additions'])) - for found_rr, expected_rr in zip(found.additions, - expected['additions']): - self.assertTrue(isinstance(found_rr, ResourceRecordSet)) - self.assertEqual(found_rr.name, expected_rr['name']) - self.assertEqual(found_rr.record_type, expected_rr['type']) - self.assertEqual(found_rr.ttl, int(expected_rr['ttl'])) - self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas']) - - self.assertEqual(len(found.deletions), len(expected['deletions'])) - for found_rr, expected_rr in zip(found.deletions, - expected['deletions']): - self.assertTrue(isinstance(found_rr, ResourceRecordSet)) - self.assertEqual(found_rr.name, expected_rr['name']) - self.assertEqual(found_rr.record_type, expected_rr['type']) - self.assertEqual(found_rr.ttl, int(expected_rr['ttl'])) - self.assertEqual(found_rr.rrdatas, expected_rr['rrdatas']) - - self.assertEqual(token, None) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'maxResults': 3, 'pageToken': TOKEN}) - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response diff --git a/gcloud/dns/zone.py b/gcloud/dns/zone.py deleted file mode 
100644 index 52f8e2818759..000000000000 --- a/gcloud/dns/zone.py +++ /dev/null @@ -1,384 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API ManagedZones.""" -import six - -from gcloud._helpers import _rfc3339_to_datetime -from gcloud.exceptions import NotFound -from gcloud.dns.changes import Changes -from gcloud.dns.resource_record_set import ResourceRecordSet - - -class ManagedZone(object): - """ManagedZones are containers for DNS resource records. - - See: - https://cloud.google.com/dns/api/v1/managedZones - - :type name: string - :param name: the name of the zone - - :type dns_name: string - :param dns_name: the DNS name of the zone - - :type client: :class:`gcloud.dns.client.Client` - :param client: A client which holds credentials and project configuration - for the zone (which requires a project). - """ - - def __init__(self, name, dns_name, client): - self.name = name - self.dns_name = dns_name - self._client = client - self._properties = {} - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a zone given its API representation - - :type resource: dict - :param resource: zone resource representation returned from the API - - :type client: :class:`gcloud.dns.client.Client` - :param client: Client which holds credentials and project - configuration for the zone. - - :rtype: :class:`gcloud.dns.zone.ManagedZone` - :returns: Zone parsed from ``resource``. - """ - name = resource.get('name') - dns_name = resource.get('dnsName') - if name is None or dns_name is None: - raise KeyError('Resource lacks required identity information:' - '["name"]["dnsName"]') - zone = cls(name, dns_name, client=client) - zone._set_properties(resource) - return zone - - @property - def project(self): - """Project bound to the zone. - - :rtype: string - :returns: the project (derived from the client). - """ - return self._client.project - - @property - def path(self): - """URL path for the zone's APIs. - - :rtype: string - :returns: the path based on project and dataste name. - """ - return '/projects/%s/managedZones/%s' % (self.project, self.name) - - @property - def created(self): - """Datetime at which the zone was created. - - :rtype: ``datetime.datetime``, or ``NoneType`` - :returns: the creation time (None until set from the server). - """ - return self._properties.get('creationTime') - - @property - def name_servers(self): - """Datetime at which the zone was created. - - :rtype: list of strings, or ``NoneType``. - :returns: the assigned name servers (None until set from the server). - """ - return self._properties.get('nameServers') - - @property - def zone_id(self): - """ID for the zone resource. - - :rtype: string, or ``NoneType`` - :returns: the ID (None until set from the server). - """ - return self._properties.get('id') - - @property - def description(self): - """Description of the zone. - - :rtype: string, or ``NoneType`` - :returns: The description as set by the user, or None (the default). 
- """ - return self._properties.get('description') - - @description.setter - def description(self, value): - """Update description of the zone. - - :type value: string, or ``NoneType`` - :param value: new description - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['description'] = value - - @property - def name_server_set(self): - """Named set of DNS name servers that all host the same ManagedZones. - - Most users will leave this blank. - - See: - https://cloud.google.com/dns/api/v1/managedZones#nameServerSet - - :rtype: string, or ``NoneType`` - :returns: The name as set by the user, or None (the default). - """ - return self._properties.get('nameServerSet') - - @name_server_set.setter - def name_server_set(self, value): - """Update named set of DNS name servers. - - :type value: string, or ``NoneType`` - :param value: new title - - :raises: ValueError for invalid value types. - """ - if not isinstance(value, six.string_types) and value is not None: - raise ValueError("Pass a string, or None") - self._properties['nameServerSet'] = value - - def resource_record_set(self, name, record_type, ttl, rrdatas): - """Construct a resource record set bound to this zone. - - :type name: string - :param name: Name of the record set. - - :type record_type: string - :param record_type: RR type - - :type ttl: integer - :param ttl: TTL for the RR, in seconds - - :type rrdatas: list of string - :param rrdatas: resource data for the RR - - :rtype: :class:`gcloud.dns.resource_record_set.ResourceRecordSet` - :returns: a new ``ResourceRecordSet`` instance - """ - return ResourceRecordSet(name, record_type, ttl, rrdatas, zone=self) - - def changes(self): - """Construct a change set bound to this zone. - - :rtype: :class:`gcloud.dns.changes.Changes` - :returns: a new ``Changes`` instance - """ - return Changes(zone=self) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - - :rtype: :class:`gcloud.dns.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - self._properties.clear() - cleaned = api_response.copy() - if 'creationTime' in cleaned: - cleaned['creationTime'] = _rfc3339_to_datetime( - cleaned['creationTime']) - self._properties.update(cleaned) - - def _build_resource(self): - """Generate a resource for ``create`` or ``update``.""" - resource = { - 'name': self.name, - 'dnsName': self.dns_name, - } - - if self.description is not None: - resource['description'] = self.description - - if self.name_server_set is not None: - resource['nameServerSet'] = self.name_server_set - - return resource - - def create(self, client=None): - """API call: create the zone via a PUT request - - See: - https://cloud.google.com/dns/api/v1/managedZones/create - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. 
- """ - client = self._require_client(client) - path = '/projects/%s/managedZones' % (self.project,) - api_response = client.connection.api_request( - method='POST', path=path, data=self._build_resource()) - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test for the existence of the zone via a GET request - - See - https://cloud.google.com/dns/api/v1/managedZones/get - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path, - query_params={'fields': 'id'}) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: refresh zone properties via a GET request - - See - https://cloud.google.com/dns/api/v1/managedZones/get - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - client = self._require_client(client) - - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) - - def delete(self, client=None): - """API call: delete the zone via a DELETE request - - See: - https://cloud.google.com/dns/api/v1/managedZones/delete - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) - - def list_resource_record_sets(self, max_results=None, page_token=None, - client=None): - """List resource record sets for this zone. - - See: - https://cloud.google.com/dns/api/v1/resourceRecordSets/list - - :type max_results: int - :param max_results: maximum number of zones to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of zones. If - not passed, the API will return the first page of - zones. - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - - :rtype: tuple, (list, str) - :returns: list of - :class:`gcloud.dns.resource_record_set.ResourceRecordSet`, - plus a "next page token" string: if the token is not None, - indicates that more zones can be retrieved with another - call (pass that value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/managedZones/%s/rrsets' % ( - self.project, self.name) - client = self._require_client(client) - conn = client.connection - resp = conn.api_request(method='GET', path=path, query_params=params) - zones = [ResourceRecordSet.from_api_repr(resource, self) - for resource in resp['rrsets']] - return zones, resp.get('nextPageToken') - - def list_changes(self, max_results=None, page_token=None, client=None): - """List change sets for this zone. - - See: - https://cloud.google.com/dns/api/v1/resourceRecordSets/list - - :type max_results: int - :param max_results: maximum number of zones to return, If not - passed, defaults to a value set by the API. 
- - :type page_token: string - :param page_token: opaque marker for the next "page" of zones. If - not passed, the API will return the first page of - zones. - - :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current zone. - - :rtype: tuple, (list, str) - :returns: list of - :class:`gcloud.dns.resource_record_set.ResourceRecordSet`, - plus a "next page token" string: if the token is not None, - indicates that more zones can be retrieved with another - call (pass that value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['maxResults'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/managedZones/%s/changes' % ( - self.project, self.name) - client = self._require_client(client) - conn = client.connection - resp = conn.api_request(method='GET', path=path, query_params=params) - zones = [Changes.from_api_repr(resource, self) - for resource in resp['changes']] - return zones, resp.get('nextPageToken') diff --git a/gcloud/logging/__init__.py b/gcloud/logging/__init__.py deleted file mode 100644 index 67b0386329e9..000000000000 --- a/gcloud/logging/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Logging API wrapper.""" - -from gcloud.logging.client import Client -from gcloud.logging.connection import Connection - - -SCOPE = Connection.SCOPE -ASCENDING = 'timestamp asc' -DESCENDING = 'timestamp desc' diff --git a/gcloud/logging/_helpers.py b/gcloud/logging/_helpers.py deleted file mode 100644 index 8061abc2cd30..000000000000 --- a/gcloud/logging/_helpers.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper functions for shared behavior.""" - -import re - -from gcloud._helpers import _name_from_project_path - - -_LOGGER_TEMPLATE = re.compile(r""" - projects/ # static prefix - (?P[^/]+) # initial letter, wordchars + hyphen - /logs/ # static midfix - (?P[^/]+) # initial letter, wordchars + allowed punc -""", re.VERBOSE) - - -def logger_name_from_path(path, project): - """Validate a logger URI path and get the logger name. - - :type path: string - :param path: URI path for a logger API request. - - :type project: string - :param project: The project associated with the request. 
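A small sketch of the helper documented above: it validates that the path's project matches the expected project and returns the trailing logger name. The delegation to ``_name_from_project_path`` is not shown in this diff, so the exact behavior below is inferred from the docstring; the values are hypothetical.

    from gcloud.logging._helpers import logger_name_from_path

    name = logger_name_from_path('projects/my-project/logs/syslog', 'my-project')
    # name == 'syslog'

    try:
        logger_name_from_path('projects/other-project/logs/syslog', 'my-project')
    except ValueError:
        pass  # a mismatched project (or malformed path) is rejected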
It is - included for validation purposes. - - :rtype: string - :returns: Logger name parsed from ``path``. - :raises: :class:`ValueError` if the ``path`` is ill-formed or if - the project from the ``path`` does not agree with the - ``project`` passed in. - """ - return _name_from_project_path(path, project, _LOGGER_TEMPLATE) diff --git a/gcloud/logging/client.py b/gcloud/logging/client.py deleted file mode 100644 index fd79d3100aa3..000000000000 --- a/gcloud/logging/client.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud Logging API.""" - - -from gcloud.client import JSONClient -from gcloud.logging.connection import Connection -from gcloud.logging.entries import ProtobufEntry -from gcloud.logging.entries import StructEntry -from gcloud.logging.entries import TextEntry -from gcloud.logging.logger import Logger -from gcloud.logging.metric import Metric -from gcloud.logging.sink import Sink - - -class Client(JSONClient): - """Client to bundle configuration needed for API requests. - - :type project: string - :param project: the project which the client acts on behalf of. - If not passed, falls back to the default inferred - from the environment. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. - """ - - _connection_class = Connection - - def logger(self, name): - """Creates a logger bound to the current client. - - :type name: string - :param name: the name of the logger to be constructed. - - :rtype: :class:`gcloud.logging.logger.Logger` - :returns: Logger created with the current client. - """ - return Logger(name, client=self) - - def _entry_from_resource(self, resource, loggers): - """Detect correct entry type from resource and instantiate. - - :type resource: dict - :param resource: one entry resource from API response - - :type loggers: dict or None - :param loggers: A mapping of logger fullnames -> loggers. If not - passed, the entry will have a newly-created logger. 
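
A short sketch of the path helper and the client-side logger factory shown above, assuming the removed ``gcloud.logging`` package is importable and using a hypothetical project ID:

    from gcloud import logging
    from gcloud.logging._helpers import logger_name_from_path

    # Validate a logger resource path and extract the short name
    # (raises ValueError if the path or project does not match).
    name = logger_name_from_path('projects/my-project/logs/syslog', 'my-project')
    # name == 'syslog'

    # The factory builds a Logger bound to this client.
    client = logging.Client(project='my-project')   # hypothetical project ID
    logger = client.logger(name)
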
- - :rtype; One of: - :class:`gcloud.logging.entries.TextEntry`, - :class:`gcloud.logging.entries.StructEntry`, - :class:`gcloud.logging.entries.ProtobufEntry` - :returns: the entry instance, constructed via the resource - """ - if 'textPayload' in resource: - return TextEntry.from_api_repr(resource, self, loggers) - elif 'jsonPayload' in resource: - return StructEntry.from_api_repr(resource, self, loggers) - elif 'protoPayload' in resource: - return ProtobufEntry.from_api_repr(resource, self, loggers) - raise ValueError('Cannot parse log entry resource') - - def list_entries(self, projects=None, filter_=None, order_by=None, - page_size=None, page_token=None): - """Return a page of log entries. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list - - :type projects: list of strings - :param projects: project IDs to include. If not passed, - defaults to the project bound to the client. - - :type filter_: string - :param filter_: a filter expression. See: - https://cloud.google.com/logging/docs/view/advanced_filters - - :type order_by: string - :param order_by: One of :data:`gcloud.logging.ASCENDING` or - :data:`gcloud.logging.DESCENDING`. - - :type page_size: int - :param page_size: maximum number of entries to return, If not passed, - defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of entries. If not - passed, the API will return the first page of - entries. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.logging.entry.TextEntry`, plus a - "next page token" string: if not None, indicates that - more entries can be retrieved with another call (pass that - value as ``page_token``). - """ - if projects is None: - projects = [self.project] - - params = {'projectIds': projects} - - if filter_ is not None: - params['filter'] = filter_ - - if order_by is not None: - params['orderBy'] = order_by - - if page_size is not None: - params['pageSize'] = page_size - - if page_token is not None: - params['pageToken'] = page_token - - resp = self.connection.api_request(method='POST', path='/entries:list', - data=params) - loggers = {} - entries = [self._entry_from_resource(resource, loggers) - for resource in resp.get('entries', ())] - return entries, resp.get('nextPageToken') - - def sink(self, name, filter_, destination): - """Creates a sink bound to the current client. - - :type name: string - :param name: the name of the sink to be constructed. - - :type filter_: string - :param filter_: the advanced logs filter expression defining the - entries exported by the sink. - - :type destination: string - :param destination: destination URI for the entries exported by - the sink. - - :rtype: :class:`gcloud.logging.sink.Sink` - :returns: Sink created with the current client. - """ - return Sink(name, filter_, destination, client=self) - - def list_sinks(self, page_size=None, page_token=None): - """List sinks for the project associated with this client. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list - - :type page_size: int - :param page_size: maximum number of sinks to return, If not passed, - defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of sinks. If not - passed, the API will return the first page of - sinks. 
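
A usage sketch for ``Client.list_entries`` as defined above, assuming application-default credentials and a hypothetical project ID; the filter string is just an example:

    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID

    # Fetch the most recent entries matching an advanced-logs filter.
    entries, token = client.list_entries(
        filter_='severity>=ERROR',
        order_by=logging.DESCENDING,
        page_size=100)
    for entry in entries:
        print(entry.timestamp, entry.payload)

    # A non-None token means more entries are available.
    if token is not None:
        more_entries, _ = client.list_entries(page_token=token)
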
- - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.logging.sink.Sink`, plus a - "next page token" string: if not None, indicates that - more sinks can be retrieved with another call (pass that - value as ``page_token``). - """ - params = {} - - if page_size is not None: - params['pageSize'] = page_size - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/sinks' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - sinks = [Sink.from_api_repr(resource, self) - for resource in resp.get('sinks', ())] - return sinks, resp.get('nextPageToken') - - def metric(self, name, filter_, description=''): - """Creates a metric bound to the current client. - - :type name: string - :param name: the name of the metric to be constructed. - - :type filter_: string - :param filter_: the advanced logs filter expression defining the - entries tracked by the metric. - - :type description: string - :param description: the description of the metric to be constructed. - - :rtype: :class:`gcloud.logging.metric.Metric` - :returns: Metric created with the current client. - """ - return Metric(name, filter_, client=self, description=description) - - def list_metrics(self, page_size=None, page_token=None): - """List metrics for the project associated with this client. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list - - :type page_size: int - :param page_size: maximum number of metrics to return, If not passed, - defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of metrics. If not - passed, the API will return the first page of - metrics. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.logging.metric.Metric`, plus a - "next page token" string: if not None, indicates that - more metrics can be retrieved with another call (pass that - value as ``page_token``). - """ - params = {} - - if page_size is not None: - params['pageSize'] = page_size - - if page_token is not None: - params['pageToken'] = page_token - - path = '/projects/%s/metrics' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - metrics = [Metric.from_api_repr(resource, self) - for resource in resp.get('metrics', ())] - return metrics, resp.get('nextPageToken') diff --git a/gcloud/logging/connection.py b/gcloud/logging/connection.py deleted file mode 100644 index 1c330a28529e..000000000000 --- a/gcloud/logging/connection.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud logging connections.""" - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud Logging via the JSON REST API. 
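
The sink and metric listing methods above follow the same page-plus-token pattern. A minimal sketch, again with a hypothetical project ID:

    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID

    sinks, token = client.list_sinks(page_size=50)
    for sink in sinks:
        print(sink.name, sink.destination)

    metrics, _ = client.list_metrics()
    for metric in metrics:
        print(metric.name, metric.filter_)
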
- - :type credentials: :class:`oauth2client.client.OAuth2Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - connection. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: (Optional) HTTP object to make requests. - - :type api_base_url: string - :param api_base_url: The base of the API call URL. Defaults to the value - :attr:`Connection.API_BASE_URL`. - """ - - API_BASE_URL = 'https://logging.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v2beta1' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/logging.read', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/logging.admin', - 'https://www.googleapis.com/auth/cloud-platform') - """The scopes required for authenticating as a Cloud Logging consumer.""" diff --git a/gcloud/logging/entries.py b/gcloud/logging/entries.py deleted file mode 100644 index e26ac7ef0cd3..000000000000 --- a/gcloud/logging/entries.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Log entries within the Google Cloud Logging API.""" - -import json - -from google.protobuf.json_format import Parse - -from gcloud._helpers import _rfc3339_nanos_to_datetime -from gcloud.logging._helpers import logger_name_from_path - - -class _BaseEntry(object): - """Base class for TextEntry, StructEntry. - - :type payload: text or dict - :param payload: The payload passed as ``textPayload``, ``jsonPayload``, - or ``protoPayload``. - - :type logger: :class:`gcloud.logging.logger.Logger` - :param logger: the logger used to write the entry. - - :type insert_id: text, or :class:`NoneType` - :param insert_id: (optional) the ID used to identify an entry uniquely. - - :type timestamp: :class:`datetime.datetime`, or :class:`NoneType` - :param timestamp: (optional) timestamp for the entry - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. 
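
The class constants above fully determine how request URLs are composed and which OAuth2 scopes are requested. A small, network-free sketch using only those constants:

    from gcloud.logging.connection import Connection

    # The JSON connection composes request URLs from these class constants.
    url = Connection.API_URL_TEMPLATE.format(
        api_base_url=Connection.API_BASE_URL,
        api_version=Connection.API_VERSION,
        path='/entries:write')
    # url == 'https://logging.googleapis.com/v2beta1/entries:write'

    # SCOPE lists the scopes applied when credentials are rescoped for logging.
    print(Connection.SCOPE)
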
- - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry - """ - def __init__(self, payload, logger, insert_id=None, timestamp=None, - labels=None, severity=None, http_request=None): - self.payload = payload - self.logger = logger - self.insert_id = insert_id - self.timestamp = timestamp - self.labels = labels - self.severity = severity - self.http_request = http_request - - @classmethod - def from_api_repr(cls, resource, client, loggers=None): - """Factory: construct an entry given its API representation - - :type resource: dict - :param resource: text entry resource representation returned from - the API - - :type client: :class:`gcloud.logging.client.Client` - :param client: Client which holds credentials and project - configuration. - - :type loggers: dict or None - :param loggers: A mapping of logger fullnames -> loggers. If not - passed, the entry will have a newly-created logger. - - :rtype: :class:`gcloud.logging.entries.TextEntry` - :returns: Text entry parsed from ``resource``. - """ - if loggers is None: - loggers = {} - logger_fullname = resource['logName'] - logger = loggers.get(logger_fullname) - if logger is None: - logger_name = logger_name_from_path( - logger_fullname, client.project) - logger = loggers[logger_fullname] = client.logger(logger_name) - payload = resource[cls._PAYLOAD_KEY] - insert_id = resource.get('insertId') - timestamp = resource.get('timestamp') - if timestamp is not None: - timestamp = _rfc3339_nanos_to_datetime(timestamp) - labels = resource.get('labels') - severity = resource.get('severity') - http_request = resource.get('httpRequest') - return cls(payload, logger, insert_id=insert_id, timestamp=timestamp, - labels=labels, severity=severity, http_request=http_request) - - -class TextEntry(_BaseEntry): - """Entry created with ``textPayload``. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry - """ - _PAYLOAD_KEY = 'textPayload' - - -class StructEntry(_BaseEntry): - """Entry created with ``jsonPayload``. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry - """ - _PAYLOAD_KEY = 'jsonPayload' - - -class ProtobufEntry(_BaseEntry): - """Entry created with ``protoPayload``. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry - """ - _PAYLOAD_KEY = 'protoPayload' - - def parse_message(self, message): - """Parse payload into a protobuf message. - - Mutates the passed-in ``message`` in place. - - :type message: Protobuf message - :param message: the message to be logged - """ - Parse(json.dumps(self.payload), message) diff --git a/gcloud/logging/logger.py b/gcloud/logging/logger.py deleted file mode 100644 index f7bb50ee4a80..000000000000 --- a/gcloud/logging/logger.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
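
A sketch of ``ProtobufEntry.parse_message`` from the entries module above. The payload dict is a made-up sample, and ``logger=None`` is used only for illustration; an entry returned by the API carries the ``Logger`` it was read from.

    from google.protobuf.struct_pb2 import Struct
    from gcloud.logging.entries import ProtobufEntry

    payload = {'message': 'engine check', 'status': 'ok'}   # sample protoPayload dict
    entry = ProtobufEntry(payload, logger=None)              # logger omitted for illustration

    msg = Struct()
    entry.parse_message(msg)   # mutates msg in place from the JSON payload
    # msg['message'] == 'engine check'
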
- -"""Define API Loggers.""" - -import json - -from google.protobuf.json_format import MessageToJson - - -class Logger(object): - """Loggers represent named targets for log entries. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs - - :type name: string - :param name: the name of the logger - - :type client: :class:`gcloud.logging.client.Client` - :param client: A client which holds credentials and project configuration - for the logger (which requires a project). - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of default labels for entries written - via this logger. - """ - def __init__(self, name, client, labels=None): - self.name = name - self._client = client - self.labels = labels - - @property - def client(self): - """Clent bound to the logger.""" - return self._client - - @property - def project(self): - """Project bound to the logger.""" - return self._client.project - - @property - def full_name(self): - """Fully-qualified name used in logging APIs""" - return 'projects/%s/logs/%s' % (self.project, self.name) - - @property - def path(self): - """URI path for use in logging APIs""" - return '/%s' % (self.full_name,) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :rtype: :class:`gcloud.logging.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - def batch(self, client=None): - """Return a batch to use as a context manager. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current topic. - - :rtype: :class:`Batch` - :returns: A batch to use as a context manager. - """ - client = self._require_client(client) - return Batch(self, client) - - def _make_entry_resource(self, text=None, info=None, message=None, - labels=None, insert_id=None, severity=None, - http_request=None): - """Return a log entry resource of the appropriate type. - - Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`. - - Only one of ``text``, ``info``, or ``message`` should be passed. - - :type text: string or :class:`NoneType` - :param text: text payload - - :type info: dict or :class:`NoneType` - :param info: struct payload - - :type message: Protobuf message or :class:`NoneType` - :param message: protobuf payload - - :type labels: dict or :class:`NoneType` - :param labels: labels passed in to calling method. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. 
- - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry - """ - resource = { - 'logName': self.full_name, - 'resource': {'type': 'global'}, - } - - if text is not None: - resource['textPayload'] = text - - if info is not None: - resource['jsonPayload'] = info - - if message is not None: - as_json_str = MessageToJson(message) - as_json = json.loads(as_json_str) - resource['protoPayload'] = as_json - - if labels is None: - labels = self.labels - - if labels is not None: - resource['labels'] = labels - - if insert_id is not None: - resource['insertId'] = insert_id - - if severity is not None: - resource['severity'] = severity - - if http_request is not None: - resource['httpRequest'] = http_request - - return resource - - def log_text(self, text, client=None, labels=None, insert_id=None, - severity=None, http_request=None): - """API call: log a text message via a POST request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write - - :type text: text - :param text: the log message. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. - - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry - """ - client = self._require_client(client) - entry_resource = self._make_entry_resource( - text=text, labels=labels, insert_id=insert_id, severity=severity, - http_request=http_request) - data = {'entries': [entry_resource]} - - client.connection.api_request( - method='POST', path='/entries:write', data=data) - - def log_struct(self, info, client=None, labels=None, insert_id=None, - severity=None, http_request=None): - """API call: log a structured message via a POST request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write - - :type info: dict - :param info: the log entry information - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. 
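
A sketch of ``log_text`` and ``log_struct`` as defined above, assuming ``GOOGLE_APPLICATION_CREDENTIALS`` is set; the project ID, labels, and insert ID are example values:

    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID
    logger = client.logger('syslog')

    logger.log_text('cold evening', severity='INFO',
                    labels={'env': 'prod'})                    # sample labels
    logger.log_struct({'message': 'engine check', 'rpm': 7200},
                      insert_id='0123-4567')                   # sample insert ID
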
- - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry - """ - client = self._require_client(client) - entry_resource = self._make_entry_resource( - info=info, labels=labels, insert_id=insert_id, severity=severity, - http_request=http_request) - data = {'entries': [entry_resource]} - - client.connection.api_request( - method='POST', path='/entries:write', data=data) - - def log_proto(self, message, client=None, labels=None, insert_id=None, - severity=None, http_request=None): - """API call: log a protobuf message via a POST request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write - - :type message: Protobuf message - :param message: the message to be logged - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. - - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry - """ - client = self._require_client(client) - entry_resource = self._make_entry_resource( - message=message, labels=labels, insert_id=insert_id, - severity=severity, http_request=http_request) - data = {'entries': [entry_resource]} - - client.connection.api_request( - method='POST', path='/entries:write', data=data) - - def delete(self, client=None): - """API call: delete all entries in a logger via a DELETE request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs/delete - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) - - def list_entries(self, projects=None, filter_=None, order_by=None, - page_size=None, page_token=None): - """Return a page of log entries. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list - - :type projects: list of strings - :param projects: project IDs to include. If not passed, - defaults to the project bound to the client. - - :type filter_: string - :param filter_: a filter expression. See: - https://cloud.google.com/logging/docs/view/advanced_filters - - :type order_by: string - :param order_by: One of :data:`gcloud.logging.ASCENDING` or - :data:`gcloud.logging.DESCENDING`. - - :type page_size: int - :param page_size: maximum number of entries to return, If not passed, - defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of entries. If not - passed, the API will return the first page of - entries. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.logging.entry.TextEntry`, plus a - "next page token" string: if not None, indicates that - more entries can be retrieved with another call (pass that - value as ``page_token``). 
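
A sketch of ``log_proto`` and ``delete`` from the logger above; it assumes a ``Struct`` message is an acceptable payload (any protobuf message serializable by ``MessageToJson`` should work) and uses hypothetical names:

    from google.protobuf.struct_pb2 import Struct
    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID
    logger = client.logger('syslog')

    msg = Struct()
    msg.update({'message': 'engine check', 'status': 'ok'})
    logger.log_proto(msg, severity='DEBUG')

    # Deleting a logger removes all of its stored entries.
    logger.delete()
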
- """ - log_filter = 'logName:%s' % (self.name,) - if filter_ is not None: - filter_ = '%s AND %s' % (filter_, log_filter) - else: - filter_ = log_filter - return self.client.list_entries( - projects=projects, filter_=filter_, order_by=order_by, - page_size=page_size, page_token=page_token) - - -class Batch(object): - """Context manager: collect entries to log via a single API call. - - Helper returned by :meth:`Logger.batch` - - :type logger: :class:`gcloud.logging.logger.Logger` - :param logger: the logger to which entries will be logged. - - :type client: :class:`gcloud.logging.client.Client` - :param client: The client to use. - """ - def __init__(self, logger, client): - self.logger = logger - self.entries = [] - self.client = client - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is None: - self.commit() - - def log_text(self, text, labels=None, insert_id=None, severity=None, - http_request=None): - """Add a text entry to be logged during :meth:`commit`. - - :type text: string - :param text: the text entry - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. - - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry. - """ - self.entries.append( - ('text', text, labels, insert_id, severity, http_request)) - - def log_struct(self, info, labels=None, insert_id=None, severity=None, - http_request=None): - """Add a struct entry to be logged during :meth:`commit`. - - :type info: dict - :param info: the struct entry - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. - - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry. - """ - self.entries.append( - ('struct', info, labels, insert_id, severity, http_request)) - - def log_proto(self, message, labels=None, insert_id=None, severity=None, - http_request=None): - """Add a protobuf entry to be logged during :meth:`commit`. - - :type message: protobuf message - :param message: the protobuf entry - - :type labels: dict or :class:`NoneType` - :param labels: (optional) mapping of labels for the entry. - - :type insert_id: string or :class:`NoneType` - :param insert_id: (optional) unique ID for log entry. - - :type severity: string or :class:`NoneType` - :param severity: (optional) severity of event being logged. - - :type http_request: dict or :class:`NoneType` - :param http_request: (optional) info about HTTP request associated with - the entry. - """ - self.entries.append( - ('proto', message, labels, insert_id, severity, http_request)) - - def commit(self, client=None): - """Send saved log entries as a single API call. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current batch. 
- """ - if client is None: - client = self.client - - data = { - 'logName': self.logger.path, - 'resource': {'type': 'global'}, - } - if self.logger.labels is not None: - data['labels'] = self.logger.labels - - entries = data['entries'] = [] - for entry_type, entry, labels, iid, severity, http_req in self.entries: - if entry_type == 'text': - info = {'textPayload': entry} - elif entry_type == 'struct': - info = {'jsonPayload': entry} - elif entry_type == 'proto': - as_json_str = MessageToJson(entry) - as_json = json.loads(as_json_str) - info = {'protoPayload': as_json} - else: - raise ValueError('Unknown entry type: %s' % (entry_type,)) - if labels is not None: - info['labels'] = labels - if iid is not None: - info['insertId'] = iid - if severity is not None: - info['severity'] = severity - if http_req is not None: - info['httpRequest'] = http_req - entries.append(info) - - client.connection.api_request( - method='POST', path='/entries:write', data=data) - del self.entries[:] diff --git a/gcloud/logging/metric.py b/gcloud/logging/metric.py deleted file mode 100644 index 34fa343ff53f..000000000000 --- a/gcloud/logging/metric.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define Logging API Metrics.""" - -import re - -from gcloud._helpers import _name_from_project_path -from gcloud.exceptions import NotFound - - -_METRIC_TEMPLATE = re.compile(r""" - projects/ # static prefix - (?P[^/]+) # initial letter, wordchars + hyphen - /metrics/ # static midfix - (?P[^/]+) # initial letter, wordchars + allowed punc -""", re.VERBOSE) - - -def _metric_name_from_path(path, project): - """Validate a metric URI path and get the metric name. - - :type path: string - :param path: URI path for a metric API request. - - :type project: string - :param project: The project associated with the request. It is - included for validation purposes. - - :rtype: string - :returns: Metric name parsed from ``path``. - :raises: :class:`ValueError` if the ``path`` is ill-formed or if - the project from the ``path`` does not agree with the - ``project`` passed in. - """ - return _name_from_project_path(path, project, _METRIC_TEMPLATE) - - -class Metric(object): - """Metrics represent named filters for log entries. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics - - :type name: string - :param name: the name of the metric - - :type filter_: string - :param filter_: the advanced logs filter expression defining the entries - tracked by the metric. - - :type client: :class:`gcloud.logging.client.Client` - :param client: A client which holds credentials and project configuration - for the metric (which requires a project). 
- - :type description: string - :param description: an optional description of the metric - """ - def __init__(self, name, filter_, client, description=''): - self.name = name - self._client = client - self.filter_ = filter_ - self.description = description - - @property - def client(self): - """Clent bound to the logger.""" - return self._client - - @property - def project(self): - """Project bound to the logger.""" - return self._client.project - - @property - def full_name(self): - """Fully-qualified name used in metric APIs""" - return 'projects/%s/metrics/%s' % (self.project, self.name) - - @property - def path(self): - """URL path for the metric's APIs""" - return '/%s' % (self.full_name,) - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a metric given its API representation - - :type resource: dict - :param resource: metric resource representation returned from the API - - :type client: :class:`gcloud.logging.client.Client` - :param client: Client which holds credentials and project - configuration for the metric. - - :rtype: :class:`gcloud.logging.metric.Metric` - :returns: Metric parsed from ``resource``. - """ - metric_name = resource['name'] - filter_ = resource['filter'] - description = resource.get('description', '') - return cls(metric_name, filter_, client=client, - description=description) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. - - :rtype: :class:`gcloud.logging.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - def create(self, client=None): - """API call: create the metric via a PUT request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. - """ - client = self._require_client(client) - target = '/projects/%s/metrics' % (self.project,) - data = { - 'name': self.name, - 'filter': self.filter_, - } - if self.description: - data['description'] = self.description - client.connection.api_request(method='POST', path=target, data=data) - - def exists(self, client=None): - """API call: test for the existence of the metric via a GET request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/get - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: sync local metric configuration via a GET request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/get - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. 
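
A lifecycle sketch for the ``Metric`` methods defined in this module (create, exists, reload, update, delete), using a hypothetical project, metric name, and filter:

    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID
    metric = client.metric('error_count',
                           'logName:syslog AND severity>=ERROR',
                           description='Count of syslog errors')

    if not metric.exists():
        metric.create()

    metric.reload()                                  # pull server-side configuration
    metric.description = 'Syslog errors (updated)'
    metric.update()
    metric.delete()
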
- """ - client = self._require_client(client) - data = client.connection.api_request(method='GET', path=self.path) - self.description = data.get('description', '') - self.filter_ = data['filter'] - - def update(self, client=None): - """API call: update metric configuration via a PUT request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/update - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. - """ - client = self._require_client(client) - data = {'name': self.name, 'filter': self.filter_} - if self.description: - data['description'] = self.description - client.connection.api_request(method='PUT', path=self.path, data=data) - - def delete(self, client=None): - """API call: delete a metric via a DELETE request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/delete - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) diff --git a/gcloud/logging/sink.py b/gcloud/logging/sink.py deleted file mode 100644 index 49f651bfe905..000000000000 --- a/gcloud/logging/sink.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define Logging API Sinks.""" - -import re - -from gcloud._helpers import _name_from_project_path -from gcloud.exceptions import NotFound - - -_SINK_TEMPLATE = re.compile(r""" - projects/ # static prefix - (?P[^/]+) # initial letter, wordchars + hyphen - /sinks/ # static midfix - (?P[^/]+) # initial letter, wordchars + allowed punc -""", re.VERBOSE) - - -def _sink_name_from_path(path, project): - """Validate a sink URI path and get the sink name. - :type path: string - :param path: URI path for a sink API request. - :type project: string - :param project: The project associated with the request. It is - included for validation purposes. - :rtype: string - :returns: Metric name parsed from ``path``. - :raises: :class:`ValueError` if the ``path`` is ill-formed or if - the project from the ``path`` does not agree with the - ``project`` passed in. - """ - return _name_from_project_path(path, project, _SINK_TEMPLATE) - - -class Sink(object): - """Sinks represent filtered exports for log entries. - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks - - :type name: string - :param name: the name of the sink - - :type filter_: string - :param filter_: the advanced logs filter expression defining the entries - exported by the sink. - - :type destination: string - :param destination: destination URI for the entries exported by the sink. 
- - :type client: :class:`gcloud.logging.client.Client` - :param client: A client which holds credentials and project configuration - for the sink (which requires a project). - """ - def __init__(self, name, filter_, destination, client): - self.name = name - self.filter_ = filter_ - self.destination = destination - self._client = client - - @property - def client(self): - """Clent bound to the sink.""" - return self._client - - @property - def project(self): - """Project bound to the sink.""" - return self._client.project - - @property - def full_name(self): - """Fully-qualified name used in sink APIs""" - return 'projects/%s/sinks/%s' % (self.project, self.name) - - @property - def path(self): - """URL path for the sink's APIs""" - return '/%s' % (self.full_name) - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a sink given its API representation - - :type resource: dict - :param resource: sink resource representation returned from the API - - :type client: :class:`gcloud.logging.client.Client` - :param client: Client which holds credentials and project - configuration for the sink. - - :rtype: :class:`gcloud.logging.sink.Sink` - :returns: Sink parsed from ``resource``. - :raises: :class:`ValueError` if ``client`` is not ``None`` and the - project from the resource does not agree with the project - from the client. - """ - sink_name = _sink_name_from_path(resource['name'], client.project) - filter_ = resource['filter'] - destination = resource['destination'] - return cls(sink_name, filter_, destination, client=client) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - - :rtype: :class:`gcloud.logging.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self._client - return client - - def create(self, client=None): - """API call: create the sink via a PUT request - - See: - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - """ - client = self._require_client(client) - target = '/projects/%s/sinks' % (self.project,) - data = { - 'name': self.name, - 'filter': self.filter_, - 'destination': self.destination, - } - client.connection.api_request(method='POST', path=target, data=data) - - def exists(self, client=None): - """API call: test for the existence of the sink via a GET request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/get - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - """ - client = self._require_client(client) - - try: - client.connection.api_request(method='GET', path=self.path) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: sync local sink configuration via a GET request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/get - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. 
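
A lifecycle sketch for the ``Sink`` methods defined in this module, with a hypothetical project, sink name, filter, and destination URI:

    from gcloud import logging

    client = logging.Client(project='my-project')   # hypothetical project ID
    sink = client.sink('errors-export',
                       'severity>=ERROR',
                       'storage.googleapis.com/my-export-bucket')   # hypothetical destination

    if not sink.exists():
        sink.create()

    sink.reload()
    sink.filter_ = 'severity>=CRITICAL'
    sink.update()
    sink.delete()
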
If not passed, falls back to the - ``client`` stored on the current sink. - """ - client = self._require_client(client) - data = client.connection.api_request(method='GET', path=self.path) - self.filter_ = data['filter'] - self.destination = data['destination'] - - def update(self, client=None): - """API call: update sink configuration via a PUT request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/update - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - """ - client = self._require_client(client) - data = { - 'name': self.name, - 'filter': self.filter_, - 'destination': self.destination, - } - client.connection.api_request(method='PUT', path=self.path, data=data) - - def delete(self, client=None): - """API call: delete a sink via a DELETE request - - See - https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/delete - - :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) diff --git a/gcloud/logging/test__helpers.py b/gcloud/logging/test__helpers.py deleted file mode 100644 index a70d40218186..000000000000 --- a/gcloud/logging/test__helpers.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test_logger_name_from_path(unittest2.TestCase): - - def _callFUT(self, path, project): - from gcloud.logging._helpers import logger_name_from_path - return logger_name_from_path(path, project) - - def test_w_simple_name(self): - LOGGER_NAME = 'LOGGER_NAME' - PROJECT = 'my-project-1234' - PATH = 'projects/%s/logs/%s' % (PROJECT, LOGGER_NAME) - logger_name = self._callFUT(PATH, PROJECT) - self.assertEqual(logger_name, LOGGER_NAME) - - def test_w_name_w_all_extras(self): - LOGGER_NAME = 'LOGGER_NAME-part.one~part.two%part-three' - PROJECT = 'my-project-1234' - PATH = 'projects/%s/logs/%s' % (PROJECT, LOGGER_NAME) - logger_name = self._callFUT(PATH, PROJECT) - self.assertEqual(logger_name, LOGGER_NAME) diff --git a/gcloud/logging/test_client.py b/gcloud/logging/test_client.py deleted file mode 100644 index 2ac27234ad6e..000000000000 --- a/gcloud/logging/test_client.py +++ /dev/null @@ -1,431 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestClient(unittest2.TestCase): - - PROJECT = 'PROJECT' - LOGGER_NAME = 'LOGGER_NAME' - SINK_NAME = 'SINK_NAME' - FILTER = 'logName:syslog AND severity>=ERROR' - DESTINATION_URI = 'faux.googleapis.com/destination' - METRIC_NAME = 'metric_name' - FILTER = 'logName:syslog AND severity>=ERROR' - DESCRIPTION = 'DESCRIPTION' - - def _getTargetClass(self): - from gcloud.logging.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - creds = _Credentials() - client = self._makeOne(project=self.PROJECT, credentials=creds) - self.assertEqual(client.project, self.PROJECT) - - def test_logger(self): - from gcloud.logging.logger import Logger - creds = _Credentials() - client = self._makeOne(project=self.PROJECT, credentials=creds) - logger = client.logger(self.LOGGER_NAME) - self.assertTrue(isinstance(logger, Logger)) - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - - def test__entry_from_resource_unknown_type(self): - PROJECT = 'PROJECT' - creds = _Credentials() - client = self._makeOne(PROJECT, creds) - loggers = {} - with self.assertRaises(ValueError): - client._entry_from_resource({'unknownPayload': {}}, loggers) - - def test_list_entries_defaults(self): - from datetime import datetime - from gcloud._helpers import UTC - from gcloud.logging.entries import TextEntry - from gcloud.logging.test_entries import _datetime_to_rfc3339_w_nanos - NOW = datetime.utcnow().replace(tzinfo=UTC) - TIMESTAMP = _datetime_to_rfc3339_w_nanos(NOW) - IID1 = 'IID1' - TEXT = 'TEXT' - SENT = { - 'projectIds': [self.PROJECT], - } - TOKEN = 'TOKEN' - RETURNED = { - 'entries': [{ - 'textPayload': TEXT, - 'insertId': IID1, - 'resource': { - 'type': 'global', - }, - 'timestamp': TIMESTAMP, - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - }], - 'nextPageToken': TOKEN, - } - creds = _Credentials() - client = self._makeOne(project=self.PROJECT, credentials=creds) - conn = client.connection = _Connection(RETURNED) - entries, token = client.list_entries() - self.assertEqual(len(entries), 1) - entry = entries[0] - self.assertTrue(isinstance(entry, TextEntry)) - self.assertEqual(entry.insert_id, IID1) - self.assertEqual(entry.payload, TEXT) - self.assertEqual(entry.timestamp, NOW) - logger = entry.logger - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - self.assertEqual(token, TOKEN) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:list') - self.assertEqual(req['data'], SENT) - - def test_list_entries_explicit(self): - # pylint: disable=too-many-statements - from datetime import datetime - from gcloud._helpers import UTC - from gcloud.logging import DESCENDING - from gcloud.logging.entries import ProtobufEntry - from gcloud.logging.entries import StructEntry - from gcloud.logging.logger 
import Logger - from gcloud.logging.test_entries import _datetime_to_rfc3339_w_nanos - PROJECT1 = 'PROJECT1' - PROJECT2 = 'PROJECT2' - FILTER = 'logName:LOGNAME' - NOW = datetime.utcnow().replace(tzinfo=UTC) - TIMESTAMP = _datetime_to_rfc3339_w_nanos(NOW) - IID1 = 'IID1' - IID2 = 'IID2' - PAYLOAD = {'message': 'MESSAGE', 'weather': 'partly cloudy'} - PROTO_PAYLOAD = PAYLOAD.copy() - PROTO_PAYLOAD['@type'] = 'type.googleapis.com/testing.example' - TOKEN = 'TOKEN' - PAGE_SIZE = 42 - SENT = { - 'projectIds': [PROJECT1, PROJECT2], - 'filter': FILTER, - 'orderBy': DESCENDING, - 'pageSize': PAGE_SIZE, - 'pageToken': TOKEN, - } - RETURNED = { - 'entries': [{ - 'jsonPayload': PAYLOAD, - 'insertId': IID1, - 'resource': { - 'type': 'global', - }, - 'timestamp': TIMESTAMP, - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - }, { - 'protoPayload': PROTO_PAYLOAD, - 'insertId': IID2, - 'resource': { - 'type': 'global', - }, - 'timestamp': TIMESTAMP, - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - }], - } - creds = _Credentials() - client = self._makeOne(project=self.PROJECT, credentials=creds) - conn = client.connection = _Connection(RETURNED) - entries, token = client.list_entries( - projects=[PROJECT1, PROJECT2], filter_=FILTER, order_by=DESCENDING, - page_size=PAGE_SIZE, page_token=TOKEN) - self.assertEqual(len(entries), 2) - - entry = entries[0] - self.assertTrue(isinstance(entry, StructEntry)) - self.assertEqual(entry.insert_id, IID1) - self.assertEqual(entry.payload, PAYLOAD) - self.assertEqual(entry.timestamp, NOW) - logger = entry.logger - self.assertTrue(isinstance(logger, Logger)) - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - - entry = entries[1] - self.assertTrue(isinstance(entry, ProtobufEntry)) - self.assertEqual(entry.insert_id, IID2) - self.assertEqual(entry.payload, PROTO_PAYLOAD) - self.assertEqual(entry.timestamp, NOW) - logger = entry.logger - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - - self.assertTrue(entries[0].logger is entries[1].logger) - - self.assertEqual(token, None) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:list') - self.assertEqual(req['data'], SENT) - - def test_sink(self): - from gcloud.logging.sink import Sink - creds = _Credentials() - client = self._makeOne(project=self.PROJECT, credentials=creds) - sink = client.sink(self.SINK_NAME, self.FILTER, self.DESTINATION_URI) - self.assertTrue(isinstance(sink, Sink)) - self.assertEqual(sink.name, self.SINK_NAME) - self.assertEqual(sink.filter_, self.FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertTrue(sink.client is client) - self.assertEqual(sink.project, self.PROJECT) - - def test_list_sinks_no_paging(self): - from gcloud.logging.sink import Sink - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - SINK_NAME = 'sink_name' - FILTER = 'logName:syslog AND severity>=ERROR' - SINK_PATH = 'projects/%s/sinks/%s' % (PROJECT, SINK_NAME) - - RETURNED = { - 'sinks': [{ - 'name': SINK_PATH, - 'filter': FILTER, - 'destination': self.DESTINATION_URI, - }], - } - # Replace the connection on the client with one of our own. 
- CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - sinks, next_page_token = CLIENT_OBJ.list_sinks() - # Test values are correct. - self.assertEqual(len(sinks), 1) - sink = sinks[0] - self.assertTrue(isinstance(sink, Sink)) - self.assertEqual(sink.name, SINK_NAME) - self.assertEqual(sink.filter_, FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertEqual(next_page_token, None) - self.assertEqual(len(CLIENT_OBJ.connection._requested), 1) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/projects/%s/sinks' % (PROJECT,)) - self.assertEqual(req['query_params'], {}) - - def test_list_sinks_with_paging(self): - from gcloud.logging.sink import Sink - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - SINK_NAME = 'sink_name' - FILTER = 'logName:syslog AND severity>=ERROR' - SINK_PATH = 'projects/%s/sinks/%s' % (PROJECT, SINK_NAME) - TOKEN1 = 'TOKEN1' - TOKEN2 = 'TOKEN2' - SIZE = 1 - RETURNED = { - 'sinks': [{ - 'name': SINK_PATH, - 'filter': FILTER, - 'destination': self.DESTINATION_URI, - }], - 'nextPageToken': TOKEN2, - } - # Replace the connection on the client with one of our own. - CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - sinks, next_page_token = CLIENT_OBJ.list_sinks(SIZE, TOKEN1) - # Test values are correct. - self.assertEqual(len(sinks), 1) - sink = sinks[0] - self.assertTrue(isinstance(sink, Sink)) - self.assertEqual(sink.name, SINK_NAME) - self.assertEqual(sink.filter_, FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertEqual(next_page_token, TOKEN2) - self.assertEqual(len(CLIENT_OBJ.connection._requested), 1) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/projects/%s/sinks' % (PROJECT,)) - self.assertEqual(req['query_params'], - {'pageSize': SIZE, 'pageToken': TOKEN1}) - - def test_list_sinks_missing_key(self): - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - RETURNED = {} - # Replace the connection on the client with one of our own. - CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - sinks, next_page_token = CLIENT_OBJ.list_sinks() - # Test values are correct. 
- self.assertEqual(len(sinks), 0) - self.assertEqual(next_page_token, None) - self.assertEqual(len(CLIENT_OBJ.connection._requested), 1) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/projects/%s/sinks' % PROJECT) - self.assertEqual(req['query_params'], {}) - - def test_metric(self): - from gcloud.logging.metric import Metric - creds = _Credentials() - - client_obj = self._makeOne(project=self.PROJECT, credentials=creds) - metric = client_obj.metric(self.METRIC_NAME, self.FILTER, - description=self.DESCRIPTION) - self.assertTrue(isinstance(metric, Metric)) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, self.DESCRIPTION) - self.assertTrue(metric.client is client_obj) - self.assertEqual(metric.project, self.PROJECT) - - def test_list_metrics_no_paging(self): - from gcloud.logging.metric import Metric - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - RETURNED = { - 'metrics': [{ - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - 'description': self.DESCRIPTION, - }], - } - # Replace the connection on the client with one of our own. - CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - metrics, next_page_token = CLIENT_OBJ.list_metrics() - # Test values are correct. - self.assertEqual(len(metrics), 1) - metric = metrics[0] - self.assertTrue(isinstance(metric, Metric)) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, self.DESCRIPTION) - self.assertEqual(next_page_token, None) - self.assertEqual(len(CLIENT_OBJ.connection._requested), 1) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/projects/%s/metrics' % PROJECT) - self.assertEqual(req['query_params'], {}) - - def test_list_metrics_with_paging(self): - from gcloud.logging.metric import Metric - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - TOKEN1 = 'TOKEN1' - TOKEN2 = 'TOKEN2' - SIZE = 1 - RETURNED = { - 'metrics': [{ - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - 'description': self.DESCRIPTION, - }], - 'nextPageToken': TOKEN2, - } - # Replace the connection on the client with one of our own. - CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - metrics, next_page_token = CLIENT_OBJ.list_metrics(SIZE, TOKEN1) - # Test values are correct. - self.assertEqual(len(metrics), 1) - metric = metrics[0] - self.assertTrue(isinstance(metric, Metric)) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, self.DESCRIPTION) - self.assertEqual(next_page_token, TOKEN2) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['path'], '/projects/%s/metrics' % PROJECT) - self.assertEqual(req['query_params'], - {'pageSize': SIZE, 'pageToken': TOKEN1}) - - def test_list_metrics_missing_key(self): - PROJECT = 'PROJECT' - CREDS = _Credentials() - - CLIENT_OBJ = self._makeOne(project=PROJECT, credentials=CREDS) - - RETURNED = {} - # Replace the connection on the client with one of our own. - CLIENT_OBJ.connection = _Connection(RETURNED) - - # Execute request. - metrics, next_page_token = CLIENT_OBJ.list_metrics() - # Test values are correct. 
- self.assertEqual(len(metrics), 0) - self.assertEqual(next_page_token, None) - self.assertEqual(len(CLIENT_OBJ.connection._requested), 1) - req = CLIENT_OBJ.connection._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/projects/%s/metrics' % PROJECT) - self.assertEqual(req['query_params'], {}) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/logging/test_connection.py b/gcloud/logging/test_connection.py deleted file mode 100644 index 2939b683305e..000000000000 --- a/gcloud/logging/test_connection.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.logging.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_default_url(self): - creds = _Credentials() - conn = self._makeOne(creds) - klass = self._getTargetClass() - self.assertEqual(conn.credentials._scopes, klass.SCOPE) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self diff --git a/gcloud/logging/test_entries.py b/gcloud/logging/test_entries.py deleted file mode 100644 index 312e456d63e0..000000000000 --- a/gcloud/logging/test_entries.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
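
Each of the client tests above leans on the same stubbing pattern: a fake connection object that replays canned JSON responses and records every ``api_request`` call, so the test can assert on the HTTP method, path, and query parameters without any network traffic. The following is a minimal, self-contained sketch of that pattern; the ``FakeConnection`` name and the toy ``list_sink_names`` helper are illustrative only and are not part of the deleted suite::

    class FakeConnection(object):
        """Replay canned responses and record every ``api_request`` call."""

        def __init__(self, *responses):
            self._responses = list(responses)
            self.requested = []

        def api_request(self, **kw):
            self.requested.append(kw)
            return self._responses.pop(0)


    def list_sink_names(connection, project):
        """Toy stand-in for the client method under test."""
        response = connection.api_request(
            method='GET', path='/projects/%s/sinks' % (project,))
        return [sink['name'] for sink in response.get('sinks', [])]


    conn = FakeConnection({'sinks': [{'name': 'projects/PROJECT/sinks/sink_name'}]})
    assert list_sink_names(conn, 'PROJECT') == ['projects/PROJECT/sinks/sink_name']
    assert conn.requested[0] == {'method': 'GET', 'path': '/projects/PROJECT/sinks'}
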
- -import unittest2 - - -class Test_BaseEntry(unittest2.TestCase): - - PROJECT = 'PROJECT' - LOGGER_NAME = 'LOGGER_NAME' - - def _getTargetClass(self): - from gcloud.logging.entries import _BaseEntry - - class _Dummy(_BaseEntry): - _PAYLOAD_KEY = 'dummyPayload' - - return _Dummy - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - PAYLOAD = 'PAYLOAD' - logger = _Logger(self.LOGGER_NAME, self.PROJECT) - entry = self._makeOne(PAYLOAD, logger) - self.assertEqual(entry.payload, PAYLOAD) - self.assertTrue(entry.logger is logger) - self.assertTrue(entry.insert_id is None) - self.assertTrue(entry.timestamp is None) - self.assertTrue(entry.labels is None) - self.assertTrue(entry.severity is None) - self.assertTrue(entry.http_request is None) - - def test_ctor_explicit(self): - import datetime - PAYLOAD = 'PAYLOAD' - IID = 'IID' - TIMESTAMP = datetime.datetime.now() - LABELS = {'foo': 'bar', 'baz': 'qux'} - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - logger = _Logger(self.LOGGER_NAME, self.PROJECT) - entry = self._makeOne(PAYLOAD, logger, - insert_id=IID, - timestamp=TIMESTAMP, - labels=LABELS, - severity=SEVERITY, - http_request=REQUEST) - self.assertEqual(entry.payload, PAYLOAD) - self.assertTrue(entry.logger is logger) - self.assertEqual(entry.insert_id, IID) - self.assertEqual(entry.timestamp, TIMESTAMP) - self.assertEqual(entry.labels, LABELS) - self.assertEqual(entry.severity, SEVERITY) - self.assertEqual(entry.http_request['requestMethod'], METHOD) - self.assertEqual(entry.http_request['requestUrl'], URI) - self.assertEqual(entry.http_request['status'], STATUS) - - def test_from_api_repr_missing_data_no_loggers(self): - client = _Client(self.PROJECT) - PAYLOAD = 'PAYLOAD' - LOG_NAME = 'projects/%s/logs/%s' % (self.PROJECT, self.LOGGER_NAME) - API_REPR = { - 'dummyPayload': PAYLOAD, - 'logName': LOG_NAME, - } - klass = self._getTargetClass() - entry = klass.from_api_repr(API_REPR, client) - self.assertEqual(entry.payload, PAYLOAD) - self.assertTrue(entry.insert_id is None) - self.assertTrue(entry.timestamp is None) - self.assertTrue(entry.severity is None) - self.assertTrue(entry.http_request is None) - logger = entry.logger - self.assertTrue(isinstance(logger, _Logger)) - self.assertTrue(logger.client is client) - self.assertEqual(logger.name, self.LOGGER_NAME) - - def test_from_api_repr_w_loggers_no_logger_match(self): - from datetime import datetime - from gcloud._helpers import UTC - klass = self._getTargetClass() - client = _Client(self.PROJECT) - PAYLOAD = 'PAYLOAD' - SEVERITY = 'CRITICAL' - IID = 'IID' - NOW = datetime.utcnow().replace(tzinfo=UTC) - TIMESTAMP = _datetime_to_rfc3339_w_nanos(NOW) - LOG_NAME = 'projects/%s/logs/%s' % (self.PROJECT, self.LOGGER_NAME) - LABELS = {'foo': 'bar', 'baz': 'qux'} - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - API_REPR = { - 'dummyPayload': PAYLOAD, - 'logName': LOG_NAME, - 'insertId': IID, - 'timestamp': TIMESTAMP, - 'labels': LABELS, - 'severity': SEVERITY, - 'httpRequest': { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - }, - } - loggers = {} - entry = klass.from_api_repr(API_REPR, client, loggers=loggers) - self.assertEqual(entry.payload, PAYLOAD) - self.assertEqual(entry.insert_id, IID) - self.assertEqual(entry.timestamp, NOW) - self.assertEqual(entry.labels, LABELS) - 
self.assertEqual(entry.severity, SEVERITY) - self.assertEqual(entry.http_request['requestMethod'], METHOD) - self.assertEqual(entry.http_request['requestUrl'], URI) - self.assertEqual(entry.http_request['status'], STATUS) - logger = entry.logger - self.assertTrue(isinstance(logger, _Logger)) - self.assertTrue(logger.client is client) - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertEqual(loggers, {LOG_NAME: logger}) - - def test_from_api_repr_w_loggers_w_logger_match(self): - from datetime import datetime - from gcloud._helpers import UTC - client = _Client(self.PROJECT) - PAYLOAD = 'PAYLOAD' - IID = 'IID' - NOW = datetime.utcnow().replace(tzinfo=UTC) - TIMESTAMP = _datetime_to_rfc3339_w_nanos(NOW) - LOG_NAME = 'projects/%s/logs/%s' % (self.PROJECT, self.LOGGER_NAME) - LABELS = {'foo': 'bar', 'baz': 'qux'} - API_REPR = { - 'dummyPayload': PAYLOAD, - 'logName': LOG_NAME, - 'insertId': IID, - 'timestamp': TIMESTAMP, - 'labels': LABELS, - } - LOGGER = object() - loggers = {LOG_NAME: LOGGER} - klass = self._getTargetClass() - entry = klass.from_api_repr(API_REPR, client, loggers=loggers) - self.assertEqual(entry.payload, PAYLOAD) - self.assertEqual(entry.insert_id, IID) - self.assertEqual(entry.timestamp, NOW) - self.assertEqual(entry.labels, LABELS) - self.assertTrue(entry.logger is LOGGER) - - -class TestProtobufEntry(unittest2.TestCase): - - PROJECT = 'PROJECT' - LOGGER_NAME = 'LOGGER_NAME' - - def _getTargetClass(self): - from gcloud.logging.entries import ProtobufEntry - return ProtobufEntry - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_parse_message(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - LOGGER = object() - message = Struct(fields={'foo': Value(bool_value=False)}) - with_true = Struct(fields={'foo': Value(bool_value=True)}) - PAYLOAD = json.loads(MessageToJson(with_true)) - entry = self._makeOne(PAYLOAD, LOGGER) - entry.parse_message(message) - self.assertTrue(message.fields['foo']) - - -def _datetime_to_rfc3339_w_nanos(value): - from gcloud._helpers import _RFC3339_NO_FRACTION - no_fraction = value.strftime(_RFC3339_NO_FRACTION) - return '%s.%09dZ' % (no_fraction, value.microsecond * 1000) - - -class _Logger(object): - - def __init__(self, name, client): - self.name = name - self.client = client - - -class _Client(object): - - def __init__(self, project): - self.project = project - - def logger(self, name): - return _Logger(name, self) diff --git a/gcloud/logging/test_logger.py b/gcloud/logging/test_logger.py deleted file mode 100644 index 069ad2f47d2e..000000000000 --- a/gcloud/logging/test_logger.py +++ /dev/null @@ -1,762 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
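
The ``from_api_repr`` tests above build their expected ``timestamp`` strings with the module-level ``_datetime_to_rfc3339_w_nanos`` helper, which widens a datetime's microseconds into a nine-digit nanosecond field. A standalone equivalent is sketched below; the ``'%Y-%m-%dT%H:%M:%S'`` layout is an assumption standing in for ``gcloud._helpers._RFC3339_NO_FRACTION``::

    from datetime import datetime


    def datetime_to_rfc3339_w_nanos(value):
        """Format ``value`` as RFC 3339 with a nanosecond fraction.

        Python datetimes only carry microseconds, so the last three
        digits of the fraction are always zero.
        """
        no_fraction = value.strftime('%Y-%m-%dT%H:%M:%S')
        return '%s.%09dZ' % (no_fraction, value.microsecond * 1000)


    print(datetime_to_rfc3339_w_nanos(datetime.utcnow()))
    # e.g. '2016-03-01T12:34:56.789012000Z'
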
- -import unittest2 - - -class TestLogger(unittest2.TestCase): - - PROJECT = 'test-project' - LOGGER_NAME = 'logger-name' - - def _getTargetClass(self): - from gcloud.logging.logger import Logger - return Logger - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - conn = _Connection() - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - self.assertEqual(logger.full_name, 'projects/%s/logs/%s' - % (self.PROJECT, self.LOGGER_NAME)) - self.assertEqual(logger.path, '/projects/%s/logs/%s' - % (self.PROJECT, self.LOGGER_NAME)) - self.assertEqual(logger.labels, None) - - def test_ctor_explicit(self): - LABELS = {'foo': 'bar', 'baz': 'qux'} - conn = _Connection() - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client, labels=LABELS) - self.assertEqual(logger.name, self.LOGGER_NAME) - self.assertTrue(logger.client is client) - self.assertEqual(logger.project, self.PROJECT) - self.assertEqual(logger.full_name, 'projects/%s/logs/%s' - % (self.PROJECT, self.LOGGER_NAME)) - self.assertEqual(logger.path, '/projects/%s/logs/%s' - % (self.PROJECT, self.LOGGER_NAME)) - self.assertEqual(logger.labels, LABELS) - - def test_batch_w_bound_client(self): - from gcloud.logging.logger import Batch - conn = _Connection() - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - batch = logger.batch() - self.assertTrue(isinstance(batch, Batch)) - self.assertTrue(batch.logger is logger) - self.assertTrue(batch.client is client) - - def test_batch_w_alternate_client(self): - from gcloud.logging.logger import Batch - conn1 = _Connection() - conn2 = _Connection() - client1 = _Client(self.PROJECT, conn1) - client2 = _Client(self.PROJECT, conn2) - logger = self._makeOne(self.LOGGER_NAME, client=client1) - batch = logger.batch(client2) - self.assertTrue(isinstance(batch, Batch)) - self.assertTrue(batch.logger is logger) - self.assertTrue(batch.client is client2) - - def test_log_text_w_str_implicit_client(self): - TEXT = 'TEXT' - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - logger.log_text(TEXT) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'textPayload': TEXT, - 'resource': { - 'type': 'global', - }, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_text_w_default_labels(self): - TEXT = 'TEXT' - DEFAULT_LABELS = {'foo': 'spam'} - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client, - labels=DEFAULT_LABELS) - logger.log_text(TEXT) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'textPayload': TEXT, - 'resource': { - 'type': 'global', - }, - 'labels': DEFAULT_LABELS, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_text_w_unicode_explicit_client_labels_severity_httpreq(self): - TEXT = u'TEXT' - 
DEFAULT_LABELS = {'foo': 'spam'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - conn = _Connection({}) - client1 = _Client(self.PROJECT, object()) - client2 = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client1, - labels=DEFAULT_LABELS) - logger.log_text(TEXT, client=client2, labels=LABELS, - insert_id=IID, severity=SEVERITY, http_request=REQUEST) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'textPayload': TEXT, - 'resource': { - 'type': 'global', - }, - 'labels': LABELS, - 'insertId': IID, - 'severity': SEVERITY, - 'httpRequest': REQUEST, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_struct_w_implicit_client(self): - STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'} - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - logger.log_struct(STRUCT) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'jsonPayload': STRUCT, - 'resource': { - 'type': 'global', - }, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_struct_w_default_labels(self): - STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'} - DEFAULT_LABELS = {'foo': 'spam'} - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client, - labels=DEFAULT_LABELS) - logger.log_struct(STRUCT) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'jsonPayload': STRUCT, - 'resource': { - 'type': 'global', - }, - 'labels': DEFAULT_LABELS, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_struct_w_explicit_client_labels_severity_httpreq(self): - STRUCT = {'message': 'MESSAGE', 'weather': 'cloudy'} - DEFAULT_LABELS = {'foo': 'spam'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - conn = _Connection({}) - client1 = _Client(self.PROJECT, object()) - client2 = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client1, - labels=DEFAULT_LABELS) - logger.log_struct(STRUCT, client=client2, labels=LABELS, - insert_id=IID, severity=SEVERITY, - http_request=REQUEST) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'jsonPayload': STRUCT, - 'resource': { - 'type': 'global', - }, - 'labels': LABELS, - 'insertId': IID, - 'severity': SEVERITY, - 'httpRequest': REQUEST, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - 
self.assertEqual(req['data'], SENT) - - def test_log_proto_w_implicit_client(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - message = Struct(fields={'foo': Value(bool_value=True)}) - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - logger.log_proto(message) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'protoPayload': json.loads(MessageToJson(message)), - 'resource': { - 'type': 'global', - }, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_proto_w_default_labels(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - message = Struct(fields={'foo': Value(bool_value=True)}) - DEFAULT_LABELS = {'foo': 'spam'} - conn = _Connection({}) - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client, - labels=DEFAULT_LABELS) - logger.log_proto(message) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'protoPayload': json.loads(MessageToJson(message)), - 'resource': { - 'type': 'global', - }, - 'labels': DEFAULT_LABELS, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_log_proto_w_explicit_client_labels_severity_httpreq(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - message = Struct(fields={'foo': Value(bool_value=True)}) - DEFAULT_LABELS = {'foo': 'spam'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - conn = _Connection({}) - client1 = _Client(self.PROJECT, object()) - client2 = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client1, - labels=DEFAULT_LABELS) - logger.log_proto(message, client=client2, labels=LABELS, - insert_id=IID, severity=SEVERITY, - http_request=REQUEST) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - SENT = { - 'entries': [{ - 'logName': 'projects/%s/logs/%s' % ( - self.PROJECT, self.LOGGER_NAME), - 'protoPayload': json.loads(MessageToJson(message)), - 'resource': { - 'type': 'global', - }, - 'labels': LABELS, - 'insertId': IID, - 'severity': SEVERITY, - 'httpRequest': REQUEST, - }], - } - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_delete_w_bound_client(self): - PATH = 'projects/%s/logs/%s' % (self.PROJECT, self.LOGGER_NAME) - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - logger = self._makeOne(self.LOGGER_NAME, client=CLIENT) - logger.delete() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_delete_w_alternate_client(self): - PATH = 'projects/%s/logs/%s' % 
(self.PROJECT, self.LOGGER_NAME) - conn1 = _Connection({}) - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - logger = self._makeOne(self.LOGGER_NAME, client=CLIENT1) - logger.delete(client=CLIENT2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % PATH) - - def test_list_entries_defaults(self): - LISTED = { - 'projects': None, - 'filter_': 'logName:%s' % (self.LOGGER_NAME), - 'order_by': None, - 'page_size': None, - 'page_token': None, - } - TOKEN = 'TOKEN' - conn = _Connection() - client = _Client(self.PROJECT, conn) - client._token = TOKEN - logger = self._makeOne(self.LOGGER_NAME, client=client) - entries, token = logger.list_entries() - self.assertEqual(len(entries), 0) - self.assertEqual(token, TOKEN) - self.assertEqual(client._listed, LISTED) - - def test_list_entries_explicit(self): - from gcloud.logging import DESCENDING - PROJECT1 = 'PROJECT1' - PROJECT2 = 'PROJECT2' - FILTER = 'resource.type:global' - TOKEN = 'TOKEN' - PAGE_SIZE = 42 - LISTED = { - 'projects': ['PROJECT1', 'PROJECT2'], - 'filter_': '%s AND logName:%s' % (FILTER, self.LOGGER_NAME), - 'order_by': DESCENDING, - 'page_size': PAGE_SIZE, - 'page_token': TOKEN, - } - conn = _Connection() - client = _Client(self.PROJECT, conn) - logger = self._makeOne(self.LOGGER_NAME, client=client) - entries, token = logger.list_entries( - projects=[PROJECT1, PROJECT2], filter_=FILTER, order_by=DESCENDING, - page_size=PAGE_SIZE, page_token=TOKEN) - self.assertEqual(len(entries), 0) - self.assertEqual(token, None) - self.assertEqual(client._listed, LISTED) - - -class TestBatch(unittest2.TestCase): - - PROJECT = 'test-project' - - def _getTargetClass(self): - from gcloud.logging.logger import Batch - return Batch - - def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) - - def test_ctor_defaults(self): - logger = _Logger() - CLIENT = _Client(project=self.PROJECT) - batch = self._makeOne(logger, CLIENT) - self.assertTrue(batch.logger is logger) - self.assertTrue(batch.client is CLIENT) - self.assertEqual(len(batch.entries), 0) - - def test_log_text_defaults(self): - TEXT = 'This is the entry text' - connection = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_text(TEXT) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('text', TEXT, None, None, None, None)]) - - def test_log_text_explicit(self): - TEXT = 'This is the entry text' - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - connection = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_text(TEXT, labels=LABELS, insert_id=IID, severity=SEVERITY, - http_request=REQUEST) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('text', TEXT, LABELS, IID, SEVERITY, REQUEST)]) - - def test_log_struct_defaults(self): - STRUCT = {'message': 'Message text', 'weather': 'partly cloudy'} - connection = _Connection() - CLIENT = 
_Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_struct(STRUCT) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('struct', STRUCT, None, None, None, None)]) - - def test_log_struct_explicit(self): - STRUCT = {'message': 'Message text', 'weather': 'partly cloudy'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - connection = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_struct(STRUCT, labels=LABELS, insert_id=IID, - severity=SEVERITY, http_request=REQUEST) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('struct', STRUCT, LABELS, IID, SEVERITY, REQUEST)]) - - def test_log_proto_defaults(self): - from google.protobuf.struct_pb2 import Struct, Value - message = Struct(fields={'foo': Value(bool_value=True)}) - connection = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_proto(message) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('proto', message, None, None, None, None)]) - - def test_log_proto_explicit(self): - from google.protobuf.struct_pb2 import Struct, Value - message = Struct(fields={'foo': Value(bool_value=True)}) - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - connection = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=connection) - logger = _Logger() - batch = self._makeOne(logger, client=CLIENT) - batch.log_proto(message, labels=LABELS, insert_id=IID, - severity=SEVERITY, http_request=REQUEST) - self.assertEqual(len(connection._requested), 0) - self.assertEqual(batch.entries, - [('proto', message, LABELS, IID, SEVERITY, REQUEST)]) - - def test_commit_w_invalid_entry_type(self): - logger = _Logger() - conn = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=conn) - batch = self._makeOne(logger, CLIENT) - batch.entries.append(('bogus', 'BOGUS', None, None, None, None)) - with self.assertRaises(ValueError): - batch.commit() - - def test_commit_w_bound_client(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - TEXT = 'This is the entry text' - STRUCT = {'message': TEXT, 'weather': 'partly cloudy'} - message = Struct(fields={'foo': Value(bool_value=True)}) - IID1 = 'IID1' - IID2 = 'IID2' - IID3 = 'IID3' - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - logger = _Logger() - SENT = { - 'logName': logger.path, - 'resource': { - 'type': 'global', - }, - 'entries': [ - {'textPayload': TEXT, 'insertId': IID1}, - {'jsonPayload': STRUCT, 'insertId': IID2}, - {'protoPayload': json.loads(MessageToJson(message)), - 'insertId': IID3}, - ], - } - batch = self._makeOne(logger, client=CLIENT) - batch.log_text(TEXT, insert_id=IID1) - batch.log_struct(STRUCT, insert_id=IID2) - batch.log_proto(message, insert_id=IID3) - batch.commit() - 
self.assertEqual(list(batch.entries), []) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_commit_w_alternate_client(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - from gcloud.logging.logger import Logger - TEXT = 'This is the entry text' - STRUCT = {'message': TEXT, 'weather': 'partly cloudy'} - message = Struct(fields={'foo': Value(bool_value=True)}) - DEFAULT_LABELS = {'foo': 'spam'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - conn1 = _Connection() - conn2 = _Connection({}) - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - logger = Logger('logger_name', CLIENT1, labels=DEFAULT_LABELS) - SENT = { - 'logName': logger.path, - 'resource': {'type': 'global'}, - 'labels': DEFAULT_LABELS, - 'entries': [ - {'textPayload': TEXT, 'labels': LABELS}, - {'jsonPayload': STRUCT, 'severity': SEVERITY}, - {'protoPayload': json.loads(MessageToJson(message)), - 'httpRequest': REQUEST}, - ], - } - batch = self._makeOne(logger, client=CLIENT1) - batch.log_text(TEXT, labels=LABELS) - batch.log_struct(STRUCT, severity=SEVERITY) - batch.log_proto(message, http_request=REQUEST) - batch.commit(client=CLIENT2) - self.assertEqual(list(batch.entries), []) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_context_mgr_success(self): - import json - from google.protobuf.json_format import MessageToJson - from google.protobuf.struct_pb2 import Struct, Value - from gcloud.logging.logger import Logger - TEXT = 'This is the entry text' - STRUCT = {'message': TEXT, 'weather': 'partly cloudy'} - message = Struct(fields={'foo': Value(bool_value=True)}) - DEFAULT_LABELS = {'foo': 'spam'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - logger = Logger('logger_name', CLIENT, labels=DEFAULT_LABELS) - SENT = { - 'logName': logger.path, - 'resource': { - 'type': 'global', - }, - 'labels': DEFAULT_LABELS, - 'entries': [ - {'textPayload': TEXT, 'httpRequest': REQUEST}, - {'jsonPayload': STRUCT, 'labels': LABELS}, - {'protoPayload': json.loads(MessageToJson(message)), - 'severity': SEVERITY}, - ], - } - batch = self._makeOne(logger, client=CLIENT) - - with batch as other: - other.log_text(TEXT, http_request=REQUEST) - other.log_struct(STRUCT, labels=LABELS) - other.log_proto(message, severity=SEVERITY) - - self.assertEqual(list(batch.entries), []) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/entries:write') - self.assertEqual(req['data'], SENT) - - def test_context_mgr_failure(self): - from google.protobuf.struct_pb2 import Struct, Value - TEXT = 'This is the entry 
text' - STRUCT = {'message': TEXT, 'weather': 'partly cloudy'} - LABELS = {'foo': 'bar', 'baz': 'qux'} - IID = 'IID' - SEVERITY = 'CRITICAL' - METHOD = 'POST' - URI = 'https://api.example.com/endpoint' - STATUS = '500' - REQUEST = { - 'requestMethod': METHOD, - 'requestUrl': URI, - 'status': STATUS, - } - message = Struct(fields={'foo': Value(bool_value=True)}) - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - logger = _Logger() - UNSENT = [ - ('text', TEXT, None, IID, None, None), - ('struct', STRUCT, None, None, SEVERITY, None), - ('proto', message, LABELS, None, None, REQUEST), - ] - batch = self._makeOne(logger, client=CLIENT) - - try: - with batch as other: - other.log_text(TEXT, insert_id=IID) - other.log_struct(STRUCT, severity=SEVERITY) - other.log_proto(message, labels=LABELS, http_request=REQUEST) - raise _Bugout() - except _Bugout: - pass - - self.assertEqual(list(batch.entries), UNSENT) - self.assertEqual(len(conn._requested), 0) - - -class _Logger(object): - - labels = None - - def __init__(self, name="NAME", project="PROJECT"): - self.path = '/projects/%s/logs/%s' % (project, name) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _Client(object): - - _listed = _token = None - _entries = () - - def __init__(self, project, connection=None): - self.project = project - self.connection = connection - - def list_entries(self, **kw): - self._listed = kw - return self._entries, self._token - - -class _Bugout(Exception): - pass diff --git a/gcloud/logging/test_metric.py b/gcloud/logging/test_metric.py deleted file mode 100644 index cbba9d1c4252..000000000000 --- a/gcloud/logging/test_metric.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
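
Taken together, the ``TestBatch`` cases above pin down the batching contract: entries accumulate locally with no API traffic, ``commit()`` sends them in a single ``POST /entries:write`` request, a clean exit from the ``with`` block commits automatically, and an exception leaves ``batch.entries`` untouched so nothing is sent. Assuming application-default credentials and a hypothetical project and log name, typical usage looks roughly like this::

    from gcloud import logging

    client = logging.Client(project='my-project')
    logger = client.logger('my_log')  # hypothetical log name

    # Entries accumulate locally; one write request is sent on exit.
    with logger.batch() as batch:
        batch.log_text('backup finished', severity='INFO')
        batch.log_struct({'state': 'done', 'items': 42})

    # Without the context manager, commit() must be called explicitly;
    # until then, batch.entries holds the pending payloads.
    batch = logger.batch()
    batch.log_text('retry me')
    batch.commit()
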
- -import unittest2 - - -class Test__metric_name_from_path(unittest2.TestCase): - - def _callFUT(self, path, project): - from gcloud.logging.metric import _metric_name_from_path - return _metric_name_from_path(path, project) - - def test_invalid_path_length(self): - PATH = 'projects/foo' - PROJECT = None - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT) - - def test_invalid_path_format(self): - METRIC_NAME = 'METRIC_NAME' - PROJECT = 'PROJECT' - PATH = 'foo/%s/bar/%s' % (PROJECT, METRIC_NAME) - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT) - - def test_invalid_project(self): - METRIC_NAME = 'METRIC_NAME' - PROJECT1 = 'PROJECT1' - PROJECT2 = 'PROJECT2' - PATH = 'projects/%s/metrics/%s' % (PROJECT1, METRIC_NAME) - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT2) - - def test_valid_data(self): - METRIC_NAME = 'METRIC_NAME' - PROJECT = 'PROJECT' - PATH = 'projects/%s/metrics/%s' % (PROJECT, METRIC_NAME) - metric_name = self._callFUT(PATH, PROJECT) - self.assertEqual(metric_name, METRIC_NAME) - - -class TestMetric(unittest2.TestCase): - - PROJECT = 'test-project' - METRIC_NAME = 'metric-name' - FILTER = 'logName:syslog AND severity>=ERROR' - DESCRIPTION = 'DESCRIPTION' - - def _getTargetClass(self): - from gcloud.logging.metric import Metric - return Metric - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn = _Connection() - client = _Client(self.PROJECT, conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, '') - self.assertTrue(metric.client is client) - self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - self.assertEqual(metric.path, '/%s' % (FULL,)) - - def test_ctor_explicit(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn = _Connection() - client = _Client(self.PROJECT, conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, - client=client, description=self.DESCRIPTION) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, self.DESCRIPTION) - self.assertTrue(metric.client is client) - self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - self.assertEqual(metric.path, '/%s' % (FULL,)) - - def test_from_api_repr_minimal(self): - CLIENT = _Client(project=self.PROJECT) - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - } - klass = self._getTargetClass() - metric = klass.from_api_repr(RESOURCE, client=CLIENT) - self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, '') - self.assertTrue(metric._client is CLIENT) - self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - - def test_from_api_repr_w_description(self): - CLIENT = _Client(project=self.PROJECT) - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - DESCRIPTION = 'DESCRIPTION' - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - 'description': DESCRIPTION, - } - klass = self._getTargetClass() - metric = klass.from_api_repr(RESOURCE, client=CLIENT) - 
self.assertEqual(metric.name, self.METRIC_NAME) - self.assertEqual(metric.filter_, self.FILTER) - self.assertEqual(metric.description, DESCRIPTION) - self.assertTrue(metric._client is CLIENT) - self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - - def test_create_w_bound_client(self): - TARGET = 'projects/%s/metrics' % (self.PROJECT,) - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - } - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client) - metric.create() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % TARGET) - self.assertEqual(req['data'], RESOURCE) - - def test_create_w_alternate_client(self): - TARGET = 'projects/%s/metrics' % (self.PROJECT,) - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - 'description': self.DESCRIPTION, - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=client1, - description=self.DESCRIPTION) - metric.create(client=client2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % TARGET) - self.assertEqual(req['data'], RESOURCE) - - def test_exists_miss_w_bound_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT) - self.assertFalse(metric.exists()) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_exists_hit_w_alternate_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({'name': FULL}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT1) - self.assertTrue(metric.exists(client=CLIENT2)) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_reload_w_bound_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - DESCRIPTION = 'DESCRIPTION' - NEW_FILTER = 'logName:syslog AND severity>=INFO' - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': NEW_FILTER, - } - conn = _Connection(RESOURCE) - CLIENT = _Client(project=self.PROJECT, connection=conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT, - description=DESCRIPTION) - metric.reload() - self.assertEqual(metric.filter_, NEW_FILTER) - self.assertEqual(metric.description, '') - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_reload_w_alternate_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - DESCRIPTION = 'DESCRIPTION' - 
NEW_FILTER = 'logName:syslog AND severity>=INFO' - RESOURCE = { - 'name': self.METRIC_NAME, - 'description': DESCRIPTION, - 'filter': NEW_FILTER, - } - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT1) - metric.reload(client=CLIENT2) - self.assertEqual(metric.filter_, NEW_FILTER) - self.assertEqual(metric.description, DESCRIPTION) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_update_w_bound_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - RESOURCE = { - 'name': self.METRIC_NAME, - 'filter': self.FILTER, - } - conn = _Connection(RESOURCE) - CLIENT = _Client(project=self.PROJECT, connection=conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT) - metric.update() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % FULL) - self.assertEqual(req['data'], RESOURCE) - - def test_update_w_alternate_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - DESCRIPTION = 'DESCRIPTION' - RESOURCE = { - 'name': self.METRIC_NAME, - 'description': DESCRIPTION, - 'filter': self.FILTER, - } - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT1, - description=DESCRIPTION) - metric.update(client=CLIENT2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % FULL) - self.assertEqual(req['data'], RESOURCE) - - def test_delete_w_bound_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT) - metric.delete() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_delete_w_alternate_client(self): - FULL = 'projects/%s/metrics/%s' % (self.PROJECT, self.METRIC_NAME) - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - metric = self._makeOne(self.METRIC_NAME, self.FILTER, client=CLIENT1) - metric.delete(client=CLIENT2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % FULL) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: # pragma: NO COVER - raise NotFound('miss') - else: - return response - - -class _Client(object): - - 
def __init__(self, project, connection=None): - self.project = project - self.connection = connection diff --git a/gcloud/logging/test_sink.py b/gcloud/logging/test_sink.py deleted file mode 100644 index 103aa0ab6b8c..000000000000 --- a/gcloud/logging/test_sink.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2016 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test__sink_name_from_path(unittest2.TestCase): - - def _callFUT(self, path, project): - from gcloud.logging.sink import _sink_name_from_path - return _sink_name_from_path(path, project) - - def test_invalid_path_length(self): - PATH = 'projects/foo' - PROJECT = None - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT) - - def test_invalid_path_format(self): - SINK_NAME = 'SINK_NAME' - PROJECT = 'PROJECT' - PATH = 'foo/%s/bar/%s' % (PROJECT, SINK_NAME) - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT) - - def test_invalid_project(self): - SINK_NAME = 'SINK_NAME' - PROJECT1 = 'PROJECT1' - PROJECT2 = 'PROJECT2' - PATH = 'projects/%s/sinks/%s' % (PROJECT1, SINK_NAME) - self.assertRaises(ValueError, self._callFUT, PATH, PROJECT2) - - def test_valid_data(self): - SINK_NAME = 'SINK_NAME' - PROJECT = 'PROJECT' - PATH = 'projects/%s/sinks/%s' % (PROJECT, SINK_NAME) - sink_name = self._callFUT(PATH, PROJECT) - self.assertEqual(sink_name, SINK_NAME) - - -class TestSink(unittest2.TestCase): - - PROJECT = 'test-project' - SINK_NAME = 'sink-name' - FILTER = 'logName:syslog AND severity>=INFO' - DESTINATION_URI = 'faux.googleapis.com/destination' - - def _getTargetClass(self): - from gcloud.logging.sink import Sink - return Sink - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - conn = _Connection() - client = _Client(self.PROJECT, conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=client) - self.assertEqual(sink.name, self.SINK_NAME) - self.assertEqual(sink.filter_, self.FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertTrue(sink.client is client) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) - self.assertEqual(sink.path, '/%s' % (FULL,)) - - def test_from_api_repr_minimal(self): - CLIENT = _Client(project=self.PROJECT) - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - RESOURCE = { - 'name': FULL, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - klass = self._getTargetClass() - sink = klass.from_api_repr(RESOURCE, client=CLIENT) - self.assertEqual(sink.name, self.SINK_NAME) - self.assertEqual(sink.filter_, self.FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertTrue(sink._client is CLIENT) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) - - def test_from_api_repr_w_description(self): - CLIENT = _Client(project=self.PROJECT) - FULL = 
'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - RESOURCE = { - 'name': FULL, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - klass = self._getTargetClass() - sink = klass.from_api_repr(RESOURCE, client=CLIENT) - self.assertEqual(sink.name, self.SINK_NAME) - self.assertEqual(sink.filter_, self.FILTER) - self.assertEqual(sink.destination, self.DESTINATION_URI) - self.assertTrue(sink._client is CLIENT) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) - - def test_from_api_repr_with_mismatched_project(self): - PROJECT1 = 'PROJECT1' - PROJECT2 = 'PROJECT2' - CLIENT = _Client(project=PROJECT1) - FULL = 'projects/%s/sinks/%s' % (PROJECT2, self.SINK_NAME) - RESOURCE = { - 'name': FULL, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - klass = self._getTargetClass() - self.assertRaises(ValueError, klass.from_api_repr, - RESOURCE, client=CLIENT) - - def test_create_w_bound_client(self): - TARGET = 'projects/%s/sinks' % (self.PROJECT,) - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - conn = _Connection(RESOURCE) - client = _Client(project=self.PROJECT, connection=conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=client) - sink.create() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % TARGET) - self.assertEqual(req['data'], RESOURCE) - - def test_create_w_alternate_client(self): - TARGET = 'projects/%s/sinks' % (self.PROJECT,) - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - client2 = _Client(project=self.PROJECT, connection=conn2) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=client1) - sink.create(client=client2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'POST') - self.assertEqual(req['path'], '/%s' % TARGET) - self.assertEqual(req['data'], RESOURCE) - - def test_exists_miss_w_bound_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - conn = _Connection() - CLIENT = _Client(project=self.PROJECT, connection=conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT) - self.assertFalse(sink.exists()) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_exists_hit_w_alternate_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({'name': FULL}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT1) - self.assertTrue(sink.exists(client=CLIENT2)) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_reload_w_bound_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - NEW_FILTER = 'logName:syslog AND 
severity>=INFO' - NEW_DESTINATION_URI = 'faux.googleapis.com/other' - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': NEW_FILTER, - 'destination': NEW_DESTINATION_URI, - } - conn = _Connection(RESOURCE) - CLIENT = _Client(project=self.PROJECT, connection=conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT) - sink.reload() - self.assertEqual(sink.filter_, NEW_FILTER) - self.assertEqual(sink.destination, NEW_DESTINATION_URI) - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_reload_w_alternate_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - NEW_FILTER = 'logName:syslog AND severity>=INFO' - NEW_DESTINATION_URI = 'faux.googleapis.com/other' - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': NEW_FILTER, - 'destination': NEW_DESTINATION_URI, - } - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT1) - sink.reload(client=CLIENT2) - self.assertEqual(sink.filter_, NEW_FILTER) - self.assertEqual(sink.destination, NEW_DESTINATION_URI) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_update_w_bound_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - conn = _Connection(RESOURCE) - CLIENT = _Client(project=self.PROJECT, connection=conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT) - sink.update() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % FULL) - self.assertEqual(req['data'], RESOURCE) - - def test_update_w_alternate_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - RESOURCE = { - 'name': self.SINK_NAME, - 'filter': self.FILTER, - 'destination': self.DESTINATION_URI, - } - conn1 = _Connection() - CLIENT1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(RESOURCE) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT1) - sink.update(client=CLIENT2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % FULL) - self.assertEqual(req['data'], RESOURCE) - - def test_delete_w_bound_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - conn = _Connection({}) - CLIENT = _Client(project=self.PROJECT, connection=conn) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT) - sink.delete() - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % FULL) - - def test_delete_w_alternate_client(self): - FULL = 'projects/%s/sinks/%s' % (self.PROJECT, self.SINK_NAME) - conn1 = _Connection() - CLIENT1 = 
_Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - CLIENT2 = _Client(project=self.PROJECT, connection=conn2) - sink = self._makeOne(self.SINK_NAME, self.FILTER, self.DESTINATION_URI, - client=CLIENT1) - sink.delete(client=CLIENT2) - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % FULL) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: # pragma: NO COVER - raise NotFound('miss') - else: - return response - - -class _Client(object): - - def __init__(self, project, connection=None): - self.project = project - self.connection = connection diff --git a/gcloud/resource_manager/__init__.py b/gcloud/resource_manager/__init__.py deleted file mode 100644 index 25089c3854c4..000000000000 --- a/gcloud/resource_manager/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Resource Manager API wrapper.""" - -from gcloud.resource_manager.client import Client -from gcloud.resource_manager.connection import Connection -from gcloud.resource_manager.project import Project - - -SCOPE = Connection.SCOPE diff --git a/gcloud/resource_manager/client.py b/gcloud/resource_manager/client.py deleted file mode 100644 index 74844019babc..000000000000 --- a/gcloud/resource_manager/client.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A Client for interacting with the Resource Manager API.""" - - -from gcloud.client import Client as BaseClient -from gcloud.iterator import Iterator -from gcloud.resource_manager.connection import Connection -from gcloud.resource_manager.project import Project - - -class Client(BaseClient): - """Client to bundle configuration needed for API requests. - - See - https://cloud.google.com/resource-manager/reference/rest/ - for more information on this API. 
- - Automatically get credentials:: - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. - """ - - _connection_class = Connection - - def new_project(self, project_id, name=None, labels=None): - """Creates a :class:`.Project` bound to the current client. - - Use :meth:`Project.reload() \ - ` to retrieve - project metadata after creating a :class:`.Project` instance. - - .. note: - - This does not make an API call. - - :type project_id: str - :param project_id: The ID for this project. - - :type name: string - :param name: The display name of the project. - - :type labels: dict - :param labels: A list of labels associated with the project. - - :rtype: :class:`.Project` - :returns: A new instance of a :class:`.Project` **without** - any metadata loaded. - """ - return Project(project_id=project_id, - client=self, name=name, labels=labels) - - def fetch_project(self, project_id): - """Fetch an existing project and it's relevant metadata by ID. - - .. note:: - - If the project does not exist, this will raise a - :class:`NotFound ` error. - - :type project_id: str - :param project_id: The ID for this project. - - :rtype: :class:`.Project` - :returns: A :class:`.Project` with metadata fetched from the API. - """ - project = self.new_project(project_id) - project.reload() - return project - - def list_projects(self, filter_params=None, page_size=None): - """List the projects visible to this client. - - Example:: - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() - >>> for project in client.list_projects(): - ... print project.project_id - - List all projects with label ``'environment'`` set to ``'prod'`` - (filtering by labels):: - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() - >>> env_filter = {'labels.environment': 'prod'} - >>> for project in client.list_projects(env_filter): - ... print project.project_id - - See: - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/list - - Complete filtering example:: - - >>> project_filter = { # Return projects with... - ... 'name': 'My Project', # name set to 'My Project'. - ... 'id': 'my-project-id', # id set to 'my-project-id'. - ... 'labels.stage': 'prod', # the label 'stage' set to 'prod' - ... 'labels.color': '*' # a label 'color' set to anything. - ... } - >>> client.list_projects(project_filter) - - :type filter_params: dict - :param filter_params: (Optional) A dictionary of filter options where - each key is a property to filter on, and each - value is the (case-insensitive) value to check - (or the glob ``*`` to check for existence of the - property). See the example above for more - details. - - :type page_size: int - :param page_size: (Optional) Maximum number of projects to return in a - single page. If not passed, defaults to a value set - by the API. - - :rtype: :class:`_ProjectIterator` - :returns: A project iterator. 
The iterator will make multiple API - requests if you continue iterating and there are more - pages of results. Each item returned will be a. - :class:`.Project`. - """ - extra_params = {} - - if page_size is not None: - extra_params['pageSize'] = page_size - - if filter_params is not None: - extra_params['filter'] = filter_params - - return _ProjectIterator(self, extra_params=extra_params) - - -class _ProjectIterator(Iterator): - """An iterator over a list of Project resources. - - You shouldn't have to use this directly, but instead should use the - helper methods on :class:`gcloud.resource_manager.client.Client` - objects. - - :type client: :class:`gcloud.resource_manager.client.Client` - :param client: The client to use for making connections. - - :type extra_params: dict - :param extra_params: (Optional) Extra query string parameters for - the API call. - """ - - def __init__(self, client, extra_params=None): - super(_ProjectIterator, self).__init__(client=client, path='/projects', - extra_params=extra_params) - - def get_items_from_response(self, response): - """Yield :class:`.Project` items from response. - - :type response: dict - :param response: The JSON API response for a page of projects. - """ - for resource in response.get('projects', []): - item = Project.from_api_repr(resource, client=self.client) - yield item diff --git a/gcloud/resource_manager/connection.py b/gcloud/resource_manager/connection.py deleted file mode 100644 index 49ef2fc2e57b..000000000000 --- a/gcloud/resource_manager/connection.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud.resource_manager connections.""" - - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud Resource Manager via the JSON REST API. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - connection. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: (Optional) HTTP object to make requests. - """ - - API_BASE_URL = 'https://cloudresourcemanager.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v1beta1' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/cloud-platform',) - """The scopes required for authenticating as a Resouce Manager consumer.""" diff --git a/gcloud/resource_manager/project.py b/gcloud/resource_manager/project.py deleted file mode 100644 index bf3c0e01d18b..000000000000 --- a/gcloud/resource_manager/project.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. 
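(Editor's note.) The ``Connection`` subclass above contributes only constants; the shared ``JSONConnection`` machinery expands them into request URLs through ``API_URL_TEMPLATE``. A rough sketch of that expansion, using only the constants shown above (the project path is illustrative)::

    # Constants copied from gcloud/resource_manager/connection.py above.
    API_BASE_URL = 'https://cloudresourcemanager.googleapis.com'
    API_VERSION = 'v1beta1'
    API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'

    # A project path such as '/projects/purple-spaceship-123' expands to
    # 'https://cloudresourcemanager.googleapis.com/v1beta1/projects/purple-spaceship-123'
    url = API_URL_TEMPLATE.format(api_base_url=API_BASE_URL,
                                  api_version=API_VERSION,
                                  path='/projects/purple-spaceship-123')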
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utility for managing projects via the Cloud Resource Manager API.""" - - -from gcloud.exceptions import NotFound - - -class Project(object): - """Projects are containers for your work on Google Cloud Platform. - - .. note:: - - A :class:`Project` can also be created via - :meth:`Client.new_project() \ - ` - - To manage labels on a :class:`Project`:: - - >>> from gcloud import resource_manager - >>> client = resource_manager.Client() - >>> project = client.new_project('purple-spaceship-123') - >>> project.labels = {'color': 'purple'} - >>> project.labels['environment'] = 'production' - >>> project.update() - - See: - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects - - :type project_id: string - :param project_id: The globally unique ID of the project. - - :type client: :class:`gcloud.resource_manager.client.Client` - :param client: The Client used with this project. - - :type name: string - :param name: The display name of the project. - - :type labels: dict - :param labels: A list of labels associated with the project. - """ - def __init__(self, project_id, client, name=None, labels=None): - self._client = client - self.project_id = project_id - self.name = name - self.number = None - self.labels = labels or {} - self.status = None - - def __repr__(self): - return '' % (self.name, self.project_id) - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a project given its API representation. - - :type resource: dict - :param resource: project resource representation returned from the API - - :type client: :class:`gcloud.resource_manager.client.Client` - :param client: The Client used with this project. - - :rtype: :class:`gcloud.resource_manager.project.Project` - """ - project = cls(project_id=resource['projectId'], client=client) - project.set_properties_from_api_repr(resource) - return project - - def set_properties_from_api_repr(self, resource): - """Update specific properties from its API representation.""" - self.name = resource.get('name') - self.number = resource['projectNumber'] - self.labels = resource.get('labels', {}) - self.status = resource['lifecycleState'] - - @property - def full_name(self): - """Fully-qualified name (ie, ``'projects/purple-spaceship-123'``).""" - if not self.project_id: - raise ValueError('Missing project ID.') - return 'projects/%s' % (self.project_id) - - @property - def path(self): - """URL for the project (ie, ``'/projects/purple-spaceship-123'``).""" - return '/%s' % (self.full_name) - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.resource_manager.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current project. - - :rtype: :class:`gcloud.resource_manager.client.Client` - :returns: The client passed in or the currently bound client. 
- """ - if client is None: - client = self._client - return client - - def create(self, client=None): - """API call: create the project via a ``POST`` request. - - See - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/create - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. If not passed, falls back to - the client stored on the current project. - """ - client = self._require_client(client) - - data = { - 'projectId': self.project_id, - 'name': self.name, - 'labels': self.labels, - } - resp = client.connection.api_request(method='POST', path='/projects', - data=data) - self.set_properties_from_api_repr(resource=resp) - - def reload(self, client=None): - """API call: reload the project via a ``GET`` request. - - This method will reload the newest metadata for the project. If you've - created a new :class:`Project` instance via - :meth:`Client.new_project() \ - `, - this method will retrieve project metadata. - - .. warning:: - - This will overwrite any local changes you've made and not saved - via :meth:`update`. - - See - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. If not passed, falls back to - the client stored on the current project. - """ - client = self._require_client(client) - - # We assume the project exists. If it doesn't it will raise a NotFound - # exception. - resp = client.connection.api_request(method='GET', path=self.path) - self.set_properties_from_api_repr(resource=resp) - - def exists(self, client=None): - """API call: test the existence of a project via a ``GET`` request. - - See - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. If not passed, falls back to - the client stored on the current project. - """ - client = self._require_client(client) - - try: - # Note that we have to request the entire resource as the API - # doesn't provide a way tocheck for existence only. - client.connection.api_request(method='GET', path=self.path) - except NotFound: - return False - else: - return True - - def update(self, client=None): - """API call: update the project via a ``PUT`` request. - - See - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/update - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. If not passed, falls back to - the client stored on the current project. - """ - client = self._require_client(client) - - data = {'name': self.name, 'labels': self.labels} - resp = client.connection.api_request(method='PUT', path=self.path, - data=data) - self.set_properties_from_api_repr(resp) - - def delete(self, client=None, reload_data=False): - """API call: delete the project via a ``DELETE`` request. - - See: - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete - - This actually changes the status (``lifecycleState``) from ``ACTIVE`` - to ``DELETE_REQUESTED``. - Later (it's not specified when), the project will move into the - ``DELETE_IN_PROGRESS`` state, which means the deleting has actually - begun. - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. 
If not passed, falls back to - the client stored on the current project. - - :type reload_data: bool - :param reload_data: Whether to reload the project with the latest - state. If you want to get the updated status, - you'll want this set to :data:`True` as the DELETE - method doesn't send back the updated project. - Default: :data:`False`. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) - - # If the reload flag is set, reload the project. - if reload_data: - self.reload() - - def undelete(self, client=None, reload_data=False): - """API call: undelete the project via a ``POST`` request. - - See - https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/undelete - - This actually changes the project status (``lifecycleState``) from - ``DELETE_REQUESTED`` to ``ACTIVE``. - If the project has already reached a status of ``DELETE_IN_PROGRESS``, - this request will fail and the project cannot be restored. - - :type client: :class:`gcloud.resource_manager.client.Client` or - :data:`NoneType ` - :param client: the client to use. If not passed, falls back to - the client stored on the current project. - - :type reload_data: bool - :param reload_data: Whether to reload the project with the latest - state. If you want to get the updated status, - you'll want this set to :data:`True` as the DELETE - method doesn't send back the updated project. - Default: :data:`False`. - """ - client = self._require_client(client) - client.connection.api_request(method='POST', - path=self.path + ':undelete') - - # If the reload flag is set, reload the project. - if reload_data: - self.reload() diff --git a/gcloud/resource_manager/test_client.py b/gcloud/resource_manager/test_client.py deleted file mode 100644 index d6713acd5ca3..000000000000 --- a/gcloud/resource_manager/test_client.py +++ /dev/null @@ -1,296 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
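(Editor's note.) Taken together, the ``Project`` methods above cover the whole lifecycle. A minimal sketch, assuming credentials inferred from the environment; the project ID is the illustrative one used in the docstrings above, and the display name and labels are made up::

    from gcloud import resource_manager

    client = resource_manager.Client()

    # new_project() only builds a local object; create() issues the POST.
    project = client.new_project('purple-spaceship-123',
                                 name='Purple Spaceship',
                                 labels={'env': 'prod'})
    project.create()

    # Pull server-assigned fields (projectNumber, lifecycleState) back down.
    project.reload()

    # Change writable fields locally, then persist them with a PUT.
    project.labels['color'] = 'purple'
    project.update()

    # delete() moves the project to DELETE_REQUESTED; reload_data=True issues
    # a follow-up GET so project.status reflects the new lifecycle state.
    project.delete(reload_data=True)

    # While the project is still DELETE_REQUESTED it can be restored.
    project.undelete(reload_data=True)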
- -import unittest2 - - -class Test__ProjectIterator(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.resource_manager.client import _ProjectIterator - return _ProjectIterator - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_constructor(self): - client = object() - iterator = self._makeOne(client) - self.assertEqual(iterator.path, '/projects') - self.assertEqual(iterator.page_number, 0) - self.assertEqual(iterator.next_page_token, None) - self.assertTrue(iterator.client is client) - self.assertEqual(iterator.extra_params, {}) - - def test_get_items_from_response_empty(self): - client = object() - iterator = self._makeOne(client) - self.assertEqual(list(iterator.get_items_from_response({})), []) - - def test_get_items_from_response_non_empty(self): - from gcloud.resource_manager.project import Project - - PROJECT_ID = 'project-id' - PROJECT_NAME = 'My Project Name' - PROJECT_NUMBER = 12345678 - PROJECT_LABELS = {'env': 'prod'} - PROJECT_LIFECYCLE_STATE = 'ACTIVE' - API_RESOURCE = { - 'projectId': PROJECT_ID, - 'name': PROJECT_NAME, - 'projectNumber': PROJECT_NUMBER, - 'labels': PROJECT_LABELS, - 'lifecycleState': PROJECT_LIFECYCLE_STATE, - } - RESPONSE = {'projects': [API_RESOURCE]} - - client = object() - iterator = self._makeOne(client) - projects = list(iterator.get_items_from_response(RESPONSE)) - - project, = projects - self.assertTrue(isinstance(project, Project)) - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project._client, client) - self.assertEqual(project.name, PROJECT_NAME) - self.assertEqual(project.number, PROJECT_NUMBER) - self.assertEqual(project.labels, PROJECT_LABELS) - self.assertEqual(project.status, PROJECT_LIFECYCLE_STATE) - - -class TestClient(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.resource_manager.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_constructor(self): - from gcloud.resource_manager.connection import Connection - - http = object() - credentials = _Credentials() - client = self._makeOne(credentials=credentials, http=http) - self.assertTrue(isinstance(client.connection, Connection)) - self.assertEqual(client.connection._credentials, credentials) - self.assertEqual(client.connection._http, http) - - def test_new_project_factory(self): - from gcloud.resource_manager.project import Project - - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - project_id = 'project_id' - name = object() - labels = object() - project = client.new_project(project_id, name=name, labels=labels) - - self.assertTrue(isinstance(project, Project)) - self.assertEqual(project._client, client) - self.assertEqual(project.project_id, project_id) - self.assertEqual(project.name, name) - self.assertEqual(project.labels, labels) - - def test_fetch_project(self): - from gcloud.resource_manager.project import Project - - project_id = 'project-id' - project_number = 123 - project_name = 'Project Name' - labels = {'env': 'prod'} - project_resource = { - 'projectId': project_id, - 'projectNumber': project_number, - 'name': project_name, - 'labels': labels, - 'lifecycleState': 'ACTIVE', - } - - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - # Patch the connection with one we can easily control. 
- client.connection = _Connection(project_resource) - - project = client.fetch_project(project_id) - self.assertTrue(isinstance(project, Project)) - self.assertEqual(project._client, client) - self.assertEqual(project.project_id, project_id) - self.assertEqual(project.name, project_name) - self.assertEqual(project.labels, labels) - - def test_list_projects_return_type(self): - from gcloud.resource_manager.client import _ProjectIterator - - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - # Patch the connection with one we can easily control. - client.connection = _Connection({}) - - results = client.list_projects() - self.assertIsInstance(results, _ProjectIterator) - - def test_list_projects_no_paging(self): - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 1 - STATUS = 'ACTIVE' - PROJECTS_RESOURCE = { - 'projects': [ - { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'lifecycleState': STATUS, - }, - ], - } - # Patch the connection with one we can easily control. - client.connection = _Connection(PROJECTS_RESOURCE) - # Make sure there will be no paging. - self.assertFalse('nextPageToken' in PROJECTS_RESOURCE) - - results = list(client.list_projects()) - - project, = results - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project.number, PROJECT_NUMBER) - self.assertEqual(project.status, STATUS) - - def test_list_projects_with_paging(self): - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - - PROJECT_ID1 = 'project-id' - PROJECT_NUMBER1 = 1 - STATUS = 'ACTIVE' - TOKEN = 'next-page-token' - FIRST_PROJECTS_RESOURCE = { - 'projects': [ - { - 'projectId': PROJECT_ID1, - 'projectNumber': PROJECT_NUMBER1, - 'lifecycleState': STATUS, - }, - ], - 'nextPageToken': TOKEN, - } - PROJECT_ID2 = 'project-id-2' - PROJECT_NUMBER2 = 42 - SECOND_PROJECTS_RESOURCE = { - 'projects': [ - { - 'projectId': PROJECT_ID2, - 'projectNumber': PROJECT_NUMBER2, - 'lifecycleState': STATUS, - }, - ], - } - # Patch the connection with one we can easily control. - client.connection = _Connection(FIRST_PROJECTS_RESOURCE, - SECOND_PROJECTS_RESOURCE) - - # Page size = 1 with two response means we'll have two requests. - results = list(client.list_projects(page_size=1)) - - # Check that the results are as expected. - project1, project2 = results - self.assertEqual(project1.project_id, PROJECT_ID1) - self.assertEqual(project1.number, PROJECT_NUMBER1) - self.assertEqual(project1.status, STATUS) - self.assertEqual(project2.project_id, PROJECT_ID2) - self.assertEqual(project2.number, PROJECT_NUMBER2) - self.assertEqual(project2.status, STATUS) - - # Check that two requests were required since page_size=1. - request1, request2 = client.connection._requested - self.assertEqual(request1, { - 'path': '/projects', - 'method': 'GET', - 'query_params': { - 'pageSize': 1, - }, - }) - self.assertEqual(request2, { - 'path': '/projects', - 'method': 'GET', - 'query_params': { - 'pageSize': 1, - 'pageToken': TOKEN, - }, - }) - - def test_list_projects_with_filter(self): - credentials = _Credentials() - client = self._makeOne(credentials=credentials) - - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 1 - STATUS = 'ACTIVE' - PROJECTS_RESOURCE = { - 'projects': [ - { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'lifecycleState': STATUS, - }, - ], - } - # Patch the connection with one we can easily control. 
- client.connection = _Connection(PROJECTS_RESOURCE) - - FILTER_PARAMS = {'id': 'project-id'} - results = list(client.list_projects(filter_params=FILTER_PARAMS)) - - project, = results - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project.number, PROJECT_NUMBER) - self.assertEqual(project.status, STATUS) - - # Check that the filter made it in the request. - request, = client.connection._requested - self.assertEqual(request, { - 'path': '/projects', - 'method': 'GET', - 'query_params': { - 'filter': FILTER_PARAMS, - }, - }) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/resource_manager/test_connection.py b/gcloud/resource_manager/test_connection.py deleted file mode 100644 index cce5cd95da1d..000000000000 --- a/gcloud/resource_manager/test_connection.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.resource_manager.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_build_api_url_no_extra_query_params(self): - conn = self._makeOne() - URI = '/'.join([ - conn.API_BASE_URL, - conn.API_VERSION, - 'foo', - ]) - self.assertEqual(conn.build_api_url('/foo'), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - conn = self._makeOne() - uri = conn.build_api_url('/foo', {'bar': 'baz'}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, - '/'.join(['', conn.API_VERSION, 'foo'])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms['bar'], 'baz') diff --git a/gcloud/resource_manager/test_project.py b/gcloud/resource_manager/test_project.py deleted file mode 100644 index 2173b55edcae..000000000000 --- a/gcloud/resource_manager/test_project.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestProject(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.resource_manager.project import Project - return Project - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_constructor_defaults(self): - client = object() - PROJECT_ID = 'project-id' - project = self._makeOne(PROJECT_ID, client) - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project._client, client) - self.assertEqual(project.name, None) - self.assertEqual(project.number, None) - self.assertEqual(project.labels, {}) - self.assertEqual(project.status, None) - - def test_constructor_explicit(self): - client = object() - PROJECT_ID = 'project-id' - DISPLAY_NAME = 'name' - LABELS = {'foo': 'bar'} - project = self._makeOne(PROJECT_ID, client, - name=DISPLAY_NAME, labels=LABELS) - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project._client, client) - self.assertEqual(project.name, DISPLAY_NAME) - self.assertEqual(project.number, None) - self.assertEqual(project.labels, LABELS) - self.assertEqual(project.status, None) - - def test_from_api_repr(self): - client = object() - PROJECT_ID = 'project-id' - PROJECT_NAME = 'My Project Name' - PROJECT_NUMBER = 12345678 - PROJECT_LABELS = {'env': 'prod'} - PROJECT_LIFECYCLE_STATE = 'ACTIVE' - resource = {'projectId': PROJECT_ID, - 'name': PROJECT_NAME, - 'projectNumber': PROJECT_NUMBER, - 'labels': PROJECT_LABELS, - 'lifecycleState': PROJECT_LIFECYCLE_STATE} - project = self._getTargetClass().from_api_repr(resource, client) - self.assertEqual(project.project_id, PROJECT_ID) - self.assertEqual(project._client, client) - self.assertEqual(project.name, PROJECT_NAME) - self.assertEqual(project.number, PROJECT_NUMBER) - self.assertEqual(project.labels, PROJECT_LABELS) - self.assertEqual(project.status, PROJECT_LIFECYCLE_STATE) - - def test_full_name(self): - PROJECT_ID = 'project-id' - project = self._makeOne(PROJECT_ID, None) - self.assertEqual('projects/%s' % PROJECT_ID, project.full_name) - - def test_full_name_missing_id(self): - project = self._makeOne(None, None) - with self.assertRaises(ValueError): - self.assertIsNone(project.full_name) - - def test_path(self): - PROJECT_ID = 'project-id' - project = self._makeOne(PROJECT_ID, None) - self.assertEqual('/projects/%s' % PROJECT_ID, project.path) - - def test_create(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {}, - 'lifecycleState': 'ACTIVE', - } - connection = _Connection(PROJECT_RESOURCE) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - self.assertEqual(project.number, None) - project.create() - self.assertEqual(project.number, PROJECT_NUMBER) - request, = connection._requested - - expected_request = { - 'method': 'POST', - 'data': { - 'projectId': PROJECT_ID, - 'labels': {}, - 'name': None, - }, - 'path': '/projects', - } - self.assertEqual(request, expected_request) - - def test_reload(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {'env': 'prod'}, - 'lifecycleState': 'ACTIVE', - } - connection = _Connection(PROJECT_RESOURCE) - client = _Client(connection=connection) - project = 
self._makeOne(PROJECT_ID, client) - self.assertEqual(project.number, None) - self.assertEqual(project.name, None) - self.assertEqual(project.labels, {}) - self.assertEqual(project.status, None) - project.reload() - self.assertEqual(project.name, PROJECT_RESOURCE['name']) - self.assertEqual(project.number, PROJECT_NUMBER) - self.assertEqual(project.labels, PROJECT_RESOURCE['labels']) - self.assertEqual(project.status, PROJECT_RESOURCE['lifecycleState']) - - request, = connection._requested - # NOTE: data is not in the request since a GET request. - expected_request = { - 'method': 'GET', - 'path': project.path, - } - self.assertEqual(request, expected_request) - - def test_exists(self): - PROJECT_ID = 'project-id' - connection = _Connection({'projectId': PROJECT_ID}) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - self.assertTrue(project.exists()) - - def test_exists_with_explicitly_passed_client(self): - PROJECT_ID = 'project-id' - connection = _Connection({'projectId': PROJECT_ID}) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, None) - self.assertTrue(project.exists(client=client)) - - def test_exists_with_missing_client(self): - PROJECT_ID = 'project-id' - project = self._makeOne(PROJECT_ID, None) - with self.assertRaises(AttributeError): - project.exists() - - def test_exists_not_found(self): - PROJECT_ID = 'project-id' - connection = _Connection() - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - self.assertFalse(project.exists()) - - def test_update(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_NAME = 'Project Name' - LABELS = {'env': 'prod'} - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': PROJECT_NAME, - 'labels': LABELS, - 'lifecycleState': 'ACTIVE', - } - connection = _Connection(PROJECT_RESOURCE) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - project.name = PROJECT_NAME - project.labels = LABELS - project.update() - - request, = connection._requested - expected_request = { - 'method': 'PUT', - 'data': { - 'name': PROJECT_NAME, - 'labels': LABELS, - }, - 'path': project.path, - } - self.assertEqual(request, expected_request) - - def test_delete_without_reload_data(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {'env': 'prod'}, - 'lifecycleState': 'ACTIVE', - } - connection = _Connection(PROJECT_RESOURCE) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - project.delete(reload_data=False) - - request, = connection._requested - # NOTE: data is not in the request since a DELETE request. 
- expected_request = { - 'method': 'DELETE', - 'path': project.path, - } - self.assertEqual(request, expected_request) - - def test_delete_with_reload_data(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {'env': 'prod'}, - 'lifecycleState': 'ACTIVE', - } - DELETING_PROJECT = PROJECT_RESOURCE.copy() - DELETING_PROJECT['lifecycleState'] = NEW_STATE = 'DELETE_REQUESTED' - - connection = _Connection(PROJECT_RESOURCE, DELETING_PROJECT) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - project.delete(reload_data=True) - self.assertEqual(project.status, NEW_STATE) - - delete_request, get_request = connection._requested - # NOTE: data is not in the request since a DELETE request. - expected_delete_request = { - 'method': 'DELETE', - 'path': project.path, - } - self.assertEqual(delete_request, expected_delete_request) - - # NOTE: data is not in the request since a GET request. - expected_get_request = { - 'method': 'GET', - 'path': project.path, - } - self.assertEqual(get_request, expected_get_request) - - def test_undelete_without_reload_data(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {'env': 'prod'}, - 'lifecycleState': 'DELETE_REQUESTED', - } - connection = _Connection(PROJECT_RESOURCE) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - project.undelete(reload_data=False) - - request, = connection._requested - # NOTE: data is not in the request, undelete doesn't need it. - expected_request = { - 'method': 'POST', - 'path': project.path + ':undelete', - } - self.assertEqual(request, expected_request) - - def test_undelete_with_reload_data(self): - PROJECT_ID = 'project-id' - PROJECT_NUMBER = 123 - PROJECT_RESOURCE = { - 'projectId': PROJECT_ID, - 'projectNumber': PROJECT_NUMBER, - 'name': 'Project Name', - 'labels': {'env': 'prod'}, - 'lifecycleState': 'DELETE_REQUESTED', - } - UNDELETED_PROJECT = PROJECT_RESOURCE.copy() - UNDELETED_PROJECT['lifecycleState'] = NEW_STATE = 'ACTIVE' - - connection = _Connection(PROJECT_RESOURCE, UNDELETED_PROJECT) - client = _Client(connection=connection) - project = self._makeOne(PROJECT_ID, client) - project.undelete(reload_data=True) - self.assertEqual(project.status, NEW_STATE) - - undelete_request, get_request = connection._requested - # NOTE: data is not in the request, undelete doesn't need it. - expected_undelete_request = { - 'method': 'POST', - 'path': project.path + ':undelete', - } - self.assertEqual(undelete_request, expected_undelete_request) - - # NOTE: data is not in the request since a GET request. 
- expected_get_request = { - 'method': 'GET', - 'path': project.path, - } - self.assertEqual(get_request, expected_get_request) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response - - -class _Client(object): - - def __init__(self, connection=None): - self.connection = connection diff --git a/gcloud/search/__init__.py b/gcloud/search/__init__.py deleted file mode 100644 index 97ffb55ad822..000000000000 --- a/gcloud/search/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Google Cloud Search API wrapper.""" - -from gcloud.search.client import Client -from gcloud.search.connection import Connection - - -SCOPE = Connection.SCOPE diff --git a/gcloud/search/client.py b/gcloud/search/client.py deleted file mode 100644 index 451c1ee37a7c..000000000000 --- a/gcloud/search/client.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud search API.""" - - -from gcloud.client import JSONClient -from gcloud.search.connection import Connection -from gcloud.search.index import Index - - -class Client(JSONClient): - """Client to bundle configuration needed for API requests. - - :type project: string - :param project: the project which the client acts on behalf of. Will be - passed when creating a index. If not passed, - falls back to the default inferred from the environment. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. 
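(Editor's note.) As the docstring above describes, the search ``Client`` can rely on credentials and a project inferred from the environment, or take an explicit project. A minimal sketch (the project ID is illustrative)::

    from gcloud import search

    # Credentials and project inferred from the environment.
    client = search.Client()

    # Or pin the project explicitly; credentials still come from the environment.
    client = search.Client(project='my-search-project')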
- """ - - _connection_class = Connection - - def list_indexes(self, max_results=None, page_token=None, - view=None, prefix=None): - """List indexes for the project associated with this client. - - See: - https://cloud.google.com/search/reference/rest/v1/indexes/list - - :type max_results: int - :param max_results: maximum number of indexes to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of indexes. If - not passed, the API will return the first page of - indexes. - - :type view: string - :param view: One of 'ID_ONLY' (return only the index ID; the default) - or 'FULL' (return information on indexed fields). - - :type prefix: string - :param prefix: return only indexes whose ID starts with ``prefix``. - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.dns.index.Index`, plus a - "next page token" string: if the token is not None, - indicates that more indexes can be retrieved with another - call (pass that value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['pageSize'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - if view is not None: - params['view'] = view - - if prefix is not None: - params['indexNamePrefix'] = prefix - - path = '/projects/%s/indexes' % (self.project,) - resp = self.connection.api_request(method='GET', path=path, - query_params=params) - indexes = [Index.from_api_repr(resource, self) - for resource in resp['indexes']] - return indexes, resp.get('nextPageToken') - - def index(self, name): - """Construct an index bound to this client. - - :type name: string - :param name: Name of the index. - - :rtype: :class:`gcloud.search.index.Index` - :returns: a new ``Index`` instance - """ - return Index(name, client=self) diff --git a/gcloud/search/connection.py b/gcloud/search/connection.py deleted file mode 100644 index f14627c6916d..000000000000 --- a/gcloud/search/connection.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud search connections.""" - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud Search via the JSON REST API.""" - - API_BASE_URL = 'https://cloudsearch.googleapis.com' - """The base of the API call URL.""" - - API_VERSION = 'v1' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/cloudsearch',) - """The scopes required for authenticating as a Cloud Search consumer.""" diff --git a/gcloud/search/document.py b/gcloud/search/document.py deleted file mode 100644 index ecbff93ba16e..000000000000 --- a/gcloud/search/document.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API Document.""" - -import datetime - -import six - -from gcloud._helpers import _datetime_to_rfc3339 -from gcloud._helpers import _rfc3339_to_datetime -from gcloud.exceptions import NotFound - - -class StringValue(object): - """StringValues hold individual text values for a given field - - See: - https://cloud.google.com/search/reference/rest/google/cloudsearch/v1/FieldValue - - :type string_value: string - :param string_value: the actual value. - - :type string_format: string - :param string_format: how the value should be indexed: one of - 'ATOM', 'TEXT', 'HTML' (leave as ``None`` to - use the server-supplied default). - - :type language: string - :param language: Human language of the text. Should be an ISO 639-1 - language code. - """ - - value_type = 'string' - - def __init__(self, string_value, string_format=None, language=None): - self.string_value = string_value - self.string_format = string_format - self.language = language - - -class NumberValue(object): - """NumberValues hold individual numeric values for a given field - - See: - https://cloud.google.com/search/reference/rest/google/cloudsearch/v1/FieldValue - - :type number_value: integer, float (long on Python2) - :param number_value: the actual value. - """ - - value_type = 'number' - - def __init__(self, number_value): - self.number_value = number_value - - -class TimestampValue(object): - """TimestampValues hold individual datetime values for a given field - See: - https://cloud.google.com/search/reference/rest/google/cloudsearch/v1/FieldValue - - :type timestamp_value: class:``datetime.datetime`` - :param timestamp_value: the actual value. - """ - - value_type = 'timestamp' - - def __init__(self, timestamp_value): - self.timestamp_value = timestamp_value - - -class GeoValue(object): - """GeoValues hold individual latitude/longitude values for a given field - See: - https://cloud.google.com/search/reference/rest/google/cloudsearch/v1/FieldValue - - :type geo_value: tuple, (float, float) - :param geo_value: latitude, longitude - """ - - value_type = 'geo' - - def __init__(self, geo_value): - self.geo_value = geo_value - - -class Field(object): - """Fields hold values for a given document - - See: - https://cloud.google.com/search/reference/rest/google/cloudsearch/v1/FieldValueList - - :type name: string - :param name: field name - """ - - def __init__(self, name): - self.name = name - self.values = [] - - def add_value(self, value, **kw): - """Add a value to the field. - - Selects type of value instance based on type of ``value``. - - :type value: string, integer, float, datetime, or tuple (float, float) - :param value: the field value to add. - - :param kw: extra keyword arguments to be passed to the value instance - constructor. Currently, only :class:`StringValue` - expects / honors additional parameters. - - :raises: ValueError if unable to match the type of ``value``. 
- """ - if isinstance(value, six.string_types): - self.values.append(StringValue(value, **kw)) - elif isinstance(value, (six.integer_types, float)): - self.values.append(NumberValue(value, **kw)) - elif isinstance(value, datetime.datetime): - self.values.append(TimestampValue(value, **kw)) - elif isinstance(value, tuple): - self.values.append(GeoValue(value, **kw)) - else: - raise ValueError("Couldn't determine value type: %s" % (value,)) - - -class Document(object): - """Documents hold values for search within indexes. - - See: - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents - - :type name: string - :param name: the name of the document - - :type index: :class:`gcloud.search.index.Index` - :param index: the index to which the document belongs. - - :type rank: positive integer - :param rank: override the server-generated rank for ordering the document - within in queries. If not passed, the server generates a - timestamp-based value. See the ``rank`` entry on the - page above for details. - """ - def __init__(self, name, index, rank=None): - self.name = name - self.index = index - self.rank = rank - self.fields = {} - - @classmethod - def from_api_repr(cls, resource, index): - """Factory: construct a document given its API representation - - :type resource: dict - :param resource: document resource representation returned from the API - - :type index: :class:`gcloud.search.index.Index` - :param index: Index holding the document. - - :rtype: :class:`gcloud.search.document.Document` - :returns: Document parsed from ``resource``. - """ - name = resource.get('docId') - if name is None: - raise KeyError( - 'Resource lacks required identity information: ["docId"]') - rank = resource.get('rank') - document = cls(name, index, rank) - document._parse_fields_resource(resource) - return document - - @staticmethod - def _parse_value_resource(resource): - """Helper for _parse_fields_resource""" - if 'stringValue' in resource: - string_format = resource.get('stringFormat') - language = resource.get('lang') - value = resource['stringValue'] - return StringValue(value, string_format, language) - if 'numberValue' in resource: - value = resource['numberValue'] - if isinstance(value, six.string_types): - if '.' in value: - value = float(value) - else: - value = int(value) - return NumberValue(value) - if 'timestampValue' in resource: - stamp = resource['timestampValue'] - value = _rfc3339_to_datetime(stamp) - return TimestampValue(value) - if 'geoValue' in resource: - lat_long = resource['geoValue'] - lat, long = [float(coord.strip()) for coord in lat_long.split(',')] - return GeoValue((lat, long)) - raise ValueError("Unknown value type") - - def _parse_fields_resource(self, resource): - """Helper for from_api_repr, create, reload""" - self.fields.clear() - for field_name, val_obj in resource.get('fields', {}).items(): - field = self.field(field_name) - for value in val_obj['values']: - field.values.append(self._parse_value_resource(value)) - - @property - def path(self): - """URL path for the document's APIs""" - return '%s/documents/%s' % (self.index.path, self.name) - - def field(self, name): - """Construct a Field instance. - - :type name: string - :param name: field's name - """ - field = self.fields[name] = Field(name) - return field - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.search.client.Client` or ``NoneType`` - :param client: the client to use. 
If not passed, falls back to the - ``client`` stored on the index of the - current document. - - :rtype: :class:`gcloud.search.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self.index._client - return client - - @staticmethod - def _build_value_resource(value): - """Helper for _build_fields_resource""" - result = {} - if value.value_type == 'string': - result['stringValue'] = value.string_value - if value.string_format is not None: - result['stringFormat'] = value.string_format - if value.language is not None: - result['lang'] = value.language - elif value.value_type == 'number': - result['numberValue'] = value.number_value - elif value.value_type == 'timestamp': - stamp = _datetime_to_rfc3339(value.timestamp_value) - result['timestampValue'] = stamp - elif value.value_type == 'geo': - result['geoValue'] = '%s, %s' % value.geo_value - else: - raise ValueError('Unknown value_type: %s' % value.value_type) - return result - - def _build_fields_resource(self): - """Helper for create""" - fields = {} - for field_name, field in self.fields.items(): - if field.values: - values = [] - fields[field_name] = {'values': values} - for value in field.values: - values.append(self._build_value_resource(value)) - return fields - - def _set_properties(self, api_response): - """Helper for create, reload""" - self.rank = api_response.get('rank') - self._parse_fields_resource(api_response) - - def create(self, client=None): - """API call: create the document via a PUT request - - See: - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/create - - :type client: :class:`gcloud.search.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current document's index. - """ - data = {'docId': self.name} - - if self.rank is not None: - data['rank'] = self.rank - - fields = self._build_fields_resource() - if fields: - data['fields'] = fields - - client = self._require_client(client) - api_response = client.connection.api_request( - method='PUT', path=self.path, data=data) - - self._set_properties(api_response) - - def exists(self, client=None): - """API call: test existence of the document via a GET request - - See - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/get - - :type client: :class:`gcloud.search.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current document's index. - """ - client = self._require_client(client) - try: - client.connection.api_request(method='GET', path=self.path) - except NotFound: - return False - else: - return True - - def reload(self, client=None): - """API call: sync local document configuration via a GET request - - See - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/get - - :type client: :class:`gcloud.search.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current document's index. - """ - client = self._require_client(client) - api_response = client.connection.api_request( - method='GET', path=self.path) - self._set_properties(api_response) - - def delete(self, client=None): - """API call: delete the document via a DELETE request. 
- - See: - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/delete - - :type client: :class:`gcloud.search.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current document's index. - """ - client = self._require_client(client) - client.connection.api_request(method='DELETE', path=self.path) diff --git a/gcloud/search/index.py b/gcloud/search/index.py deleted file mode 100644 index c9014b24817e..000000000000 --- a/gcloud/search/index.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Define API Indexes.""" - -from gcloud.search.document import Document - - -class Index(object): - """Indexes are containers for documents. - - See: - https://cloud.google.com/search/reference/rest/v1/indexes - - :type name: string - :param name: the name of the index - - :type client: :class:`gcloud.dns.client.Client` - :param client: A client which holds credentials and project configuration - for the index (which requires a project). - """ - - def __init__(self, name, client): - self.name = name - self._client = client - self._properties = {} - - @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct an index given its API representation - - :type resource: dict - :param resource: index resource representation returned from the API - - :type client: :class:`gcloud.dns.client.Client` - :param client: Client which holds credentials and project - configuration for the index. - - :rtype: :class:`gcloud.dns.index.Index` - :returns: Index parsed from ``resource``. - """ - name = resource.get('indexId') - if name is None: - raise KeyError( - 'Resource lacks required identity information: ["indexId"]') - index = cls(name, client=client) - index._set_properties(resource) - return index - - @property - def project(self): - """Project bound to the index. - - :rtype: string - :returns: the project (derived from the client). - """ - return self._client.project - - @property - def path(self): - """URL path for the index's APIs. - - :rtype: string - :returns: the path based on project and dataste name. - """ - return '/projects/%s/indexes/%s' % (self.project, self.name) - - def _list_field_names(self, field_type): - """Helper for 'text_fields', etc. - """ - fields = self._properties.get('indexedField', {}) - return fields.get(field_type) - - @property - def text_fields(self): - """Names of text fields in the index. - - :rtype: list of string, or None - :returns: names of text fields in the index, or None if no - resource information is available. - """ - return self._list_field_names('textFields') - - @property - def atom_fields(self): - """Names of atom fields in the index. - - :rtype: list of string, or None - :returns: names of atom fields in the index, or None if no - resource information is available. 
- """ - return self._list_field_names('atomFields') - - @property - def html_fields(self): - """Names of html fields in the index. - - :rtype: list of string, or None - :returns: names of html fields in the index, or None if no - resource information is available. - """ - return self._list_field_names('htmlFields') - - @property - def date_fields(self): - """Names of date fields in the index. - - :rtype: list of string, or None - :returns: names of date fields in the index, or None if no - resource information is available. - """ - return self._list_field_names('dateFields') - - @property - def number_fields(self): - """Names of number fields in the index. - - :rtype: list of string, or None - :returns: names of number fields in the index, or None if no - resource information is available. - """ - return self._list_field_names('numberFields') - - @property - def geo_fields(self): - """Names of geo fields in the index. - - :rtype: list of string, or None - :returns: names of geo fields in the index, or None if no - resource information is available. - """ - return self._list_field_names('geoFields') - - def _set_properties(self, api_response): - """Update properties from resource in body of ``api_response`` - - :type api_response: httplib2.Response - :param api_response: response returned from an API call - """ - self._properties.clear() - self._properties.update(api_response) - - def list_documents(self, max_results=None, page_token=None, - view=None): - """List documents created within this index. - - See: - https://cloud.google.com/search/reference/rest/v1/projects/indexes/documents/list - - :type max_results: int - :param max_results: maximum number of indexes to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of indexes. If - not passed, the API will return the first page of - indexes. - - :type view: string - :param view: One of 'ID_ONLY' (return only the document ID; the - default) or 'FULL' (return the full resource - representation for the document, including field - values) - - :rtype: tuple, (list, str) - :returns: list of :class:`gcloud.dns.document.Document`, plus a - "next page token" string: if the token is not None, - indicates that more indexes can be retrieved with another - call (pass that value as ``page_token``). - """ - params = {} - - if max_results is not None: - params['pageSize'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - if view is not None: - params['view'] = view - - path = '%s/documents' % (self.path,) - connection = self._client.connection - resp = connection.api_request(method='GET', path=path, - query_params=params) - indexes = [Document.from_api_repr(resource, self) - for resource in resp['documents']] - return indexes, resp.get('nextPageToken') - - def document(self, name, rank=None): - """Construct a document bound to this index. - - :type name: string - :param name: Name of the document. - - :type rank: integer - :param rank: Rank of the document (defaults to a server-assigned - value based on timestamp). - - :rtype: :class:`gcloud.search.document.Document` - :returns: a new ``Document`` instance - """ - return Document(name, index=self, rank=rank) - - def search(self, - query, - max_results=None, - page_token=None, - field_expressions=None, - order_by=None, - matched_count_accuracy=None, - scorer=None, - scorer_size=None, - return_fields=None): - """Search documents created within this index. 
- - See: - https://cloud.google.com/search/reference/rest/v1/projects/indexes/search - - :type query: string - :param query: query string (see https://cloud.google.com/search/query). - - :type max_results: int - :param max_results: maximum number of indexes to return, If not - passed, defaults to a value set by the API. - - :type page_token: string - :param page_token: opaque marker for the next "page" of indexes. If - not passed, the API will return the first page of - indexes. - - :type field_expressions: dict, or ``NoneType`` - :param field_expressions: mapping of field name -> expression - for use in 'order_by' or 'return_fields' - - :type order_by: sequence of string, or ``NoneType`` - :param order_by: list of field names (plus optional ' desc' suffix) - specifying ordering of results. - - :type matched_count_accuracy: integer or ``NoneType`` - :param matched_count_accuracy: minimum accuracy for matched count - returned - - :type return_fields: sequence of string, or ``NoneType`` - :param return_fields: list of field names to be returned. - - :type scorer: string or ``NoneType`` - :param scorer: name of scorer function (e.g., "generic"). - - :type scorer_size: integer or ``NoneType`` - :param scorer_size: max number of top results pass to scorer function. - - :rtype: tuple, (list, str, int) - :returns: list of :class:`gcloud.dns.document.Document`, plus a - "next page token" string, and a "matched count". If the - token is not None, indicates that more indexes can be - retrieved with another call (pass that value as - ``page_token``). The "matched count" indicates the total - number of documents matching the query string. - """ - params = {'query': query} - - if max_results is not None: - params['pageSize'] = max_results - - if page_token is not None: - params['pageToken'] = page_token - - if field_expressions is not None: - params['fieldExpressions'] = field_expressions - - if order_by is not None: - params['orderBy'] = order_by - - if matched_count_accuracy is not None: - params['matchedCountAccuracy'] = matched_count_accuracy - - if scorer is not None: - params['scorer'] = scorer - - if scorer_size is not None: - params['scorerSize'] = scorer_size - - if return_fields is not None: - params['returnFields'] = return_fields - - path = '%s/search' % (self.path,) - connection = self._client.connection - resp = connection.api_request(method='GET', path=path, - query_params=params) - indexes = [Document.from_api_repr(resource, self) - for resource in resp['results']] - return indexes, resp.get('nextPageToken'), resp.get('matchedCount') diff --git a/gcloud/search/test_client.py b/gcloud/search/test_client.py deleted file mode 100644 index 9ba7c65e4d52..000000000000 --- a/gcloud/search/test_client.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
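The ``Index`` class removed above exposes ``list_documents`` and ``search`` as thin wrappers over GET requests against the index path. A minimal usage sketch of that (since-deleted) API, assuming credentials and project are picked up from the environment and using a hypothetical index named ``'my-index'``::

    from gcloud.search.client import Client

    client = Client()
    index = client.index('my-index')

    # List documents, asking for full field data rather than IDs only.
    documents, next_page_token = index.list_documents(max_results=10, view='FULL')

    # Run a query; the keyword arguments map onto the query parameters
    # assembled in Index.search() above.
    results, token, matched_count = index.search(
        'quick brown fox',
        max_results=5,
        order_by=['title'],
        return_fields=['title'])
    for document in results:
        print(document.name)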
- -import unittest2 - - -class TestClient(unittest2.TestCase): - PROJECT = 'PROJECT' - - def _getTargetClass(self): - from gcloud.search.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - from gcloud.search.connection import Connection - creds = _Credentials() - http = object() - client = self._makeOne( - project=self.PROJECT, credentials=creds, http=http) - self.assertTrue(isinstance(client.connection, Connection)) - self.assertTrue(client.connection.credentials is creds) - self.assertTrue(client.connection.http is http) - - def test_list_indexes_defaults(self): - from gcloud.search.index import Index - INDEX_1 = 'index-one' - INDEX_2 = 'index-two' - PATH = 'projects/%s/indexes' % self.PROJECT - TOKEN = 'TOKEN' - DATA = { - 'nextPageToken': TOKEN, - 'indexes': [ - {'project': self.PROJECT, - 'indexId': INDEX_1}, - {'project': self.PROJECT, - 'indexId': INDEX_2}, - ] - } - creds = _Credentials() - client = self._makeOne(self.PROJECT, creds) - conn = client.connection = _Connection(DATA) - - indexes, token = client.list_indexes() - - self.assertEqual(len(indexes), len(DATA['indexes'])) - for found, expected in zip(indexes, DATA['indexes']): - self.assertTrue(isinstance(found, Index)) - self.assertEqual(found.name, expected['indexId']) - self.assertEqual(found.text_fields, None) - self.assertEqual(found.atom_fields, None) - self.assertEqual(found.html_fields, None) - self.assertEqual(found.date_fields, None) - self.assertEqual(found.number_fields, None) - self.assertEqual(found.geo_fields, None) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {}) - - def test_list_indexes_explicit(self): - from gcloud.search.index import Index - INDEX_1 = 'index-one' - INDEX_2 = 'index-two' - PATH = 'projects/%s/indexes' % self.PROJECT - TOKEN = 'TOKEN' - DATA = { - 'indexes': [ - {'project': self.PROJECT, - 'indexId': INDEX_1, - 'indexedField': {'textFields': ['text-1']}}, - {'project': self.PROJECT, - 'indexId': INDEX_2, - 'indexedField': {'htmlFields': ['html-1']}}, - ] - } - creds = _Credentials() - client = self._makeOne(self.PROJECT, creds) - conn = client.connection = _Connection(DATA) - - indexes, token = client.list_indexes( - max_results=3, page_token=TOKEN, prefix='index', view='FULL') - - self.assertEqual(len(indexes), len(DATA['indexes'])) - for found, expected in zip(indexes, DATA['indexes']): - self.assertTrue(isinstance(found, Index)) - self.assertEqual(found.name, expected['indexId']) - field_info = expected['indexedField'] - self.assertEqual(found.text_fields, field_info.get('textFields')) - self.assertEqual(found.atom_fields, field_info.get('atomFields')) - self.assertEqual(found.html_fields, field_info.get('htmlFields')) - self.assertEqual(found.date_fields, field_info.get('dateFields')) - self.assertEqual(found.number_fields, - field_info.get('numberFields')) - self.assertEqual(found.geo_fields, field_info.get('geoFields')) - self.assertEqual(token, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'indexNamePrefix': 'index', - 'pageSize': 3, - 'pageToken': TOKEN, - 'view': 'FULL'}) - - def test_index(self): - from gcloud.search.index import Index - 
INDEX_ID = 'index-id' - creds = _Credentials() - http = object() - client = self._makeOne( - project=self.PROJECT, credentials=creds, http=http) - index = client.index(INDEX_ID) - self.assertTrue(isinstance(index, Index)) - self.assertEqual(index.name, INDEX_ID) - self.assertTrue(index._client is client) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/search/test_connection.py b/gcloud/search/test_connection.py deleted file mode 100644 index 5b41154e7073..000000000000 --- a/gcloud/search/test_connection.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.search.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_build_api_url_no_extra_query_params(self): - conn = self._makeOne() - URI = '/'.join([ - conn.API_BASE_URL, - conn.API_VERSION, - 'foo', - ]) - self.assertEqual(conn.build_api_url('/foo'), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - conn = self._makeOne() - uri = conn.build_api_url('/foo', {'bar': 'baz'}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, - '/'.join(['', conn.API_VERSION, 'foo'])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms['bar'], 'baz') diff --git a/gcloud/search/test_document.py b/gcloud/search/test_document.py deleted file mode 100644 index 688fefd4520f..000000000000 --- a/gcloud/search/test_document.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
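``test_connection.py`` above pins down the ``Connection.build_api_url`` contract: the URI is ``API_BASE_URL``, ``API_VERSION`` and the supplied path joined with ``/``, with any extra query parameters appended. A rough illustration; the commented result is schematic, since the actual base URL and version constants live in ``connection.py`` and are not shown in this diff::

    from six.moves.urllib.parse import urlsplit

    from gcloud.search.connection import Connection

    conn = Connection()
    uri = conn.build_api_url('/indexes', {'pageSize': 3})
    # uri == '<API_BASE_URL>/<API_VERSION>/indexes?pageSize=3'
    scheme, netloc, path, query, _ = urlsplit(uri)
    assert path.endswith('/indexes')
    assert 'pageSize=3' in query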
- -import unittest2 - - -class TestStringValue(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.search.document import StringValue - return StringValue - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - sv = self._makeOne('abcde') - self.assertEqual(sv.string_value, 'abcde') - self.assertEqual(sv.string_format, None) - self.assertEqual(sv.language, None) - - def test_ctor_explicit(self): - sv = self._makeOne('abcde', 'text', 'en') - self.assertEqual(sv.string_value, 'abcde') - self.assertEqual(sv.string_format, 'text') - self.assertEqual(sv.language, 'en') - - -class TestNumberValue(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.search.document import NumberValue - return NumberValue - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - nv = self._makeOne(42) - self.assertEqual(nv.number_value, 42) - - -class TestTimestampValue(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.search.document import TimestampValue - return TimestampValue - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - import datetime - from gcloud._helpers import UTC - NOW = datetime.datetime.utcnow().replace(tzinfo=UTC) - tv = self._makeOne(NOW) - self.assertEqual(tv.timestamp_value, NOW) - - -class TestGeoValue(unittest2.TestCase): - - LATITUDE, LONGITUDE = 38.301931, -77.458722 - - def _getTargetClass(self): - from gcloud.search.document import GeoValue - return GeoValue - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - gv = self._makeOne((self.LATITUDE, self.LONGITUDE)) - self.assertEqual(gv.geo_value, (self.LATITUDE, self.LONGITUDE)) - - -class TestField(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.search.document import Field - return Field - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - field = self._makeOne('field_name') - self.assertEqual(field.name, 'field_name') - self.assertEqual(len(field.values), 0) - - def test_add_value_unknown(self): - field = self._makeOne('field_name') - with self.assertRaises(ValueError): - field.add_value(object()) - - def test_add_value_string_defaults(self): - from gcloud.search.document import StringValue - field = self._makeOne('field_name') - field.add_value('this is a string') - self.assertEqual(len(field.values), 1) - value = field.values[0] - self.assertTrue(isinstance(value, StringValue)) - self.assertEqual(value.string_value, 'this is a string') - self.assertEqual(value.string_format, None) - self.assertEqual(value.language, None) - - def test_add_value_string_explicit(self): - from gcloud.search.document import StringValue - field = self._makeOne('field_name') - field.add_value('this is a string', - string_format='text', language='en') - self.assertEqual(len(field.values), 1) - value = field.values[0] - self.assertTrue(isinstance(value, StringValue)) - self.assertEqual(value.string_value, 'this is a string') - self.assertEqual(value.string_format, 'text') - self.assertEqual(value.language, 'en') - - def test_add_value_integer(self): - from gcloud.search.document import NumberValue - field = self._makeOne('field_name') - field.add_value(42) - self.assertEqual(len(field.values), 1) - value = field.values[0] - self.assertTrue(isinstance(value, NumberValue)) - self.assertEqual(value.number_value, 42) - - def 
test_add_value_datetime(self): - import datetime - from gcloud._helpers import UTC - from gcloud.search.document import TimestampValue - NOW = datetime.datetime.utcnow().replace(tzinfo=UTC) - field = self._makeOne('field_name') - field.add_value(NOW) - self.assertEqual(len(field.values), 1) - value = field.values[0] - self.assertTrue(isinstance(value, TimestampValue)) - self.assertEqual(value.timestamp_value, NOW) - - def test_add_value_geo(self): - from gcloud.search.document import GeoValue - LATITUDE, LONGITUDE = 38.301931, -77.458722 - field = self._makeOne('field_name') - field.add_value((LATITUDE, LONGITUDE)) - self.assertEqual(len(field.values), 1) - value = field.values[0] - self.assertTrue(isinstance(value, GeoValue)) - self.assertEqual(value.geo_value, (LATITUDE, LONGITUDE)) - - -class TestDocument(unittest2.TestCase): - - PROJECT = 'PROJECT' - DOC_NAME = 'doc_name' - INDEX_NAME = 'index_name' - DOC_PATH = 'projects/%s/indexes/%s/documents/%s' % ( - PROJECT, INDEX_NAME, DOC_NAME) - RANK = 42 - - def _getTargetClass(self): - from gcloud.search.document import Document - return Document - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - index = object() - document = self._makeOne(self.DOC_NAME, index) - self.assertEqual(document.name, self.DOC_NAME) - self.assertTrue(document.index is index) - self.assertEqual(document.rank, None) - self.assertEqual(document.fields, {}) - - def test_ctor_explicit(self): - index = object() - document = self._makeOne(self.DOC_NAME, index, self.RANK) - self.assertEqual(document.name, self.DOC_NAME) - self.assertTrue(document.index is index) - self.assertEqual(document.rank, self.RANK) - self.assertEqual(document.fields, {}) - - def test_from_api_repr_invalid(self): - klass = self._getTargetClass() - index = object() - with self.assertRaises(KeyError): - klass.from_api_repr({}, index) - - def test_from_api_repr(self): - import datetime - from gcloud._helpers import UTC, _RFC3339_MICROS - VALUE = 'The quick brown fox' - HTML_VALUE = 'jumped over the lazy fence.' 
- NOW = datetime.datetime.utcnow().replace(tzinfo=UTC) - NOW_STR = NOW.strftime(_RFC3339_MICROS) - LATITUDE, LONGITUDE = 38.301931, -77.458722 - resource = { - 'docId': self.DOC_NAME, - 'rank': self.RANK, - 'fields': { - 'title': { - 'values': [ - {'stringFormat': 'text', - 'lang': 'en', - 'stringValue': VALUE}, - {'stringFormat': 'html', - 'lang': 'en', - 'stringValue': HTML_VALUE}, - {'numberValue': 42}, - {'numberValue': '42'}, - {'numberValue': '3.1415926'}, - {'timestampValue': NOW_STR}, - {'geoValue': '%s, %s' % (LATITUDE, LONGITUDE)}, - ], - } - } - } - klass = self._getTargetClass() - index = object() - - document = klass.from_api_repr(resource, index) - - self.assertEqual(document.name, self.DOC_NAME) - self.assertTrue(document.index is index) - self.assertEqual(document.rank, self.RANK) - - self.assertEqual(list(document.fields), ['title']) - field = document.fields['title'] - self.assertEqual(field.name, 'title') - self.assertEqual(len(field.values), 7) - - value = field.values[0] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.language, 'en') - self.assertEqual(value.string_format, 'text') - self.assertEqual(value.string_value, VALUE) - - value = field.values[1] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.language, 'en') - self.assertEqual(value.string_format, 'html') - self.assertEqual(value.string_value, - 'jumped over the lazy fence.') - - value = field.values[2] - self.assertEqual(value.value_type, 'number') - self.assertEqual(value.number_value, 42) - - value = field.values[3] - self.assertEqual(value.value_type, 'number') - self.assertEqual(value.number_value, 42) - - value = field.values[4] - self.assertEqual(value.value_type, 'number') - self.assertEqual(value.number_value, 3.1415926) - - value = field.values[5] - self.assertEqual(value.value_type, 'timestamp') - self.assertEqual(value.timestamp_value, NOW) - - value = field.values[6] - self.assertEqual(value.value_type, 'geo') - self.assertEqual(value.geo_value, (LATITUDE, LONGITUDE)) - - def test__parse_value_resource_invalid(self): - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - with self.assertRaises(ValueError): - document._parse_value_resource({}) - - def test__build_value_resource_invalid(self): - class _UnknownValue(object): - value_type = 'nonesuch' - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - with self.assertRaises(ValueError): - document._build_value_resource(_UnknownValue()) - - def test__build_field_resources_field_wo_values(self): - conn = _Connection() - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - _ = document.field('testing') # no values - self.assertEqual(document._build_fields_resource(), {}) - - def test_create_wo_fields(self): - import copy - BODY = {'docId': self.DOC_NAME} - RESPONSE = copy.deepcopy(BODY) - RESPONSE['rank'] = self.RANK - conn = _Connection(RESPONSE) - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - - document.create() - - self.assertEqual(list(document.fields), []) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - 
self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - self.assertEqual(req['data'], BODY) - - def test_create_wo_rank_w_bound_client(self): - import copy - VALUE = 'The quick brown fox' - BODY = { - 'docId': self.DOC_NAME, - 'fields': { - 'testing': { - 'values': [ - {'stringValue': VALUE}, - ], - } - } - } - RESPONSE = copy.deepcopy(BODY) - RESPONSE['rank'] = self.RANK - response_value = RESPONSE['fields']['testing']['values'][0] - response_value['stringFormat'] = 'auto' - conn = _Connection(RESPONSE) - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - field = document.field('testing') - field.add_value(VALUE) - - document.create() - - self.assertEqual(list(document.fields), ['testing']) - field = document.fields['testing'] - self.assertEqual(len(field.values), 1) - - value = field.values[0] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.string_format, 'auto') - self.assertEqual(value.string_value, VALUE) - self.assertEqual(value.language, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - self.assertEqual(req['data'], BODY) - - def test_create_w_rank_w_alternate_client(self): - import datetime - from gcloud._helpers import UTC, _RFC3339_MICROS - VALUE = 'The quick brown fox' - NOW = datetime.datetime.utcnow().replace(tzinfo=UTC) - NOW_STR = NOW.strftime(_RFC3339_MICROS) - LATITUDE, LONGITUDE = 38.301931, -77.458722 - BODY = { - 'docId': self.DOC_NAME, - 'rank': self.RANK, - 'fields': { - 'title': { - 'values': [ - {'stringValue': VALUE, - 'stringFormat': 'text', - 'lang': 'en'}, - {'numberValue': 17.5}, - {'timestampValue': NOW_STR}, - {'geoValue': '%s, %s' % (LATITUDE, LONGITUDE)}, - ], - } - } - } - RESPONSE = BODY.copy() - RESPONSE['rank'] = self.RANK - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(BODY) - client2 = _Client(project=self.PROJECT, connection=conn2) - index = _Index(self.INDEX_NAME, client=client1) - document = self._makeOne(self.DOC_NAME, index, rank=self.RANK) - field = document.field('title') - field.add_value(VALUE, string_format='text', language='en') - field.add_value(17.5) - field.add_value(NOW) - field.add_value((LATITUDE, LONGITUDE)) - - document.create(client=client2) - - self.assertEqual(list(document.fields), ['title']) - field = document.fields['title'] - self.assertEqual(len(field.values), 4) - - value = field.values[0] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.string_format, 'text') - self.assertEqual(value.string_value, VALUE) - self.assertEqual(value.language, 'en') - - value = field.values[1] - self.assertEqual(value.value_type, 'number') - self.assertEqual(value.number_value, 17.5) - - value = field.values[2] - self.assertEqual(value.value_type, 'timestamp') - self.assertEqual(value.timestamp_value, NOW) - - value = field.values[3] - self.assertEqual(value.value_type, 'geo') - self.assertEqual(value.geo_value, (LATITUDE, LONGITUDE)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - - req = conn2._requested[0] - self.assertEqual(req['method'], 'PUT') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - self.assertEqual(req['data'], BODY) - - def test_exists_miss_w_bound_client(self): - conn = _Connection() - client = 
_Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - - self.assertFalse(document.exists()) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - self.assertEqual(req.get('query_params'), None) - - def test_exists_hit_w_alternate_client(self): - BODY = {'docId': self.DOC_NAME, 'rank': self.RANK} - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(BODY) - client2 = _Client(project=self.PROJECT, connection=conn2) - index = _Index(self.INDEX_NAME, client=client1) - document = self._makeOne(self.DOC_NAME, index) - - self.assertTrue(document.exists(client=client2)) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - self.assertEqual(req.get('query_params'), None) - - def test_reload_w_bound_client(self): - VALUE = 'The quick brown fox' - BODY = { - 'docId': self.DOC_NAME, - 'rank': self.RANK, - 'fields': { - 'title': { - 'values': [ - {'stringFormat': 'text', - 'lang': 'en', - 'stringValue': VALUE}, - ], - } - } - } - conn = _Connection(BODY) - client = _Client(project=self.PROJECT, connection=conn) - index = _Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - - document.reload() - - self.assertEqual(document.rank, self.RANK) - - self.assertEqual(list(document.fields), ['title']) - field = document.fields['title'] - self.assertEqual(len(field.values), 1) - self.assertEqual(field.name, 'title') - self.assertEqual(len(field.values), 1) - - value = field.values[0] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.language, 'en') - self.assertEqual(value.string_format, 'text') - self.assertEqual(value.string_value, VALUE) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - - def test_reload_w_alternate_client(self): - VALUE = 'The quick brown fox' - BODY = { - 'docId': self.DOC_NAME, - 'rank': self.RANK, - 'fields': { - 'title': { - 'values': [ - {'stringFormat': 'text', - 'lang': 'en', - 'stringValue': VALUE}, - ], - } - } - } - conn1 = _Connection() - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection(BODY) - client2 = _Client(project=self.PROJECT, connection=conn2) - index = _Index(self.INDEX_NAME, client=client1) - document = self._makeOne(self.DOC_NAME, index) - - document.reload(client=client2) - - self.assertEqual(document.rank, self.RANK) - - self.assertEqual(list(document.fields), ['title']) - field = document.fields['title'] - self.assertEqual(field.name, 'title') - self.assertEqual(len(field.values), 1) - - value = field.values[0] - self.assertEqual(value.value_type, 'string') - self.assertEqual(value.language, 'en') - self.assertEqual(value.string_format, 'text') - self.assertEqual(value.string_value, VALUE) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - - def test_delete_w_bound_client(self): - conn = _Connection({}) - client = _Client(project=self.PROJECT, connection=conn) - index = 
_Index(self.INDEX_NAME, client=client) - document = self._makeOne(self.DOC_NAME, index) - - document.delete() - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - - def test_delete_w_alternate_client(self): - conn1 = _Connection({}) - client1 = _Client(project=self.PROJECT, connection=conn1) - conn2 = _Connection({}) - client2 = _Client(project=self.PROJECT, connection=conn2) - index = _Index(self.INDEX_NAME, client=client1) - document = self._makeOne(self.DOC_NAME, index) - - document.delete(client=client2) - - self.assertEqual(len(conn1._requested), 0) - self.assertEqual(len(conn2._requested), 1) - req = conn2._requested[0] - self.assertEqual(req['method'], 'DELETE') - self.assertEqual(req['path'], '/%s' % self.DOC_PATH) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response - - -class _Index(object): - - def __init__(self, name, client): - self.name = name - self._client = client - self.project = client.project - self.path = '/projects/%s/indexes/%s' % (client.project, name) - - -class _Client(object): - - def __init__(self, project, connection=None): - self.project = project - self.connection = connection diff --git a/gcloud/search/test_index.py b/gcloud/search/test_index.py deleted file mode 100644 index 40225d1a358d..000000000000 --- a/gcloud/search/test_index.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
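``test_document.py`` above exercises the full ``Document`` life cycle: typed field values (string, number, timestamp, geo) are accumulated locally and only sent to the API on ``create()``. A sketch of that flow under the same assumptions as the earlier example (hypothetical index and document names, environment-derived credentials)::

    import datetime

    from gcloud._helpers import UTC
    from gcloud.search.client import Client

    client = Client()
    index = client.index('my-index')

    document = index.document('doc-1', rank=42)
    field = document.field('title')
    field.add_value('The quick brown fox', string_format='text', language='en')
    field.add_value(17.5)                                   # number value
    field.add_value(
        datetime.datetime.utcnow().replace(tzinfo=UTC))     # timestamp value
    field.add_value((38.301931, -77.458722))                # (lat, lon) geo value

    document.create()         # PUT  .../indexes/my-index/documents/doc-1
    assert document.exists()  # GET  returns the stored resource
    document.delete()         # DELETE removes it again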
- -import unittest2 - - -class TestIndex(unittest2.TestCase): - PROJECT = 'project' - INDEX_ID = 'index-id' - - def _getTargetClass(self): - from gcloud.search.index import Index - return Index - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _setUpConstants(self): - import datetime - from gcloud._helpers import UTC - - self.WHEN_TS = 1437767599.006 - self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace( - tzinfo=UTC) - self.ZONE_ID = 12345 - - def _makeResource(self): - self._setUpConstants() - return { - 'projectId': self.PROJECT, - 'indexId': self.INDEX_ID, - 'indexedField': { - 'textFields': ['text-1', 'text-2'], - 'htmlFields': ['html-1', 'html-2'], - 'atomFields': ['atom-1', 'atom-2'], - 'dateFields': ['date-1', 'date-2'], - 'numberFields': ['number-1', 'number-2'], - 'geoFields': ['geo-1', 'geo-2'], - } - } - - def _makeDocumentResource(self, doc_id, rank=None, title=None): - resource = {'docId': doc_id} - if rank is not None: - resource['rank'] = rank - if title is not None: - resource['fields'] = { - 'title': { - 'values': [{ - 'stringValue': title, - 'stringFormat': 'text', - 'lang': 'en'}] - } - } - return resource - - def _verifyResourceProperties(self, index, resource): - - self.assertEqual(index.name, resource.get('indexId')) - field_info = resource.get('indexedField', {}) - self.assertEqual(index.text_fields, field_info.get('textFields')) - self.assertEqual(index.html_fields, field_info.get('htmlFields')) - self.assertEqual(index.atom_fields, field_info.get('atomFields')) - self.assertEqual(index.date_fields, field_info.get('dateFields')) - self.assertEqual(index.number_fields, field_info.get('numberFields')) - self.assertEqual(index.geo_fields, field_info.get('geoFields')) - - def _verifyDocumentResource(self, documents, resource): - from gcloud.search.document import Document - from gcloud.search.document import StringValue - self.assertEqual(len(documents), len(resource)) - for found, expected in zip(documents, resource): - self.assertTrue(isinstance(found, Document)) - self.assertEqual(found.name, expected['docId']) - self.assertEqual(found.rank, expected.get('rank')) - e_fields = expected.get('fields', ()) - self.assertEqual(sorted(found.fields), sorted(e_fields)) - for field, f_field in found.fields.items(): - e_field = e_fields[field] - for f_value, e_value in zip(f_field.values, e_field['values']): - self.assertTrue(isinstance(f_value, StringValue)) - self.assertEqual(f_value.string_value, - e_value['stringValue']) - self.assertEqual(f_value.string_format, - e_value['stringFormat']) - self.assertEqual(f_value.language, - e_value['lang']) - - def test_ctor(self): - client = _Client(self.PROJECT) - index = self._makeOne(self.INDEX_ID, client) - self.assertEqual(index.name, self.INDEX_ID) - self.assertTrue(index._client is client) - self.assertEqual(index.project, client.project) - self.assertEqual( - index.path, - '/projects/%s/indexes/%s' % (self.PROJECT, self.INDEX_ID)) - self.assertEqual(index.text_fields, None) - self.assertEqual(index.html_fields, None) - self.assertEqual(index.atom_fields, None) - self.assertEqual(index.date_fields, None) - self.assertEqual(index.number_fields, None) - self.assertEqual(index.geo_fields, None) - - def test_from_api_repr_missing_identity(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = {} - klass = self._getTargetClass() - with self.assertRaises(KeyError): - klass.from_api_repr(RESOURCE, client=client) - - def test_from_api_repr_bare(self): - 
self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = { - 'indexId': self.INDEX_ID, - } - klass = self._getTargetClass() - index = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(index._client is client) - self._verifyResourceProperties(index, RESOURCE) - - def test_from_api_repr_w_properties(self): - self._setUpConstants() - client = _Client(self.PROJECT) - RESOURCE = self._makeResource() - klass = self._getTargetClass() - index = klass.from_api_repr(RESOURCE, client=client) - self.assertTrue(index._client is client) - self._verifyResourceProperties(index, RESOURCE) - - def test_list_documents_defaults(self): - DOCID_1 = 'docid-one' - DOCID_2 = 'docid-two' - PATH = 'projects/%s/indexes/%s/documents' % ( - self.PROJECT, self.INDEX_ID) - TOKEN = 'TOKEN' - DOC_1 = self._makeDocumentResource(DOCID_1) - DOC_2 = self._makeDocumentResource(DOCID_2) - RESPONSE = { - 'nextPageToken': TOKEN, - 'documents': [DOC_1, DOC_2], - } - client = _Client(self.PROJECT) - conn = client.connection = _Connection(RESPONSE) - index = self._makeOne(self.INDEX_ID, client) - - documents, token = index.list_documents() - - self._verifyDocumentResource(documents, RESPONSE['documents']) - self.assertEqual(token, TOKEN) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {}) - - def test_list_documents_explicit(self): - DOCID_1 = 'docid-one' - RANK_1 = 2345 - TITLE_1 = 'Title One' - DOCID_2 = 'docid-two' - RANK_2 = 1234 - TITLE_2 = 'Title Two' - PATH = 'projects/%s/indexes/%s/documents' % ( - self.PROJECT, self.INDEX_ID) - TOKEN = 'TOKEN' - DOC_1 = self._makeDocumentResource(DOCID_1, RANK_1, TITLE_1) - DOC_2 = self._makeDocumentResource(DOCID_2, RANK_2, TITLE_2) - RESPONSE = {'documents': [DOC_1, DOC_2]} - client = _Client(self.PROJECT) - conn = client.connection = _Connection(RESPONSE) - index = self._makeOne(self.INDEX_ID, client) - - documents, token = index.list_documents( - max_results=3, page_token=TOKEN, view='FULL') - - self._verifyDocumentResource(documents, RESPONSE['documents']) - self.assertEqual(token, None) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], - {'pageSize': 3, - 'pageToken': TOKEN, - 'view': 'FULL'}) - - def test_document_defaults(self): - from gcloud.search.document import Document - DOCUMENT_ID = 'document-id' - client = _Client(self.PROJECT) - index = self._makeOne(self.INDEX_ID, client) - - document = index.document(DOCUMENT_ID) - - self.assertTrue(isinstance(document, Document)) - self.assertEqual(document.name, DOCUMENT_ID) - self.assertEqual(document.rank, None) - self.assertTrue(document.index is index) - - def test_document_explicit(self): - from gcloud.search.document import Document - DOCUMENT_ID = 'document-id' - RANK = 1234 - client = _Client(self.PROJECT) - index = self._makeOne(self.INDEX_ID, client) - - document = index.document(DOCUMENT_ID, rank=RANK) - - self.assertTrue(isinstance(document, Document)) - self.assertEqual(document.name, DOCUMENT_ID) - self.assertEqual(document.rank, RANK) - self.assertTrue(document.index is index) - - def test_search_defaults(self): - DOCID_1 = 'docid-one' - TITLE_1 = 'Title One' - DOCID_2 = 'docid-two' - TITLE_2 = 'Title Two' - PATH = 'projects/%s/indexes/%s/search' % ( - self.PROJECT, self.INDEX_ID) - TOKEN = 'TOKEN' - 
DOC_1 = self._makeDocumentResource(DOCID_1, title=TITLE_1) - DOC_2 = self._makeDocumentResource(DOCID_2, title=TITLE_2) - QUERY = 'query string' - RESPONSE = { - 'nextPageToken': TOKEN, - 'matchedCount': 2, - 'results': [DOC_1, DOC_2], - } - client = _Client(self.PROJECT) - conn = client.connection = _Connection(RESPONSE) - index = self._makeOne(self.INDEX_ID, client) - - documents, token, matched_count = index.search(QUERY) - - self._verifyDocumentResource(documents, RESPONSE['results']) - self.assertEqual(token, TOKEN) - self.assertEqual(matched_count, 2) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - self.assertEqual(req['query_params'], {'query': QUERY}) - - def test_search_explicit(self): - DOCID_1 = 'docid-one' - TITLE_1 = 'Title One' - FUNKY_1 = 'this is a funky show' - RANK_1 = 2345 - DOCID_2 = 'docid-two' - TITLE_2 = 'Title Two' - FUNKY_2 = 'delighfully funky ambiance' - RANK_2 = 1234 - PATH = 'projects/%s/indexes/%s/search' % ( - self.PROJECT, self.INDEX_ID) - TOKEN = 'TOKEN' - - def _makeFunky(text): - return { - 'values': [{ - 'stringValue': text, - 'stringFormat': 'text', - 'lang': 'en', - }] - } - - DOC_1 = self._makeDocumentResource(DOCID_1, RANK_1, TITLE_1) - DOC_1['fields']['funky'] = _makeFunky(FUNKY_1) - DOC_2 = self._makeDocumentResource(DOCID_2, RANK_2, TITLE_2) - DOC_2['fields']['funky'] = _makeFunky(FUNKY_2) - EXPRESSIONS = {'funky': 'snippet("funky", content)'} - QUERY = 'query string' - RESPONSE = { - 'matchedCount': 2, - 'results': [DOC_1, DOC_2], - } - client = _Client(self.PROJECT) - conn = client.connection = _Connection(RESPONSE) - index = self._makeOne(self.INDEX_ID, client) - - documents, token, matched_count = index.search( - query=QUERY, - max_results=3, - page_token=TOKEN, - field_expressions=EXPRESSIONS, - order_by=['title'], - matched_count_accuracy=100, - scorer='generic', - scorer_size=20, - return_fields=['_rank', 'title', 'funky'], - ) - - self._verifyDocumentResource(documents, RESPONSE['results']) - self.assertEqual(token, None) - self.assertEqual(matched_count, 2) - - self.assertEqual(len(conn._requested), 1) - req = conn._requested[0] - self.assertEqual(req['method'], 'GET') - self.assertEqual(req['path'], '/%s' % PATH) - expected_params = { - 'query': QUERY, - 'pageSize': 3, - 'pageToken': TOKEN, - 'fieldExpressions': EXPRESSIONS, - 'orderBy': ['title'], - 'matchedCountAccuracy': 100, - 'scorer': 'generic', - 'scorerSize': 20, - 'returnFields': ['_rank', 'title', 'funky'], - } - self.assertEqual(req['query_params'], expected_params) - - -class _Client(object): - - def __init__(self, project='project', connection=None): - self.project = project - self.connection = connection - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: # pragma: NO COVER - raise NotFound('miss') - else: - return response diff --git a/gcloud/storage/__init__.py b/gcloud/storage/__init__.py deleted file mode 100644 index 8847798d25e3..000000000000 --- a/gcloud/storage/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Shortcut methods for getting set up with Google Cloud Storage. - -You'll typically use these to get started with the API: - ->>> from gcloud import storage ->>> client = storage.Client() ->>> bucket = client.get_bucket('bucket-id-here') ->>> # Then do other things... ->>> blob = bucket.get_blob('/remote/path/to/file.txt') ->>> print blob.download_as_string() ->>> blob.upload_from_string('New contents!') ->>> blob2 = bucket.blob('/remote/path/storage.txt') ->>> blob2.upload_from_filename(filename='/local/path.txt') - -The main concepts with this API are: - -- :class:`gcloud.storage.connection.Connection` which represents a - connection between your machine and the Cloud Storage API. - -- :class:`gcloud.storage.bucket.Bucket` which represents a particular - bucket (akin to a mounted disk on a computer). - -- :class:`gcloud.storage.blob.Blob` which represents a pointer to a - particular entity in Cloud Storage (akin to a file path on a remote - machine). -""" - -from gcloud.storage.batch import Batch -from gcloud.storage.blob import Blob -from gcloud.storage.bucket import Bucket -from gcloud.storage.client import Client -from gcloud.storage.connection import Connection - - -SCOPE = Connection.SCOPE diff --git a/gcloud/storage/_helpers.py b/gcloud/storage/_helpers.py deleted file mode 100644 index e55fcf179a5f..000000000000 --- a/gcloud/storage/_helpers.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper functions for Cloud Storage utility classes. - -These are *not* part of the API. -""" - -import base64 -from hashlib import md5 - - -class _PropertyMixin(object): - """Abstract mixin for cloud storage classes with associated propertties. - - Non-abstract subclasses should implement: - - client - - path - - :type name: string - :param name: The name of the object. - """ - - def __init__(self, name=None): - self.name = name - self._properties = {} - self._changes = set() - - @property - def path(self): - """Abstract getter for the object path.""" - raise NotImplementedError - - @property - def client(self): - """Abstract getter for the object client.""" - raise NotImplementedError - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - - :rtype: :class:`gcloud.storage.client.Client` - :returns: The client passed in or the currently bound client. 
- """ - if client is None: - client = self.client - return client - - def reload(self, client=None): - """Reload properties from Cloud Storage. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - """ - client = self._require_client(client) - # Pass only '?projection=noAcl' here because 'acl' and related - # are handled via custom endpoints. - query_params = {'projection': 'noAcl'} - api_response = client.connection.api_request( - method='GET', path=self.path, query_params=query_params, - _target_object=self) - self._set_properties(api_response) - - def _patch_property(self, name, value): - """Update field of this object's properties. - - This method will only update the field provided and will not - touch the other fields. - - It **will not** reload the properties from the server. The behavior is - local only and syncing occurs via :meth:`patch`. - - :type name: string - :param name: The field name to update. - - :type value: object - :param value: The value being updated. - """ - self._changes.add(name) - self._properties[name] = value - - def _set_properties(self, value): - """Set the properties for the current object. - - :type value: dict or :class:`gcloud.storage.batch._FutureDict` - :param value: The properties to be set. - """ - self._properties = value - # If the values are reset, the changes must as well. - self._changes = set() - - def patch(self, client=None): - """Sends all changed properties in a PATCH request. - - Updates the ``_properties`` with the response from the backend. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current object. - """ - client = self._require_client(client) - # Pass '?projection=full' here because 'PATCH' documented not - # to work properly w/ 'noAcl'. - update_properties = dict((key, self._properties[key]) - for key in self._changes) - api_response = client.connection.api_request( - method='PATCH', path=self.path, data=update_properties, - query_params={'projection': 'full'}, _target_object=self) - self._set_properties(api_response) - - -def _scalar_property(fieldname): - """Create a property descriptor around the :class:`_PropertyMixin` helpers. - """ - def _getter(self): - """Scalar property getter.""" - return self._properties.get(fieldname) - - def _setter(self, value): - """Scalar property setter.""" - self._patch_property(fieldname, value) - - return property(_getter, _setter) - - -def _write_buffer_to_hash(buffer_object, hash_obj, digest_block_size=8192): - """Read blocks from a buffer and update a hash with them. - - :type buffer_object: bytes buffer - :param buffer_object: Buffer containing bytes used to update a hash object. - - :type hash_obj: object that implements update - :param hash_obj: A hash object (MD5 or CRC32-C). - - :type digest_block_size: integer - :param digest_block_size: The block size to write to the hash. - Defaults to 8192. - """ - block = buffer_object.read(digest_block_size) - - while len(block) > 0: - hash_obj.update(block) - # Update the block for the next iteration. - block = buffer_object.read(digest_block_size) - - -def _base64_md5hash(buffer_object): - """Get MD5 hash of bytes (as base64). - - :type buffer_object: bytes buffer - :param buffer_object: Buffer containing bytes used to compute an MD5 - hash (as base64). 
- """ - hash_obj = md5() - _write_buffer_to_hash(buffer_object, hash_obj) - digest_bytes = hash_obj.digest() - return base64.b64encode(digest_bytes) diff --git a/gcloud/storage/acl.py b/gcloud/storage/acl.py deleted file mode 100644 index 92a955239fa1..000000000000 --- a/gcloud/storage/acl.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Manipulate access control lists that Cloud Storage provides. - -:class:`gcloud.storage.bucket.Bucket` has a getting method that creates -an ACL object under the hood, and you can interact with that using -:func:`gcloud.storage.bucket.Bucket.acl`:: - - >>> from gcloud import storage - >>> client = storage.Client() - >>> bucket = client.get_bucket(bucket_name) - >>> acl = bucket.acl - -Adding and removing permissions can be done with the following methods -(in increasing order of granularity): - -- :func:`ACL.all` - corresponds to access for all users. -- :func:`ACL.all_authenticated` corresponds - to access for all users that are signed into a Google account. -- :func:`ACL.domain` corresponds to access on a - per Google Apps domain (ie, ``example.com``). -- :func:`ACL.group` corresponds to access on a - per group basis (either by ID or e-mail address). -- :func:`ACL.user` corresponds to access on a - per user basis (either by ID or e-mail address). - -And you are able to ``grant`` and ``revoke`` the following roles: - -- **Reading**: - :func:`_ACLEntity.grant_read` and :func:`_ACLEntity.revoke_read` -- **Writing**: - :func:`_ACLEntity.grant_write` and :func:`_ACLEntity.revoke_write` -- **Owning**: - :func:`_ACLEntity.grant_owner` and :func:`_ACLEntity.revoke_owner` - -You can use any of these like any other factory method (these happen to -be :class:`_ACLEntity` factories):: - - >>> acl.user('me@example.org').grant_read() - >>> acl.all_authenticated().grant_write() - -You can also chain these ``grant_*`` and ``revoke_*`` methods together -for brevity:: - - >>> acl.all().grant_read().revoke_write() - -After that, you can save any changes you make with the -:func:`gcloud.storage.acl.ACL.save` method:: - - >>> acl.save() - -You can alternatively save any existing :class:`gcloud.storage.acl.ACL` -object (whether it was created by a factory method or not) from a -:class:`gcloud.storage.bucket.Bucket`:: - - >>> bucket.acl.save(acl=acl) - -To get the list of ``entity`` and ``role`` for each unique pair, the -:class:`ACL` class is iterable:: - - >>> print list(ACL) - [{'role': 'OWNER', 'entity': 'allUsers'}, ...] - -This list of tuples can be used as the ``entity`` and ``role`` fields -when sending metadata for ACLs to the API. -""" - - -class _ACLEntity(object): - """Class representing a set of roles for an entity. - - This is a helper class that you likely won't ever construct - outside of using the factor methods on the :class:`ACL` object. - - :type entity_type: string - :param entity_type: The type of entity (ie, 'group' or 'user'). 
- - :type identifier: string - :param identifier: The ID or e-mail of the entity. For the special - entity types (like 'allUsers') this is optional. - """ - - READER_ROLE = 'READER' - WRITER_ROLE = 'WRITER' - OWNER_ROLE = 'OWNER' - - def __init__(self, entity_type, identifier=None): - self.identifier = identifier - self.roles = set([]) - self.type = entity_type - - def __str__(self): - if not self.identifier: - return str(self.type) - else: - return '{acl.type}-{acl.identifier}'.format(acl=self) - - def __repr__(self): - return ''.format( - acl=self, roles=', '.join(self.roles)) - - def get_roles(self): - """Get the list of roles permitted by this entity. - - :rtype: list of strings - :returns: The list of roles associated with this entity. - """ - return self.roles - - def grant(self, role): - """Add a role to the entity. - - :type role: string - :param role: The role to add to the entity. - """ - self.roles.add(role) - - def revoke(self, role): - """Remove a role from the entity. - - :type role: string - :param role: The role to remove from the entity. - """ - if role in self.roles: - self.roles.remove(role) - - def grant_read(self): - """Grant read access to the current entity.""" - self.grant(_ACLEntity.READER_ROLE) - - def grant_write(self): - """Grant write access to the current entity.""" - self.grant(_ACLEntity.WRITER_ROLE) - - def grant_owner(self): - """Grant owner access to the current entity.""" - self.grant(_ACLEntity.OWNER_ROLE) - - def revoke_read(self): - """Revoke read access from the current entity.""" - self.revoke(_ACLEntity.READER_ROLE) - - def revoke_write(self): - """Revoke write access from the current entity.""" - self.revoke(_ACLEntity.WRITER_ROLE) - - def revoke_owner(self): - """Revoke owner access from the current entity.""" - self.revoke(_ACLEntity.OWNER_ROLE) - - -class ACL(object): - """Container class representing a list of access controls.""" - - _URL_PATH_ELEM = 'acl' - _PREDEFINED_QUERY_PARAM = 'predefinedAcl' - - PREDEFINED_XML_ACLS = { - # XML API name -> JSON API name - 'project-private': 'projectPrivate', - 'public-read': 'publicRead', - 'public-read-write': 'publicReadWrite', - 'authenticated-read': 'authenticatedRead', - 'bucket-owner-read': 'bucketOwnerRead', - 'bucket-owner-full-control': 'bucketOwnerFullControl', - } - - PREDEFINED_JSON_ACLS = frozenset([ - 'private', - 'projectPrivate', - 'publicRead', - 'publicReadWrite', - 'authenticatedRead', - 'bucketOwnerRead', - 'bucketOwnerFullControl', - ]) - """See: - https://cloud.google.com/storage/docs/access-control#predefined-acl - """ - - loaded = False - - # Subclasses must override to provide these attributes (typically, - # as properties). - reload_path = None - save_path = None - - def __init__(self): - self.entities = {} - - def _ensure_loaded(self): - """Load if not already loaded.""" - if not self.loaded: - self.reload() - - def reset(self): - """Remove all entities from the ACL, and clear the ``loaded`` flag.""" - self.entities.clear() - self.loaded = False - - def __iter__(self): - self._ensure_loaded() - - for entity in self.entities.values(): - for role in entity.get_roles(): - if role: - yield {'entity': str(entity), 'role': role} - - def entity_from_dict(self, entity_dict): - """Build an _ACLEntity object from a dictionary of data. - - An entity is a mutable object that represents a list of roles - belonging to either a user or group or the special types for all - users and all authenticated users. 
- - :type entity_dict: dict - :param entity_dict: Dictionary full of data from an ACL lookup. - - :rtype: :class:`_ACLEntity` - :returns: An Entity constructed from the dictionary. - """ - entity = entity_dict['entity'] - role = entity_dict['role'] - - if entity == 'allUsers': - entity = self.all() - - elif entity == 'allAuthenticatedUsers': - entity = self.all_authenticated() - - elif '-' in entity: - entity_type, identifier = entity.split('-', 1) - entity = self.entity(entity_type=entity_type, - identifier=identifier) - - if not isinstance(entity, _ACLEntity): - raise ValueError('Invalid dictionary: %s' % entity_dict) - - entity.grant(role) - return entity - - def has_entity(self, entity): - """Returns whether or not this ACL has any entries for an entity. - - :type entity: :class:`_ACLEntity` - :param entity: The entity to check for existence in this ACL. - - :rtype: boolean - :returns: True of the entity exists in the ACL. - """ - self._ensure_loaded() - return str(entity) in self.entities - - def get_entity(self, entity, default=None): - """Gets an entity object from the ACL. - - :type entity: :class:`_ACLEntity` or string - :param entity: The entity to get lookup in the ACL. - - :type default: anything - :param default: This value will be returned if the entity - doesn't exist. - - :rtype: :class:`_ACLEntity` - :returns: The corresponding entity or the value provided - to ``default``. - """ - self._ensure_loaded() - return self.entities.get(str(entity), default) - - def add_entity(self, entity): - """Add an entity to the ACL. - - :type entity: :class:`_ACLEntity` - :param entity: The entity to add to this ACL. - """ - self._ensure_loaded() - self.entities[str(entity)] = entity - - def entity(self, entity_type, identifier=None): - """Factory method for creating an Entity. - - If an entity with the same type and identifier already exists, - this will return a reference to that entity. If not, it will - create a new one and add it to the list of known entities for - this ACL. - - :type entity_type: string - :param entity_type: The type of entity to create - (ie, ``user``, ``group``, etc) - - :type identifier: string - :param identifier: The ID of the entity (if applicable). - This can be either an ID or an e-mail address. - - :rtype: :class:`_ACLEntity` - :returns: A new Entity or a reference to an existing identical entity. - """ - entity = _ACLEntity(entity_type=entity_type, identifier=identifier) - if self.has_entity(entity): - entity = self.get_entity(entity) - else: - self.add_entity(entity) - return entity - - def user(self, identifier): - """Factory method for a user Entity. - - :type identifier: string - :param identifier: An id or e-mail for this particular user. - - :rtype: :class:`_ACLEntity` - :returns: An Entity corresponding to this user. - """ - return self.entity('user', identifier=identifier) - - def group(self, identifier): - """Factory method for a group Entity. - - :type identifier: string - :param identifier: An id or e-mail for this particular group. - - :rtype: :class:`_ACLEntity` - :returns: An Entity corresponding to this group. - """ - return self.entity('group', identifier=identifier) - - def domain(self, domain): - """Factory method for a domain Entity. - - :type domain: string - :param domain: The domain for this entity. - - :rtype: :class:`_ACLEntity` - :returns: An entity corresponding to this domain. - """ - return self.entity('domain', identifier=domain) - - def all(self): - """Factory method for an Entity representing all users. 
- - :rtype: :class:`_ACLEntity` - :returns: An entity representing all users. - """ - return self.entity('allUsers') - - def all_authenticated(self): - """Factory method for an Entity representing all authenticated users. - - :rtype: :class:`_ACLEntity` - :returns: An entity representing all authenticated users. - """ - return self.entity('allAuthenticatedUsers') - - def get_entities(self): - """Get a list of all Entity objects. - - :rtype: list of :class:`_ACLEntity` objects - :returns: A list of all Entity objects. - """ - self._ensure_loaded() - return list(self.entities.values()) - - @property - def client(self): - """Abstract getter for the object client.""" - raise NotImplementedError - - def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current ACL. - - :rtype: :class:`gcloud.storage.client.Client` - :returns: The client passed in or the currently bound client. - """ - if client is None: - client = self.client - return client - - def reload(self, client=None): - """Reload the ACL data from Cloud Storage. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - path = self.reload_path - client = self._require_client(client) - - self.entities.clear() - - found = client.connection.api_request(method='GET', path=path) - self.loaded = True - for entry in found.get('items', ()): - self.add_entity(self.entity_from_dict(entry)) - - def _save(self, acl, predefined, client): - """Helper for :meth:`save` and :meth:`save_predefined`. - - :type acl: :class:`gcloud.storage.acl.ACL`, or a compatible list. - :param acl: The ACL object to save. If left blank, this will save - current entries. - - :type predefined: string or None - :param predefined: An identifier for a predefined ACL. Must be one - of the keys in :attr:`PREDEFINED_JSON_ACLS` - If passed, `acl` must be None. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - query_params = {'projection': 'full'} - if predefined is not None: - acl = [] - query_params[self._PREDEFINED_QUERY_PARAM] = predefined - - path = self.save_path - client = self._require_client(client) - result = client.connection.api_request( - method='PATCH', - path=path, - data={self._URL_PATH_ELEM: list(acl)}, - query_params=query_params) - self.entities.clear() - for entry in result.get(self._URL_PATH_ELEM, ()): - self.add_entity(self.entity_from_dict(entry)) - self.loaded = True - - def save(self, acl=None, client=None): - """Save this ACL for the current bucket. - - :type acl: :class:`gcloud.storage.acl.ACL`, or a compatible list. - :param acl: The ACL object to save. If left blank, this will save - current entries. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - if acl is None: - acl = self - save_to_backend = acl.loaded - else: - save_to_backend = True - - if save_to_backend: - self._save(acl, None, client) - - def save_predefined(self, predefined, client=None): - """Save this ACL for the current bucket using a predefined ACL. 
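# A typical reload()/save() round trip against a live bucket, sketched with
# hypothetical names (not from the original file).  ``bucket.acl`` is the
# BucketACL defined further down in this diff; save() PATCHes the full entity
# list back, and save_predefined()/clear() below are variations on the same
# request.
from gcloud import storage

client = storage.Client()
bucket = client.get_bucket('my-bucket')

acl = bucket.acl
acl.reload()                                  # GET   <bucket path>/acl
acl.user('alice@example.com').grant_read()
acl.save()                                    # PATCH <bucket path>?projection=full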
- - :type predefined: string - :param predefined: An identifier for a predefined ACL. Must be one - of the keys in :attr:`PREDEFINED_JSON_ACLS` - or :attr:`PREDEFINED_XML_ACLS` (which will be - aliased to the corresponding JSON name). - If passed, `acl` must be None. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - predefined = self.PREDEFINED_XML_ACLS.get(predefined, predefined) - - if predefined not in self.PREDEFINED_JSON_ACLS: - raise ValueError("Invalid predefined ACL: %s" % (predefined,)) - - self._save(None, predefined, client) - - def clear(self, client=None): - """Remove all ACL entries. - - Note that this won't actually remove *ALL* the rules, but it - will remove all the non-default rules. In short, you'll still - have access to a bucket that you created even after you clear - ACL rules with this method. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the ACL's parent. - """ - self.save([], client=client) - - -class BucketACL(ACL): - """An ACL specifically for a bucket. - - :type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket to which this ACL relates. - """ - - def __init__(self, bucket): - super(BucketACL, self).__init__() - self.bucket = bucket - - @property - def client(self): - """The client bound to this ACL's bucket.""" - return self.bucket.client - - @property - def reload_path(self): - """Compute the path for GET API requests for this ACL.""" - return '%s/%s' % (self.bucket.path, self._URL_PATH_ELEM) - - @property - def save_path(self): - """Compute the path for PATCH API requests for this ACL.""" - return self.bucket.path - - -class DefaultObjectACL(BucketACL): - """A class representing the default object ACL for a bucket.""" - - _URL_PATH_ELEM = 'defaultObjectAcl' - _PREDEFINED_QUERY_PARAM = 'predefinedDefaultObjectAcl' - - -class ObjectACL(ACL): - """An ACL specifically for a Cloud Storage object / blob. - - :type blob: :class:`gcloud.storage.blob.Blob` - :param blob: The blob that this ACL corresponds to. - """ - - def __init__(self, blob): - super(ObjectACL, self).__init__() - self.blob = blob - - @property - def client(self): - """The client bound to this ACL's blob.""" - return self.blob.client - - @property - def reload_path(self): - """Compute the path for GET API requests for this ACL.""" - return '%s/acl' % self.blob.path - - @property - def save_path(self): - """Compute the path for PATCH API requests for this ACL.""" - return self.blob.path diff --git a/gcloud/storage/batch.py b/gcloud/storage/batch.py deleted file mode 100644 index 185df116ec35..000000000000 --- a/gcloud/storage/batch.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Batch updates / deletes of storage buckets / blobs. - -See: https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch -""" -from email.encoders import encode_noop -from email.generator import Generator -from email.mime.application import MIMEApplication -from email.mime.multipart import MIMEMultipart -from email.parser import Parser -import io -import json - -import httplib2 -import six - -from gcloud.exceptions import make_exception -from gcloud.storage.connection import Connection - - -class MIMEApplicationHTTP(MIMEApplication): - """MIME type for ``application/http``. - - Constructs payload from headers and body - - :type method: string - :param method: HTTP method - - :type uri: string - :param uri: URI for HTTP request - - :type headers: dict - :param headers: HTTP headers - - :type body: text or None - :param body: HTTP payload - """ - def __init__(self, method, uri, headers, body): - if isinstance(body, dict): - body = json.dumps(body) - headers['Content-Type'] = 'application/json' - headers['Content-Length'] = len(body) - if body is None: - body = '' - lines = ['%s %s HTTP/1.1' % (method, uri)] - lines.extend(['%s: %s' % (key, value) - for key, value in sorted(headers.items())]) - lines.append('') - lines.append(body) - payload = '\r\n'.join(lines) - if six.PY2: # pragma: NO COVER Python2 - # Sigh. email.message.Message is an old-style class, so we - # cannot use 'super()'. - MIMEApplication.__init__(self, payload, 'http', encode_noop) - else: # pragma: NO COVER Python3 - super_init = super(MIMEApplicationHTTP, self).__init__ - super_init(payload, 'http', encode_noop) - - -class NoContent(object): - """Emulate an HTTP '204 No Content' response.""" - status = 204 - - -class _FutureDict(object): - """Class to hold a future value for a deferred request. - - Used by for requests that get sent in a :class:`Batch`. - """ - - @staticmethod - def get(key, default=None): - """Stand-in for dict.get. - - :type key: object - :param key: Hashable dictionary key. - - :type default: object - :param default: Fallback value to dict.get. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError('Cannot get(%r, default=%r) on a future' % ( - key, default)) - - def __getitem__(self, key): - """Stand-in for dict[key]. - - :type key: object - :param key: Hashable dictionary key. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError('Cannot get item %r from a future' % (key,)) - - def __setitem__(self, key, value): - """Stand-in for dict[key] = value. - - :type key: object - :param key: Hashable dictionary key. - - :type value: object - :param value: Dictionary value. - - :raises: :class:`KeyError` always since the future is intended to fail - as a dictionary. - """ - raise KeyError('Cannot set %r -> %r on a future' % (key, value)) - - -class Batch(Connection): - """Proxy an underlying connection, batching up change operations. - - :type client: :class:`gcloud.storage.client.Client` - :param client: The client to use for making connections. - """ - _MAX_BATCH_SIZE = 1000 - - def __init__(self, client): - super(Batch, self).__init__() - self._client = client - self._requests = [] - self._target_objects = [] - - def _do_request(self, method, url, headers, data, target_object): - """Override Connection: defer actual HTTP request. - - Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. - - :type method: string - :param method: The HTTP method to use in the request. 
- - :type url: string - :param url: The URL to send the request to. - - :type headers: dict - :param headers: A dictionary of HTTP headers to send with the request. - - :type data: string - :param data: The data to send as the body of the request. - - :type target_object: object or :class:`NoneType` - :param target_object: This allows us to enable custom behavior in our - batch connection. Here we defer an HTTP request - and complete initialization of the object at a - later time. - - :rtype: tuple of ``response`` (a dictionary of sorts) - and ``content`` (a string). - :returns: The HTTP response object and the content of the response. - """ - if len(self._requests) >= self._MAX_BATCH_SIZE: - raise ValueError("Too many deferred requests (max %d)" % - self._MAX_BATCH_SIZE) - self._requests.append((method, url, headers, data)) - result = _FutureDict() - self._target_objects.append(target_object) - if target_object is not None: - target_object._properties = result - return NoContent(), result - - def _prepare_batch_request(self): - """Prepares headers and body for a batch request. - - :rtype: tuple (dict, string) - :returns: The pair of headers and body of the batch request to be sent. - :raises: :class:`ValueError` if no requests have been deferred. - """ - if len(self._requests) == 0: - raise ValueError("No deferred requests") - - multi = MIMEMultipart() - - for method, uri, headers, body in self._requests: - subrequest = MIMEApplicationHTTP(method, uri, headers, body) - multi.attach(subrequest) - - # The `email` package expects to deal with "native" strings - if six.PY3: # pragma: NO COVER Python3 - buf = io.StringIO() - else: # pragma: NO COVER Python2 - buf = io.BytesIO() - generator = Generator(buf, False, 0) - generator.flatten(multi) - payload = buf.getvalue() - - # Strip off redundant header text - _, body = payload.split('\n\n', 1) - return dict(multi._headers), body - - def _finish_futures(self, responses): - """Apply all the batch responses to the futures created. - - :type responses: list of (headers, payload) tuples. - :param responses: List of headers and payloads from each response in - the batch. - - :raises: :class:`ValueError` if no requests have been deferred. - """ - # If a bad status occurs, we track it, but don't raise an exception - # until all futures have been populated. - exception_args = None - - if len(self._target_objects) != len(responses): - raise ValueError('Expected a response for every request.') - - for target_object, sub_response in zip(self._target_objects, - responses): - resp_headers, sub_payload = sub_response - if not 200 <= resp_headers.status < 300: - exception_args = exception_args or (resp_headers, - sub_payload) - elif target_object is not None: - target_object._properties = sub_payload - - if exception_args is not None: - raise make_exception(*exception_args) - - def finish(self): - """Submit a single `multipart/mixed` request w/ deferred requests. - - :rtype: list of tuples - :returns: one ``(headers, payload)`` tuple per deferred request. - """ - headers, body = self._prepare_batch_request() - - url = '%s/batch' % self.API_BASE_URL - - # Use the private ``_connection`` rather than the public - # ``.connection``, since the public connection may be this - # current batch. 
- response, content = self._client._connection._make_request( - 'POST', url, data=body, headers=headers) - responses = list(_unpack_batch_response(response, content)) - self._finish_futures(responses) - return responses - - def current(self): - """Return the topmost batch, or None.""" - return self._client.current_batch - - def __enter__(self): - self._client._push_batch(self) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - try: - if exc_type is None: - self.finish() - finally: - self._client._pop_batch() - - -def _generate_faux_mime_message(parser, response, content): - """Convert response, content -> (multipart) email.message. - - Helper for _unpack_batch_response. - """ - # We coerce to bytes to get consitent concat across - # Py2 and Py3. Percent formatting is insufficient since - # it includes the b in Py3. - if not isinstance(content, six.binary_type): - content = content.encode('utf-8') - content_type = response['content-type'] - if not isinstance(content_type, six.binary_type): - content_type = content_type.encode('utf-8') - faux_message = b''.join([ - b'Content-Type: ', - content_type, - b'\nMIME-Version: 1.0\n\n', - content, - ]) - - if six.PY2: - return parser.parsestr(faux_message) - else: # pragma: NO COVER Python3 - return parser.parsestr(faux_message.decode('utf-8')) - - -def _unpack_batch_response(response, content): - """Convert response, content -> [(headers, payload)]. - - Creates a generator of tuples of emulating the responses to - :meth:`httplib2.Http.request` (a pair of headers and payload). - - :type response: :class:`httplib2.Response` - :param response: HTTP response / headers from a request. - - :type content: string - :param content: Response payload with a batch response. - - :rtype: generator - :returns: A generator of header, payload pairs. - """ - parser = Parser() - message = _generate_faux_mime_message(parser, response, content) - - if not isinstance(message._payload, list): - raise ValueError('Bad response: not multi-part') - - for subrequest in message._payload: - status_line, rest = subrequest._payload.split('\n', 1) - _, status, _ = status_line.split(' ', 2) - sub_message = parser.parsestr(rest) - payload = sub_message._payload - ctype = sub_message['Content-Type'] - msg_headers = dict(sub_message._headers) - msg_headers['status'] = status - headers = httplib2.Response(msg_headers) - if ctype and ctype.startswith('application/json'): - payload = json.loads(payload) - yield headers, payload diff --git a/gcloud/storage/blob.py b/gcloud/storage/blob.py deleted file mode 100644 index fe73a9b1a9a0..000000000000 --- a/gcloud/storage/blob.py +++ /dev/null @@ -1,824 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
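# A sketch of the Batch context manager defined above (not from the original
# file).  While the batch is current, requests issued through the client are
# deferred (the client's public ``connection`` resolves to the active batch,
# as the comments in blob.py note), and __exit__ submits them as a single
# multipart/mixed request via finish().  Names are hypothetical.
from gcloud import storage
from gcloud.storage.batch import Batch

client = storage.Client()
bucket = client.get_bucket('my-bucket')

with Batch(client):
    bucket.delete_blob('stale-1.txt')   # deferred
    bucket.delete_blob('stale-2.txt')   # deferred
# On a clean exit both DELETEs go out in one HTTP request; an exception in
# the block skips finish() entirely.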
- -"""Create / interact with Google Cloud Storage blobs.""" - -import copy -from io import BytesIO -import json -import mimetypes -import os -import time - -import six -from six.moves.urllib.parse import quote - -from gcloud._helpers import _rfc3339_to_datetime -from gcloud.credentials import generate_signed_url -from gcloud.exceptions import NotFound -from gcloud.storage._helpers import _PropertyMixin -from gcloud.storage._helpers import _scalar_property -from gcloud.storage.acl import ObjectACL -from gcloud.streaming.http_wrapper import Request -from gcloud.streaming.http_wrapper import make_api_request -from gcloud.streaming.transfer import Download -from gcloud.streaming.transfer import RESUMABLE_UPLOAD -from gcloud.streaming.transfer import Upload - - -_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com' - - -class Blob(_PropertyMixin): - """A wrapper around Cloud Storage's concept of an ``Object``. - - :type name: string - :param name: The name of the blob. This corresponds to the - unique path of the object in the bucket. - - :type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket to which this blob belongs. - - :type chunk_size: integer - :param chunk_size: The size of a chunk of data whenever iterating (1 MB). - This must be a multiple of 256 KB per the API - specification. - """ - - _chunk_size = None # Default value for each instance. - - _CHUNK_SIZE_MULTIPLE = 256 * 1024 - """Number (256 KB, in bytes) that must divide the chunk size.""" - - def __init__(self, name, bucket, chunk_size=None): - super(Blob, self).__init__(name=name) - - self.chunk_size = chunk_size # Check that setter accepts value. - self.bucket = bucket - self._acl = ObjectACL(self) - - @property - def chunk_size(self): - """Get the blob's default chunk size. - - :rtype: integer or ``NoneType`` - :returns: The current blob's chunk size, if it is set. - """ - return self._chunk_size - - @chunk_size.setter - def chunk_size(self, value): - """Set the blob's default chunk size. - - :type value: integer or ``NoneType`` - :param value: The current blob's chunk size, if it is set. - - :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a - multiple of 256 KB. - """ - if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0: - raise ValueError('Chunk size must be a multiple of %d.' % ( - self._CHUNK_SIZE_MULTIPLE,)) - self._chunk_size = value - - @staticmethod - def path_helper(bucket_path, blob_name): - """Relative URL path for a blob. - - :type bucket_path: string - :param bucket_path: The URL path for a bucket. - - :type blob_name: string - :param blob_name: The name of the blob. - - :rtype: string - :returns: The relative URL path for ``blob_name``. - """ - return bucket_path + '/o/' + quote(blob_name, safe='') - - @property - def acl(self): - """Create our ACL on demand.""" - return self._acl - - def __repr__(self): - if self.bucket: - bucket_name = self.bucket.name - else: - bucket_name = None - - return '' % (bucket_name, self.name) - - @property - def path(self): - """Getter property for the URL path to this Blob. - - :rtype: string - :returns: The URL path to this Blob. - """ - if not self.name: - raise ValueError('Cannot determine path without a blob name.') - - return self.path_helper(self.bucket.path, self.name) - - @property - def client(self): - """The client bound to this blob.""" - return self.bucket.client - - @property - def public_url(self): - """The public URL for this blob's object. - - :rtype: `string` - :returns: The public URL for this blob. 
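# How a Blob's URL-related helpers compose, using hypothetical names (not
# from the original file).  Constructing the objects makes no API calls.
from gcloud import storage
from gcloud.storage.bucket import Bucket

client = storage.Client()
bucket = Bucket(client, name='my-bucket')
blob = bucket.blob('reports/2015 summary.csv')

print(blob.path)        # /b/my-bucket/o/reports%2F2015%20summary.csv
print(blob.public_url)  # https://storage.googleapis.com/my-bucket/reports%2F2015%20summary.csv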
- """ - return '{storage_base_url}/{bucket_name}/{quoted_name}'.format( - storage_base_url='https://storage.googleapis.com', - bucket_name=self.bucket.name, - quoted_name=quote(self.name, safe='')) - - def generate_signed_url(self, expiration, method='GET', - content_type=None, - generation=None, response_disposition=None, - response_type=None, client=None, credentials=None): - """Generates a signed URL for this blob. - - .. note:: - - If you are on Google Compute Engine, you can't generate a signed - URL. Follow `Issue 922`_ for updates on this. If you'd like to - be able to generate a signed URL from GCE, you can use a standard - service account from a JSON file rather than a GCE service account. - - .. _Issue 922: https://github.com/GoogleCloudPlatform/\ - gcloud-python/issues/922 - - If you have a blob that you want to allow access to for a set - amount of time, you can use this method to generate a URL that - is only valid within a certain time period. - - This is particularly useful if you don't want publicly - accessible blobs, but don't want to require users to explicitly - log in. - - :type expiration: int, long, datetime.datetime, datetime.timedelta - :param expiration: When the signed URL should expire. - - :type method: str - :param method: The HTTP verb that will be used when requesting the URL. - - :type content_type: str - :param content_type: (Optional) The content type of the object - referenced by ``resource``. - - :type generation: str - :param generation: (Optional) A value that indicates which generation - of the resource to fetch. - - :type response_disposition: str - :param response_disposition: (Optional) Content disposition of - responses to requests for the signed URL. - For example, to enable the signed URL - to initiate a file of ``blog.png``, use - the value - ``'attachment; filename=blob.png'``. - - :type response_type: str - :param response_type: (Optional) Content type of responses to requests - for the signed URL. Used to over-ride the content - type of the underlying blob/object. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: (Optional) The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: (Optional) The OAuth2 credentials to use to sign - the URL. Defaults to the credentials stored on the - client used. - - :rtype: str - :returns: A signed URL you can use to access the resource - until expiration. - """ - resource = '/{bucket_name}/{quoted_name}'.format( - bucket_name=self.bucket.name, - quoted_name=quote(self.name, safe='')) - - if credentials is None: - client = self._require_client(client) - credentials = client._connection.credentials - - return generate_signed_url( - credentials, resource=resource, - api_access_endpoint=_API_ACCESS_ENDPOINT, - expiration=expiration, method=method, - content_type=content_type, - response_type=response_type, - response_disposition=response_disposition, - generation=generation) - - def exists(self, client=None): - """Determines whether or not this blob exists. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: boolean - :returns: True if the blob exists in Cloud Storage. 
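# A sketch of generate_signed_url() as defined above (not from the original
# file).  Signing needs credentials with a private key, for example a service
# account JSON key; paths and names are hypothetical.
import datetime

from gcloud import storage

client = storage.Client.from_service_account_json('/path/to/creds.json')
bucket = client.get_bucket('my-bucket')
blob = bucket.blob('private-report.pdf')

url = blob.generate_signed_url(
    expiration=datetime.timedelta(hours=1),   # int / datetime also accepted
    method='GET',
    response_disposition='attachment; filename=report.pdf')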
- """ - client = self._require_client(client) - try: - # We only need the status code (200 or not) so we seek to - # minimize the returned payload. - query_params = {'fields': 'name'} - # We intentionally pass `_target_object=None` since fields=name - # would limit the local properties. - client.connection.api_request(method='GET', path=self.path, - query_params=query_params, - _target_object=None) - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - return True - except NotFound: - return False - - def delete(self, client=None): - """Deletes a blob from Cloud Storage. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: :class:`Blob` - :returns: The blob that was just deleted. - :raises: :class:`gcloud.exceptions.NotFound` - (propagated from - :meth:`gcloud.storage.bucket.Bucket.delete_blob`). - """ - return self.bucket.delete_blob(self.name, client=client) - - def download_to_file(self, file_obj, client=None): - """Download the contents of this blob into a file-like object. - - .. note:: - - If the server-set property, :attr:`media_link`, is not yet - initialized, makes an additional API request to load it. - - :type file_obj: file - :param file_obj: A file handle to which to write the blob's data. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :raises: :class:`gcloud.exceptions.NotFound` - """ - client = self._require_client(client) - if self.media_link is None: # not yet loaded - self.reload() - - download_url = self.media_link - - # Use apitools 'Download' facility. - download = Download.from_stream(file_obj) - - if self.chunk_size is not None: - download.chunksize = self.chunk_size - - request = Request(download_url, 'GET') - - # Use the private ``_connection`` rather than the public - # ``.connection``, since the public connection may be a batch. A - # batch wraps a client's connection, but does not store the `http` - # object. The rest (API_BASE_URL and build_api_url) are also defined - # on the Batch class, but we just use the wrapped connection since - # it has all three (http, API_BASE_URL and build_api_url). - download.initialize_download(request, client._connection.http) - - def download_to_filename(self, filename, client=None): - """Download the contents of this blob into a named file. - - :type filename: string - :param filename: A filename to be passed to ``open``. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :raises: :class:`gcloud.exceptions.NotFound` - """ - with open(filename, 'wb') as file_obj: - self.download_to_file(file_obj, client=client) - - mtime = time.mktime(self.updated.timetuple()) - os.utime(file_obj.name, (mtime, mtime)) - - def download_as_string(self, client=None): - """Download the contents of this blob as a string. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :rtype: bytes - :returns: The data stored in this blob. 
- :raises: :class:`gcloud.exceptions.NotFound` - """ - string_buffer = BytesIO() - self.download_to_file(string_buffer, client=client) - return string_buffer.getvalue() - - def upload_from_file(self, file_obj, rewind=False, size=None, - content_type=None, num_retries=6, client=None): - """Upload the contents of this blob from a file-like object. - - The content type of the upload will either be - - The value passed in to the function (if any) - - The value stored on the current blob - - The default value of 'application/octet-stream' - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - :type file_obj: file - :param file_obj: A file handle open for reading. - - :type rewind: boolean - :param rewind: If True, seek to the beginning of the file handle before - writing the file to Cloud Storage. - - :type size: int - :param size: The number of bytes to read from the file handle. - If not provided, we'll try to guess the size using - :func:`os.fstat`. (If the file handle is not from the - filesystem this won't be possible.) - - :type content_type: string or ``NoneType`` - :param content_type: Optional type of content being uploaded. - - :type num_retries: integer - :param num_retries: Number of upload retries. Defaults to 6. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - - :raises: :class:`ValueError` if size is not passed in and can not be - determined - """ - client = self._require_client(client) - # Use the private ``_connection`` rather than the public - # ``.connection``, since the public connection may be a batch. A - # batch wraps a client's connection, but does not store the `http` - # object. The rest (API_BASE_URL and build_api_url) are also defined - # on the Batch class, but we just use the wrapped connection since - # it has all three (http, API_BASE_URL and build_api_url). - connection = client._connection - content_type = (content_type or self._properties.get('contentType') or - 'application/octet-stream') - - # Rewind the file if desired. - if rewind: - file_obj.seek(0, os.SEEK_SET) - - # Get the basic stats about the file. - total_bytes = size - if total_bytes is None: - if hasattr(file_obj, 'fileno'): - total_bytes = os.fstat(file_obj.fileno()).st_size - else: - raise ValueError('total bytes could not be determined. Please ' - 'pass an explicit size.') - headers = { - 'Accept': 'application/json', - 'Accept-Encoding': 'gzip, deflate', - 'User-Agent': connection.USER_AGENT, - } - - upload = Upload(file_obj, content_type, total_bytes, - auto_transfer=False) - - if self.chunk_size is not None: - upload.chunksize = self.chunk_size - - url_builder = _UrlBuilder(bucket_name=self.bucket.name, - object_name=self.name) - upload_config = _UploadConfig() - - # Temporary URL, until we know simple vs. resumable. - base_url = connection.API_BASE_URL + '/upload' - upload_url = connection.build_api_url(api_base_url=base_url, - path=self.bucket.path + '/o') - - # Use apitools 'Upload' facility. 
- request = Request(upload_url, 'POST', headers) - - upload.configure_request(upload_config, request, url_builder) - query_params = url_builder.query_params - base_url = connection.API_BASE_URL + '/upload' - request.url = connection.build_api_url(api_base_url=base_url, - path=self.bucket.path + '/o', - query_params=query_params) - upload.initialize_upload(request, connection.http) - - if upload.strategy == RESUMABLE_UPLOAD: - http_response = upload.stream_file(use_chunks=True) - else: - http_response = make_api_request(connection.http, request, - retries=num_retries) - response_content = http_response.content - if not isinstance(response_content, - six.string_types): # pragma: NO COVER Python3 - response_content = response_content.decode('utf-8') - self._set_properties(json.loads(response_content)) - - def upload_from_filename(self, filename, content_type=None, - client=None): - """Upload this blob's contents from the content of a named file. - - The content type of the upload will either be - - The value passed in to the function (if any) - - The value stored on the current blob - - The value given by mimetypes.guess_type - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - :type filename: string - :param filename: The path to the file. - - :type content_type: string or ``NoneType`` - :param content_type: Optional type of content being uploaded. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - """ - content_type = content_type or self._properties.get('contentType') - if content_type is None: - content_type, _ = mimetypes.guess_type(filename) - - with open(filename, 'rb') as file_obj: - self.upload_from_file(file_obj, content_type=content_type, - client=client) - - def upload_from_string(self, data, content_type='text/plain', - client=None): - """Upload contents of this blob from the provided string. - - .. note:: - The effect of uploading to an existing blob depends on the - "versioning" and "lifecycle" policies defined on the blob's - bucket. In the absence of those policies, upload will - overwrite any existing contents. - - See the `object versioning - `_ and - `lifecycle `_ - API documents for details. - - :type data: bytes or text - :param data: The data to store in this blob. If the value is - text, it will be encoded as UTF-8. - - :type content_type: string - :param content_type: Optional type of content being uploaded. Defaults - to ``'text/plain'``. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. - """ - if isinstance(data, six.text_type): - data = data.encode('utf-8') - string_buffer = BytesIO() - string_buffer.write(data) - self.upload_from_file(file_obj=string_buffer, rewind=True, - size=len(data), content_type=content_type, - client=client) - - def make_public(self, client=None): - """Make this blob public giving all users read access. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the blob's bucket. 
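# The high-level upload/download helpers defined above, sketched with
# hypothetical names (not from the original file).  Content type falls back
# to the blob's stored value, then to a filename guess (upload_from_filename)
# or to 'text/plain' (upload_from_string).
from gcloud import storage

client = storage.Client()
bucket = client.get_bucket('my-bucket')

bucket.blob('reports/summary.csv').upload_from_filename('/tmp/summary.csv')
bucket.blob('notes/hello.txt').upload_from_string(u'hello world')

blob = bucket.blob('reports/summary.csv')
blob.download_to_filename('/tmp/summary-copy.csv')   # also mirrors 'updated' onto the file mtime
data = blob.download_as_string()                     # bytes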
- """ - self.acl.all().grant_read() - self.acl.save(client=client) - - cache_control = _scalar_property('cacheControl') - """HTTP 'Cache-Control' header for this object. - - See: https://tools.ietf.org/html/rfc7234#section-5.2 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - content_disposition = _scalar_property('contentDisposition') - """HTTP 'Content-Disposition' header for this object. - - See: https://tools.ietf.org/html/rfc6266 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - content_encoding = _scalar_property('contentEncoding') - """HTTP 'Content-Encoding' header for this object. - - See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - content_language = _scalar_property('contentLanguage') - """HTTP 'Content-Language' header for this object. - - See: http://tools.ietf.org/html/bcp47 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - content_type = _scalar_property('contentType') - """HTTP 'Content-Type' header for this object. - - See: https://tools.ietf.org/html/rfc2616#section-14.17 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - crc32c = _scalar_property('crc32c') - """CRC32C checksum for this object. - - See: http://tools.ietf.org/html/rfc4960#appendix-B and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - @property - def component_count(self): - """Number of underlying components that make up this object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: integer or ``NoneType`` - :returns: The component count (in case of a composed object) or - ``None`` if the property is not set locally. This property - will not be set on objects not created via ``compose``. - """ - component_count = self._properties.get('componentCount') - if component_count is not None: - return int(component_count) - - @property - def etag(self): - """Retrieve the ETag for the object. - - See: http://tools.ietf.org/html/rfc2616#section-3.11 and - https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: string or ``NoneType`` - :returns: The blob etag or ``None`` if the property is not set locally. - """ - return self._properties.get('etag') - - @property - def generation(self): - """Retrieve the generation for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: integer or ``NoneType`` - :returns: The generation of the blob or ``None`` if the property - is not set locally. - """ - generation = self._properties.get('generation') - if generation is not None: - return int(generation) - - @property - def id(self): - """Retrieve the ID for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: string or ``NoneType`` - :returns: The ID of the blob or ``None`` if the property is not - set locally. 
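# Server-populated, read-only properties like those above become available
# once the object's resource has been loaded, e.g. via Bucket.get_blob()
# (defined later in this diff); unset properties simply return None.  Names
# are hypothetical (not from the original file).
from gcloud import storage

client = storage.Client()
bucket = client.get_bucket('my-bucket')
blob = bucket.get_blob('reports/summary.csv')   # fetches the object resource
if blob is not None:
    print('%s, %d bytes, etag=%s' % (blob.content_type, blob.size, blob.etag))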
- """ - return self._properties.get('id') - - md5_hash = _scalar_property('md5Hash') - """MD5 hash for this object. - - See: http://tools.ietf.org/html/rfc4960#appendix-B and - https://cloud.google.com/storage/docs/json_api/v1/objects - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - @property - def media_link(self): - """Retrieve the media download URI for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: string or ``NoneType`` - :returns: The media link for the blob or ``None`` if the property is - not set locally. - """ - return self._properties.get('mediaLink') - - @property - def metadata(self): - """Retrieve arbitrary/application specific metadata for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: dict or ``NoneType`` - :returns: The metadata associated with the blob or ``None`` if the - property is not set locally. - """ - return copy.deepcopy(self._properties.get('metadata')) - - @metadata.setter - def metadata(self, value): - """Update arbitrary/application specific metadata for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :type value: dict or ``NoneType`` - :param value: The blob metadata to set. - """ - self._patch_property('metadata', value) - - @property - def metageneration(self): - """Retrieve the metageneration for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: integer or ``NoneType`` - :returns: The metageneration of the blob or ``None`` if the property - is not set locally. - """ - metageneration = self._properties.get('metageneration') - if metageneration is not None: - return int(metageneration) - - @property - def owner(self): - """Retrieve info about the owner of the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: dict or ``NoneType`` - :returns: Mapping of owner's role/ID. If the property is not set - locally, returns ``None``. - """ - return copy.deepcopy(self._properties.get('owner')) - - @property - def self_link(self): - """Retrieve the URI for the object. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: string or ``NoneType`` - :returns: The self link for the blob or ``None`` if the property is - not set locally. - """ - return self._properties.get('selfLink') - - @property - def size(self): - """Size of the object, in bytes. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: integer or ``NoneType`` - :returns: The size of the blob or ``None`` if the property - is not set locally. - """ - size = self._properties.get('size') - if size is not None: - return int(size) - - @property - def storage_class(self): - """Retrieve the storage class for the object. - - See: https://cloud.google.com/storage/docs/storage-classes - https://cloud.google.com/storage/docs/nearline-storage - https://cloud.google.com/storage/docs/durable-reduced-availability - - :rtype: string or ``NoneType`` - :returns: If set, one of "STANDARD", "NEARLINE", or - "DURABLE_REDUCED_AVAILABILITY", else ``None``. - """ - return self._properties.get('storageClass') - - @property - def time_deleted(self): - """Retrieve the timestamp at which the object was deleted. 
- - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the property is not set locally. If the blob has - not been deleted, this will never be set. - """ - value = self._properties.get('timeDeleted') - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def updated(self): - """Retrieve the timestamp at which the object was updated. - - See: https://cloud.google.com/storage/docs/json_api/v1/objects - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the property is not set locally. - """ - value = self._properties.get('updated') - if value is not None: - return _rfc3339_to_datetime(value) - - -class _UploadConfig(object): - """Faux message FBO apitools' 'configure_request'. - - Values extracted from apitools - 'samples/storage_sample/storage/storage_v1_client.py' - """ - accept = ['*/*'] - max_size = None - resumable_multipart = True - resumable_path = u'/resumable/upload/storage/v1/b/{bucket}/o' - simple_multipart = True - simple_path = u'/upload/storage/v1/b/{bucket}/o' - - -class _UrlBuilder(object): - """Faux builder FBO apitools' 'configure_request'""" - def __init__(self, bucket_name, object_name): - self.query_params = {'name': object_name} - self._bucket_name = bucket_name - self._relative_path = '' diff --git a/gcloud/storage/bucket.py b/gcloud/storage/bucket.py deleted file mode 100644 index d4e82b61bdb5..000000000000 --- a/gcloud/storage/bucket.py +++ /dev/null @@ -1,829 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud storage buckets.""" - -import copy - -import six - -from gcloud._helpers import _rfc3339_to_datetime -from gcloud.exceptions import NotFound -from gcloud.iterator import Iterator -from gcloud.storage._helpers import _PropertyMixin -from gcloud.storage._helpers import _scalar_property -from gcloud.storage.acl import BucketACL -from gcloud.storage.acl import DefaultObjectACL -from gcloud.storage.blob import Blob - - -class _BlobIterator(Iterator): - """An iterator listing blobs in a bucket - - You shouldn't have to use this directly, but instead should use the - :class:`gcloud.storage.blob.Bucket.list_blobs` method. - - :type bucket: :class:`gcloud.storage.bucket.Bucket` - :param bucket: The bucket from which to list blobs. - - :type extra_params: dict or None - :param extra_params: Extra query string parameters for the API call. - - :type client: :class:`gcloud.storage.client.Client` - :param client: Optional. The client to use for making connections. - Defaults to the bucket's client. 
- """ - def __init__(self, bucket, extra_params=None, client=None): - if client is None: - client = bucket.client - self.bucket = bucket - self.prefixes = set() - self._current_prefixes = None - super(_BlobIterator, self).__init__( - client=client, path=bucket.path + '/o', - extra_params=extra_params) - - def get_items_from_response(self, response): - """Yield :class:`.storage.blob.Blob` items from response. - - :type response: dict - :param response: The JSON API response for a page of blobs. - """ - self._current_prefixes = tuple(response.get('prefixes', ())) - self.prefixes.update(self._current_prefixes) - for item in response.get('items', []): - name = item.get('name') - blob = Blob(name, bucket=self.bucket) - blob._set_properties(item) - yield blob - - -class Bucket(_PropertyMixin): - """A class representing a Bucket on Cloud Storage. - - :type client: :class:`gcloud.storage.client.Client` - :param client: A client which holds credentials and project configuration - for the bucket (which requires a project). - - :type name: string - :param name: The name of the bucket. - """ - _iterator_class = _BlobIterator - - _MAX_OBJECTS_FOR_ITERATION = 256 - """Maximum number of existing objects allowed in iteration. - - This is used in Bucket.delete() and Bucket.make_public(). - """ - - _STORAGE_CLASSES = ('STANDARD', 'NEARLINE', 'DURABLE_REDUCED_AVAILABILITY') - - def __init__(self, client, name=None): - super(Bucket, self).__init__(name=name) - self._client = client - self._acl = BucketACL(self) - self._default_object_acl = DefaultObjectACL(self) - - def __repr__(self): - return '' % self.name - - @property - def client(self): - """The client bound to this bucket.""" - return self._client - - def blob(self, blob_name, chunk_size=None): - """Factory constructor for blob object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a blob object owned by this bucket. - - :type blob_name: string - :param blob_name: The name of the blob to be instantiated. - - :type chunk_size: integer - :param chunk_size: The size of a chunk of data whenever iterating - (1 MB). This must be a multiple of 256 KB per the - API specification. - - :rtype: :class:`gcloud.storage.blob.Blob` - :returns: The blob object created. - """ - return Blob(name=blob_name, bucket=self, chunk_size=chunk_size) - - def exists(self, client=None): - """Determines whether or not this bucket exists. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: boolean - :returns: True if the bucket exists in Cloud Storage. - """ - client = self._require_client(client) - try: - # We only need the status code (200 or not) so we seek to - # minimize the returned payload. - query_params = {'fields': 'name'} - # We intentionally pass `_target_object=None` since fields=name - # would limit the local properties. - client.connection.api_request(method='GET', path=self.path, - query_params=query_params, - _target_object=None) - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - return True - except NotFound: - return False - - def create(self, client=None): - """Creates current bucket. - - If the bucket already exists, will raise - :class:`gcloud.exceptions.Conflict`. - - This implements "storage.buckets.insert". 
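# Instantiating a Bucket locally, then probing / creating it with the
# exists()/create() methods defined above (not from the original file).  The
# name is a hypothetical placeholder and must be globally unique.
from gcloud import storage
from gcloud.storage.bucket import Bucket

client = storage.Client()
bucket = Bucket(client, name='my-unique-bucket-name')
if not bucket.exists():
    bucket.create()   # raises gcloud.exceptions.Conflict if the name is taken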
- - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`gcloud.storage.bucket.Bucket` - :returns: The newly created bucket. - """ - client = self._require_client(client) - query_params = {'project': client.project} - properties = dict( - (key, self._properties[key]) for key in self._changes) - properties['name'] = self.name - api_response = client.connection.api_request( - method='POST', path='/b', query_params=query_params, - data=properties, _target_object=self) - self._set_properties(api_response) - - @property - def acl(self): - """Create our ACL on demand.""" - return self._acl - - @property - def default_object_acl(self): - """Create our defaultObjectACL on demand.""" - return self._default_object_acl - - @staticmethod - def path_helper(bucket_name): - """Relative URL path for a bucket. - - :type bucket_name: string - :param bucket_name: The bucket name in the path. - - :rtype: string - :returns: The relative URL path for ``bucket_name``. - """ - return '/b/' + bucket_name - - @property - def path(self): - """The URL path to this bucket.""" - if not self.name: - raise ValueError('Cannot determine path without bucket name.') - - return self.path_helper(self.name) - - def get_blob(self, blob_name, client=None): - """Get a blob object by name. - - This will return None if the blob doesn't exist:: - - >>> from gcloud import storage - >>> client = storage.Client() - >>> bucket = client.get_bucket('my-bucket') - >>> print bucket.get_blob('/path/to/blob.txt') - - >>> print bucket.get_blob('/does-not-exist.txt') - None - - :type blob_name: string - :param blob_name: The name of the blob to retrieve. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`gcloud.storage.blob.Blob` or None - :returns: The blob object if it exists, otherwise None. - """ - client = self._require_client(client) - blob = Blob(bucket=self, name=blob_name) - try: - response = client.connection.api_request( - method='GET', path=blob.path, _target_object=blob) - # NOTE: We assume response.get('name') matches `blob_name`. - blob._set_properties(response) - # NOTE: This will not fail immediately in a batch. However, when - # Batch.finish() is called, the resulting `NotFound` will be - # raised. - return blob - except NotFound: - return None - - def list_blobs(self, max_results=None, page_token=None, prefix=None, - delimiter=None, versions=None, - projection='noAcl', fields=None, client=None): - """Return an iterator used to find blobs in the bucket. - - :type max_results: integer or ``NoneType`` - :param max_results: maximum number of blobs to return. - - :type page_token: string - :param page_token: opaque marker for the next "page" of blobs. If not - passed, will return the first page of blobs. - - :type prefix: string or ``NoneType`` - :param prefix: optional prefix used to filter blobs. - - :type delimiter: string or ``NoneType`` - :param delimiter: optional delimter, used with ``prefix`` to - emulate hierarchy. - - :type versions: boolean or ``NoneType`` - :param versions: whether object versions should be returned as - separate blobs. - - :type projection: string or ``NoneType`` - :param projection: If used, must be 'full' or 'noAcl'. Defaults to - 'noAcl'. Specifies the set of properties to return. 
- - :type fields: string or ``NoneType`` - :param fields: Selector specifying which fields to include in a - partial response. Must be a list of fields. For example - to get a partial response with just the next page token - and the language of each blob returned: - 'items/contentLanguage,nextPageToken' - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`_BlobIterator`. - :returns: An iterator of blobs. - """ - extra_params = {} - - if max_results is not None: - extra_params['maxResults'] = max_results - - if prefix is not None: - extra_params['prefix'] = prefix - - if delimiter is not None: - extra_params['delimiter'] = delimiter - - if versions is not None: - extra_params['versions'] = versions - - extra_params['projection'] = projection - - if fields is not None: - extra_params['fields'] = fields - - result = self._iterator_class( - self, extra_params=extra_params, client=client) - # Page token must be handled specially since the base `Iterator` - # class has it as a reserved property. - if page_token is not None: - result.next_page_token = page_token - return result - - def delete(self, force=False, client=None): - """Delete this bucket. - - The bucket **must** be empty in order to submit a delete request. If - ``force=True`` is passed, this will first attempt to delete all the - objects / blobs in the bucket (i.e. try to empty the bucket). - - If the bucket doesn't exist, this will raise - :class:`gcloud.exceptions.NotFound`. If the bucket is not empty - (and ``force=False``), will raise :class:`gcloud.exceptions.Conflict`. - - If ``force=True`` and the bucket contains more than 256 objects / blobs - this will cowardly refuse to delete the objects (or the bucket). This - is to prevent accidental bucket deletion and to prevent extremely long - runtime of this method. - - :type force: boolean - :param force: If True, empties the bucket's objects then deletes it. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket - contains more than 256 objects / blobs. - """ - client = self._require_client(client) - if force: - blobs = list(self.list_blobs( - max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, - client=client)) - if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: - message = ( - 'Refusing to delete bucket with more than ' - '%d objects. If you actually want to delete ' - 'this bucket, please delete the objects ' - 'yourself before calling Bucket.delete().' - ) % (self._MAX_OBJECTS_FOR_ITERATION,) - raise ValueError(message) - - # Ignore 404 errors on delete. - self.delete_blobs(blobs, on_error=lambda blob: None, - client=client) - - # We intentionally pass `_target_object=None` since a DELETE - # request has no response value (whether in a standard request or - # in a batch request). - client.connection.api_request(method='DELETE', path=self.path, - _target_object=None) - - def delete_blob(self, blob_name, client=None): - """Deletes a blob from the current bucket. - - If the blob isn't found (backend 404), raises a - :class:`gcloud.exceptions.NotFound`. 
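# Hierarchical listing with list_blobs() as defined above, using hypothetical
# names (not from the original file).  ``delimiter`` emulates folders; the
# iterator collects the matching "subdirectories" in ``prefixes``.
from gcloud import storage

client = storage.Client()
bucket = client.get_bucket('my-bucket')

iterator = bucket.list_blobs(prefix='logs/', delimiter='/')
blobs = list(iterator)        # Blob objects directly under 'logs/'
print(iterator.prefixes)      # e.g. set(['logs/2015/', 'logs/2016/'])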
- - For example:: - - >>> from gcloud.exceptions import NotFound - >>> from gcloud import storage - >>> client = storage.Client() - >>> bucket = client.get_bucket('my-bucket') - >>> print bucket.list_blobs() - [] - >>> bucket.delete_blob('my-file.txt') - >>> try: - ... bucket.delete_blob('doesnt-exist') - ... except NotFound: - ... pass - - :type blob_name: string - :param blob_name: A blob name to delete. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`gcloud.exceptions.NotFound` (to suppress - the exception, call ``delete_blobs``, passing a no-op - ``on_error`` callback, e.g.:: - - >>> bucket.delete_blobs([blob], on_error=lambda blob: None) - """ - client = self._require_client(client) - blob_path = Blob.path_helper(self.path, blob_name) - # We intentionally pass `_target_object=None` since a DELETE - # request has no response value (whether in a standard request or - # in a batch request). - client.connection.api_request(method='DELETE', path=blob_path, - _target_object=None) - - def delete_blobs(self, blobs, on_error=None, client=None): - """Deletes a list of blobs from the current bucket. - - Uses :func:`Bucket.delete_blob` to delete each individual blob. - - :type blobs: list of string or :class:`gcloud.storage.blob.Blob` - :param blobs: A list of blob names or Blob objects to delete. - - :type on_error: a callable taking (blob) - :param on_error: If not ``None``, called once for each blob raising - :class:`gcloud.exceptions.NotFound`; - otherwise, the exception is propagated. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :raises: :class:`gcloud.exceptions.NotFound` (if - `on_error` is not passed). - """ - for blob in blobs: - try: - blob_name = blob - if not isinstance(blob_name, six.string_types): - blob_name = blob.name - self.delete_blob(blob_name, client=client) - except NotFound: - if on_error is not None: - on_error(blob) - else: - raise - - def copy_blob(self, blob, destination_bucket, new_name=None, - client=None): - """Copy the given blob to the given bucket, optionally with a new name. - - :type blob: :class:`gcloud.storage.blob.Blob` - :param blob: The blob to be copied. - - :type destination_bucket: :class:`gcloud.storage.bucket.Bucket` - :param destination_bucket: The bucket into which the blob should be - copied. - - :type new_name: string - :param new_name: (optional) the new name for the copied file. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`gcloud.storage.blob.Blob` - :returns: The new Blob. - """ - client = self._require_client(client) - if new_name is None: - new_name = blob.name - new_blob = Blob(bucket=destination_bucket, name=new_name) - api_path = blob.path + '/copyTo' + new_blob.path - copy_result = client.connection.api_request( - method='POST', path=api_path, _target_object=new_blob) - new_blob._set_properties(copy_result) - return new_blob - - def rename_blob(self, blob, new_name, client=None): - """Rename the given blob using copy and delete operations. - - Effectively, copies blob to the same bucket with a new name, then - deletes the blob. - - .. 
warning:: - - This method will first duplicate the data and then delete the - old blob. This means that with very large objects renaming - could be a very (temporarily) costly or a very slow operation. - - :type blob: :class:`gcloud.storage.blob.Blob` - :param blob: The blob to be renamed. - - :type new_name: string - :param new_name: The new name for this blob. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. - - :rtype: :class:`Blob` - :returns: The newly-renamed blob. - """ - new_blob = self.copy_blob(blob, self, new_name, client=client) - blob.delete(client=client) - return new_blob - - @property - def cors(self): - """Retrieve CORS policies configured for this bucket. - - See: http://www.w3.org/TR/cors/ and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: list of dictionaries - :returns: A sequence of mappings describing each CORS policy. - """ - return [copy.deepcopy(policy) - for policy in self._properties.get('cors', ())] - - @cors.setter - def cors(self, entries): - """Set CORS policies configured for this bucket. - - See: http://www.w3.org/TR/cors/ and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :type entries: list of dictionaries - :param entries: A sequence of mappings describing each CORS policy. - """ - self._patch_property('cors', entries) - - @property - def etag(self): - """Retrieve the ETag for the bucket. - - See: http://tools.ietf.org/html/rfc2616#section-3.11 and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: string or ``NoneType`` - :returns: The bucket etag or ``None`` if the property is not - set locally. - """ - return self._properties.get('etag') - - @property - def id(self): - """Retrieve the ID for the bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: string or ``NoneType`` - :returns: The ID of the bucket or ``None`` if the property is not - set locally. - """ - return self._properties.get('id') - - @property - def lifecycle_rules(self): - """Lifecycle rules configured for this bucket. - - See: https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: list(dict) - :returns: A sequence of mappings describing each lifecycle rule. - """ - info = self._properties.get('lifecycle', {}) - return [copy.deepcopy(rule) for rule in info.get('rule', ())] - - @lifecycle_rules.setter - def lifecycle_rules(self, rules): - """Update the lifecycle rules configured for this bucket. - - See: https://cloud.google.com/storage/docs/lifecycle and - https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: list(dict) - :returns: A sequence of mappings describing each lifecycle rule. - """ - self._patch_property('lifecycle', {'rule': rules}) - - location = _scalar_property('location') - """Retrieve location configured for this bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets and - https://cloud.google.com/storage/docs/concepts-techniques#specifyinglocations - - If the property is not set locally, returns ``None``. - - :rtype: string or ``NoneType`` - """ - - def get_logging(self): - """Return info about access logging for this bucket. - - See: https://cloud.google.com/storage/docs/accesslogs#status - - :rtype: dict or None - :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` - (if logging is enabled), or None (if not). 
- """ - info = self._properties.get('logging') - return copy.deepcopy(info) - - def enable_logging(self, bucket_name, object_prefix=''): - """Enable access logging for this bucket. - - See: https://cloud.google.com/storage/docs/accesslogs#delivery - - :type bucket_name: string - :param bucket_name: name of bucket in which to store access logs - - :type object_prefix: string - :param object_prefix: prefix for access log filenames - """ - info = {'logBucket': bucket_name, 'logObjectPrefix': object_prefix} - self._patch_property('logging', info) - - def disable_logging(self): - """Disable access logging for this bucket. - - See: https://cloud.google.com/storage/docs/accesslogs#disabling - """ - self._patch_property('logging', None) - - @property - def metageneration(self): - """Retrieve the metageneration for the bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: integer or ``NoneType`` - :returns: The metageneration of the bucket or ``None`` if the property - is not set locally. - """ - metageneration = self._properties.get('metageneration') - if metageneration is not None: - return int(metageneration) - - @property - def owner(self): - """Retrieve info about the owner of the bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: dict or ``NoneType`` - :returns: Mapping of owner's role/ID. If the property is not set - locally, returns ``None``. - """ - return copy.deepcopy(self._properties.get('owner')) - - @property - def project_number(self): - """Retrieve the number of the project to which the bucket is assigned. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: integer or ``NoneType`` - :returns: The project number that owns the bucket or ``None`` if the - property is not set locally. - """ - project_number = self._properties.get('projectNumber') - if project_number is not None: - return int(project_number) - - @property - def self_link(self): - """Retrieve the URI for the bucket. - - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: string or ``NoneType`` - :returns: The self link for the bucket or ``None`` if the property is - not set locally. - """ - return self._properties.get('selfLink') - - @property - def storage_class(self): - """Retrieve the storage class for the bucket. - - See: https://cloud.google.com/storage/docs/storage-classes - https://cloud.google.com/storage/docs/nearline-storage - https://cloud.google.com/storage/docs/durable-reduced-availability - - :rtype: string or ``NoneType`` - :returns: If set, one of "STANDARD", "NEARLINE", or - "DURABLE_REDUCED_AVAILABILITY", else ``None``. - """ - return self._properties.get('storageClass') - - @storage_class.setter - def storage_class(self, value): - """Set the storage class for the bucket. - - See: https://cloud.google.com/storage/docs/storage-classes - https://cloud.google.com/storage/docs/nearline-storage - https://cloud.google.com/storage/docs/durable-reduced-availability - - :type value: string - :param value: one of "STANDARD", "NEARLINE", or - "DURABLE_REDUCED_AVAILABILITY" - """ - if value not in self._STORAGE_CLASSES: - raise ValueError('Invalid storage class: %s' % (value,)) - self._patch_property('storageClass', value) - - @property - def time_created(self): - """Retrieve the timestamp at which the bucket was created. 
- - See: https://cloud.google.com/storage/docs/json_api/v1/buckets - - :rtype: :class:`datetime.datetime` or ``NoneType`` - :returns: Datetime object parsed from RFC3339 valid timestamp, or - ``None`` if the property is not set locally. - """ - value = self._properties.get('timeCreated') - if value is not None: - return _rfc3339_to_datetime(value) - - @property - def versioning_enabled(self): - """Is versioning enabled for this bucket? - - See: https://cloud.google.com/storage/docs/object-versioning for - details. - - :rtype: boolean - :returns: True if enabled, else False. - """ - versioning = self._properties.get('versioning', {}) - return versioning.get('enabled', False) - - @versioning_enabled.setter - def versioning_enabled(self, value): - """Enable versioning for this bucket. - - See: https://cloud.google.com/storage/docs/object-versioning for - details. - - :type value: convertible to boolean - :param value: should versioning be anabled for the bucket? - """ - self._patch_property('versioning', {'enabled': bool(value)}) - - def configure_website(self, main_page_suffix=None, not_found_page=None): - """Configure website-related properties. - - See: https://developers.google.com/storage/docs/website-configuration - - .. note:: - This (apparently) only works - if your bucket name is a domain name - (and to do that, you need to get approved somehow...). - - If you want this bucket to host a website, just provide the name - of an index page and a page to use when a blob isn't found:: - - >>> from gcloud import storage - >>> client = storage.Client() - >>> bucket = client.get_bucket(bucket_name) - >>> bucket.configure_website('index.html', '404.html') - - You probably should also make the whole bucket public:: - - >>> bucket.make_public(recursive=True, future=True) - - This says: "Make the bucket public, and all the stuff already in - the bucket, and anything else I add to the bucket. Just make it - all public." - - :type main_page_suffix: string - :param main_page_suffix: The page to use as the main page - of a directory. - Typically something like index.html. - - :type not_found_page: string - :param not_found_page: The file to use when a page isn't found. - """ - data = { - 'mainPageSuffix': main_page_suffix, - 'notFoundPage': not_found_page, - } - self._patch_property('website', data) - - def disable_website(self): - """Disable the website configuration for this bucket. - - This is really just a shortcut for setting the website-related - attributes to ``None``. - """ - return self.configure_website(None, None) - - def make_public(self, recursive=False, future=False, client=None): - """Make a bucket public. - - If ``recursive=True`` and the bucket contains more than 256 - objects / blobs this will cowardly refuse to make the objects public. - This is to prevent extremely long runtime of this method. - - :type recursive: boolean - :param recursive: If True, this will make all blobs inside the bucket - public as well. - - :type future: boolean - :param future: If True, this will make all objects created in the - future public as well. - - :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` - :param client: Optional. The client to use. If not passed, falls back - to the ``client`` stored on the current bucket. 
- """ - self.acl.all().grant_read() - self.acl.save(client=client) - - if future: - doa = self.default_object_acl - if not doa.loaded: - doa.reload(client=client) - doa.all().grant_read() - doa.save(client=client) - - if recursive: - blobs = list(self.list_blobs( - projection='full', - max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, - client=client)) - if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: - message = ( - 'Refusing to make public recursively with more than ' - '%d objects. If you actually want to make every object ' - 'in this bucket public, please do it on the objects ' - 'yourself.' - ) % (self._MAX_OBJECTS_FOR_ITERATION,) - raise ValueError(message) - - for blob in blobs: - blob.acl.all().grant_read() - blob.acl.save(client=client) diff --git a/gcloud/storage/client.py b/gcloud/storage/client.py deleted file mode 100644 index b3abe09e7913..000000000000 --- a/gcloud/storage/client.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Client for interacting with the Google Cloud Storage API.""" - - -from gcloud._helpers import _LocalStack -from gcloud.client import JSONClient -from gcloud.exceptions import NotFound -from gcloud.iterator import Iterator -from gcloud.storage.batch import Batch -from gcloud.storage.bucket import Bucket -from gcloud.storage.connection import Connection - - -class Client(JSONClient): - """Client to bundle configuration needed for API requests. - - :type project: string - :param project: the project which the client acts on behalf of. Will be - passed when creating a topic. If not passed, - falls back to the default inferred from the environment. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` or - :class:`NoneType` - :param credentials: The OAuth2 Credentials to use for the connection - owned by this client. If not passed (and if no ``http`` - object is passed), falls back to the default inferred - from the environment. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: An optional HTTP object to make requests. If not passed, an - ``http`` object is created that is bound to the - ``credentials`` for the current object. - """ - - _connection_class = Connection - - def __init__(self, project=None, credentials=None, http=None): - self._connection = None - super(Client, self).__init__(project=project, credentials=credentials, - http=http) - self._batch_stack = _LocalStack() - - @property - def connection(self): - """Get connection or batch on the client. - - :rtype: :class:`gcloud.storage.connection.Connection` - :returns: The connection set on the client, or the batch - if one is set. - """ - if self.current_batch is not None: - return self.current_batch - else: - return self._connection - - @connection.setter - def connection(self, value): - """Set connection on the client. 
- - Intended to be used by constructor (since the base class calls) - self.connection = connection - Will raise if the connection is set more than once. - - :type value: :class:`gcloud.storage.connection.Connection` - :param value: The connection set on the client. - - :raises: :class:`ValueError` if connection has already been set. - """ - if self._connection is not None: - raise ValueError('Connection already set on client') - self._connection = value - - def _push_batch(self, batch): - """Push a batch onto our stack. - - "Protected", intended for use by batch context mgrs. - - :type batch: :class:`gcloud.storage.batch.Batch` - :param batch: newly-active batch - """ - self._batch_stack.push(batch) - - def _pop_batch(self): - """Pop a batch from our stack. - - "Protected", intended for use by batch context mgrs. - - :raises: IndexError if the stack is empty. - :rtype: :class:`gcloud.storage.batch.Batch` - :returns: the top-most batch/transaction, after removing it. - """ - return self._batch_stack.pop() - - @property - def current_batch(self): - """Currently-active batch. - - :rtype: :class:`gcloud.storage.batch.Batch` or ``NoneType`` (if - no batch is active). - :returns: The batch at the top of the batch stack. - """ - return self._batch_stack.top - - def bucket(self, bucket_name): - """Factory constructor for bucket object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a bucket object owned by this client. - - :type bucket_name: string - :param bucket_name: The name of the bucket to be instantiated. - - :rtype: :class:`gcloud.storage.bucket.Bucket` - :returns: The bucket object created. - """ - return Bucket(client=self, name=bucket_name) - - def batch(self): - """Factory constructor for batch object. - - .. note:: - This will not make an HTTP request; it simply instantiates - a batch object owned by this client. - - :rtype: :class:`gcloud.storage.batch.Batch` - :returns: The batch object created. - """ - return Batch(client=self) - - def get_bucket(self, bucket_name): - """Get a bucket by name. - - If the bucket isn't found, this will raise a - :class:`gcloud.storage.exceptions.NotFound`. - - For example:: - - >>> try: - >>> bucket = client.get_bucket('my-bucket') - >>> except gcloud.exceptions.NotFound: - >>> print 'Sorry, that bucket does not exist!' - - This implements "storage.buckets.get". - - :type bucket_name: string - :param bucket_name: The name of the bucket to get. - - :rtype: :class:`gcloud.storage.bucket.Bucket` - :returns: The bucket matching the name provided. - :raises: :class:`gcloud.exceptions.NotFound` - """ - bucket = Bucket(self, name=bucket_name) - bucket.reload(client=self) - return bucket - - def lookup_bucket(self, bucket_name): - """Get a bucket by name, returning None if not found. - - You can use this if you would rather check for a None value - than catching an exception:: - - >>> bucket = client.lookup_bucket('doesnt-exist') - >>> print bucket - None - >>> bucket = client.lookup_bucket('my-bucket') - >>> print bucket - - - :type bucket_name: string - :param bucket_name: The name of the bucket to get. - - :rtype: :class:`gcloud.storage.bucket.Bucket` - :returns: The bucket matching the name provided or None if not found. - """ - try: - return self.get_bucket(bucket_name) - except NotFound: - return None - - def create_bucket(self, bucket_name): - """Create a new bucket. - - For example:: - - >>> bucket = client.create_bucket('my-bucket') - >>> print bucket - - - This implements "storage.buckets.insert". 
- - If the bucket already exists, will raise - :class:`gcloud.exceptions.Conflict`. - - :type bucket_name: string - :param bucket_name: The bucket name to create. - - :rtype: :class:`gcloud.storage.bucket.Bucket` - :returns: The newly created bucket. - """ - bucket = Bucket(self, name=bucket_name) - bucket.create(client=self) - return bucket - - def list_buckets(self, max_results=None, page_token=None, prefix=None, - projection='noAcl', fields=None): - """Get all buckets in the project associated to the client. - - This will not populate the list of blobs available in each - bucket. - - >>> for bucket in client.list_buckets(): - >>> print bucket - - This implements "storage.buckets.list". - - :type max_results: integer or ``NoneType`` - :param max_results: Optional. Maximum number of buckets to return. - - :type page_token: string or ``NoneType`` - :param page_token: Optional. Opaque marker for the next "page" of - buckets. If not passed, will return the first page - of buckets. - - :type prefix: string or ``NoneType`` - :param prefix: Optional. Filter results to buckets whose names begin - with this prefix. - - :type projection: string or ``NoneType`` - :param projection: If used, must be 'full' or 'noAcl'. Defaults to - 'noAcl'. Specifies the set of properties to return. - - :type fields: string or ``NoneType`` - :param fields: Selector specifying which fields to include in a - partial response. Must be a list of fields. For example - to get a partial response with just the next page token - and the language of each bucket returned: - 'items/id,nextPageToken' - - :rtype: iterable of :class:`gcloud.storage.bucket.Bucket` objects. - :returns: All buckets belonging to this project. - """ - extra_params = {'project': self.project} - - if max_results is not None: - extra_params['maxResults'] = max_results - - if prefix is not None: - extra_params['prefix'] = prefix - - extra_params['projection'] = projection - - if fields is not None: - extra_params['fields'] = fields - - result = _BucketIterator(client=self, - extra_params=extra_params) - # Page token must be handled specially since the base `Iterator` - # class has it as a reserved property. - if page_token is not None: - result.next_page_token = page_token - return result - - -class _BucketIterator(Iterator): - """An iterator listing all buckets. - - You shouldn't have to use this directly, but instead should use the - helper methods on :class:`gcloud.storage.connection.Connection` - objects. - - :type client: :class:`gcloud.storage.client.Client` - :param client: The client to use for making connections. - - :type extra_params: dict or ``NoneType`` - :param extra_params: Extra query string parameters for the API call. - """ - - def __init__(self, client, extra_params=None): - super(_BucketIterator, self).__init__(client=client, path='/b', - extra_params=extra_params) - - def get_items_from_response(self, response): - """Factory method which yields :class:`.Bucket` items from a response. - - :type response: dict - :param response: The JSON API response for a page of buckets. - """ - for item in response.get('items', []): - name = item.get('name') - bucket = Bucket(self.client, name) - bucket._set_properties(item) - yield bucket diff --git a/gcloud/storage/connection.py b/gcloud/storage/connection.py deleted file mode 100644 index 7aec23b3844d..000000000000 --- a/gcloud/storage/connection.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Create / interact with gcloud storage connections.""" - -from gcloud import connection as base_connection - - -class Connection(base_connection.JSONConnection): - """A connection to Google Cloud Storage via the JSON REST API. - - :type credentials: :class:`oauth2client.client.OAuth2Credentials` - :param credentials: (Optional) The OAuth2 Credentials to use for this - connection. - - :type http: :class:`httplib2.Http` or class that defines ``request()``. - :param http: (Optional) HTTP object to make requests. - """ - - API_BASE_URL = base_connection.API_BASE_URL - """The base of the API call URL.""" - - API_VERSION = 'v1' - """The version of the API, used in building the API call's URL.""" - - API_URL_TEMPLATE = '{api_base_url}/storage/{api_version}{path}' - """A template for the URL of a particular API call.""" - - SCOPE = ('https://www.googleapis.com/auth/devstorage.full_control', - 'https://www.googleapis.com/auth/devstorage.read_only', - 'https://www.googleapis.com/auth/devstorage.read_write') - """The scopes required for authenticating as a Cloud Storage consumer.""" diff --git a/gcloud/storage/test__helpers.py b/gcloud/storage/test__helpers.py deleted file mode 100644 index 815cd58ab65e..000000000000 --- a/gcloud/storage/test__helpers.py +++ /dev/null @@ -1,223 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class Test_PropertyMixin(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage._helpers import _PropertyMixin - return _PropertyMixin - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def _derivedClass(self, path=None): - - class Derived(self._getTargetClass()): - - client = None - - @property - def path(self): - return path - - return Derived - - def test_path_is_abstract(self): - mixin = self._makeOne() - self.assertRaises(NotImplementedError, lambda: mixin.path) - - def test_client_is_abstract(self): - mixin = self._makeOne() - self.assertRaises(NotImplementedError, lambda: mixin.client) - - def test_reload(self): - connection = _Connection({'foo': 'Foo'}) - client = _Client(connection) - derived = self._derivedClass('/path')() - # Make sure changes is not a set, so we can observe a change. 
- derived._changes = object() - derived.reload(client=client) - self.assertEqual(derived._properties, {'foo': 'Foo'}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'GET') - self.assertEqual(kw[0]['path'], '/path') - self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'}) - # Make sure changes get reset by reload. - self.assertEqual(derived._changes, set()) - - def test__set_properties(self): - mixin = self._makeOne() - self.assertEqual(mixin._properties, {}) - VALUE = object() - mixin._set_properties(VALUE) - self.assertEqual(mixin._properties, VALUE) - - def test__patch_property(self): - derived = self._derivedClass()() - derived._patch_property('foo', 'Foo') - self.assertEqual(derived._properties, {'foo': 'Foo'}) - - def test_patch(self): - connection = _Connection({'foo': 'Foo'}) - client = _Client(connection) - derived = self._derivedClass('/path')() - # Make sure changes is non-empty, so we can observe a change. - BAR = object() - BAZ = object() - derived._properties = {'bar': BAR, 'baz': BAZ} - derived._changes = set(['bar']) # Ignore baz. - derived.patch(client=client) - self.assertEqual(derived._properties, {'foo': 'Foo'}) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/path') - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - # Since changes does not include `baz`, we don't see it sent. - self.assertEqual(kw[0]['data'], {'bar': BAR}) - # Make sure changes get reset by patch(). - self.assertEqual(derived._changes, set()) - - -class Test__scalar_property(unittest2.TestCase): - - def _callFUT(self, fieldName): - from gcloud.storage._helpers import _scalar_property - return _scalar_property(fieldName) - - def test_getter(self): - - class Test(object): - def __init__(self, **kw): - self._properties = kw.copy() - do_re_mi = self._callFUT('solfege') - - test = Test(solfege='Latido') - self.assertEqual(test.do_re_mi, 'Latido') - - def test_setter(self): - - class Test(object): - def _patch_property(self, name, value): - self._patched = (name, value) - do_re_mi = self._callFUT('solfege') - - test = Test() - test.do_re_mi = 'Latido' - self.assertEqual(test._patched, ('solfege', 'Latido')) - - -class Test__base64_md5hash(unittest2.TestCase): - - def _callFUT(self, bytes_to_sign): - from gcloud.storage._helpers import _base64_md5hash - return _base64_md5hash(bytes_to_sign) - - def test_it(self): - from io import BytesIO - BYTES_TO_SIGN = b'FOO' - BUFFER = BytesIO() - BUFFER.write(BYTES_TO_SIGN) - BUFFER.seek(0) - - SIGNED_CONTENT = self._callFUT(BUFFER) - self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==') - - def test_it_with_stubs(self): - from gcloud._testing import _Monkey - from gcloud.storage import _helpers as MUT - - class _Buffer(object): - - def __init__(self, return_vals): - self.return_vals = return_vals - self._block_sizes = [] - - def read(self, block_size): - self._block_sizes.append(block_size) - return self.return_vals.pop() - - BASE64 = _Base64() - DIGEST_VAL = object() - BYTES_TO_SIGN = b'BYTES_TO_SIGN' - BUFFER = _Buffer([b'', BYTES_TO_SIGN]) - MD5 = _MD5(DIGEST_VAL) - - with _Monkey(MUT, base64=BASE64, md5=MD5): - SIGNED_CONTENT = self._callFUT(BUFFER) - - self.assertEqual(BUFFER._block_sizes, [8192, 8192]) - self.assertTrue(SIGNED_CONTENT is DIGEST_VAL) - self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL]) - self.assertEqual(MD5._called, [None]) - 
self.assertEqual(MD5.hash_obj.num_digest_calls, 1) - self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN]) - - -class _Connection(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def api_request(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _MD5Hash(object): - - def __init__(self, digest_val): - self.digest_val = digest_val - self.num_digest_calls = 0 - self._blocks = [] - - def update(self, block): - self._blocks.append(block) - - def digest(self): - self.num_digest_calls += 1 - return self.digest_val - - -class _MD5(object): - - def __init__(self, digest_val): - self.hash_obj = _MD5Hash(digest_val) - self._called = [] - - def __call__(self, data=None): - self._called.append(data) - return self.hash_obj - - -class _Base64(object): - - def __init__(self): - self._called_b64encode = [] - - def b64encode(self, value): - self._called_b64encode.append(value) - return value - - -class _Client(object): - - def __init__(self, connection): - self.connection = connection diff --git a/gcloud/storage/test_acl.py b/gcloud/storage/test_acl.py deleted file mode 100644 index 2fed5213bb27..000000000000 --- a/gcloud/storage/test_acl.py +++ /dev/null @@ -1,817 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class Test_ACLEntity(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.acl import _ACLEntity - return _ACLEntity - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_default_identifier(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - self.assertEqual(entity.type, TYPE) - self.assertEqual(entity.identifier, None) - self.assertEqual(entity.get_roles(), set()) - - def test_ctor_w_identifier(self): - TYPE = 'type' - ID = 'id' - entity = self._makeOne(TYPE, ID) - self.assertEqual(entity.type, TYPE) - self.assertEqual(entity.identifier, ID) - self.assertEqual(entity.get_roles(), set()) - - def test___str__no_identifier(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - self.assertEqual(str(entity), TYPE) - - def test___str__w_identifier(self): - TYPE = 'type' - ID = 'id' - entity = self._makeOne(TYPE, ID) - self.assertEqual(str(entity), '%s-%s' % (TYPE, ID)) - - def test_grant_simple(self): - TYPE = 'type' - ROLE = 'role' - entity = self._makeOne(TYPE) - entity.grant(ROLE) - self.assertEqual(entity.get_roles(), set([ROLE])) - - def test_grant_duplicate(self): - TYPE = 'type' - ROLE1 = 'role1' - ROLE2 = 'role2' - entity = self._makeOne(TYPE) - entity.grant(ROLE1) - entity.grant(ROLE2) - entity.grant(ROLE1) - self.assertEqual(entity.get_roles(), set([ROLE1, ROLE2])) - - def test_revoke_miss(self): - TYPE = 'type' - ROLE = 'nonesuch' - entity = self._makeOne(TYPE) - entity.revoke(ROLE) - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_hit(self): - TYPE = 'type' - ROLE1 = 'role1' - ROLE2 = 'role2' - entity = self._makeOne(TYPE) - entity.grant(ROLE1) - entity.grant(ROLE2) - entity.revoke(ROLE1) - self.assertEqual(entity.get_roles(), set([ROLE2])) - - def test_grant_read(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant_read() - self.assertEqual(entity.get_roles(), set([entity.READER_ROLE])) - - def test_grant_write(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant_write() - self.assertEqual(entity.get_roles(), set([entity.WRITER_ROLE])) - - def test_grant_owner(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant_owner() - self.assertEqual(entity.get_roles(), set([entity.OWNER_ROLE])) - - def test_revoke_read(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant(entity.READER_ROLE) - entity.revoke_read() - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_write(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant(entity.WRITER_ROLE) - entity.revoke_write() - self.assertEqual(entity.get_roles(), set()) - - def test_revoke_owner(self): - TYPE = 'type' - entity = self._makeOne(TYPE) - entity.grant(entity.OWNER_ROLE) - entity.revoke_owner() - self.assertEqual(entity.get_roles(), set()) - - -class Test_ACL(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.acl import ACL - return ACL - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - acl = self._makeOne() - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - - def test__ensure_loaded(self): - acl = self._makeOne() - - def _reload(): - acl._really_loaded = True - - acl.reload = _reload - acl._ensure_loaded() - self.assertTrue(acl._really_loaded) - - def test_client_is_abstract(self): - acl = self._makeOne() - self.assertRaises(NotImplementedError, lambda: acl.client) - - def test_reset(self): - TYPE = 'type' - ID = 'id' - acl = 
self._makeOne() - acl.loaded = True - acl.entity(TYPE, ID) - acl.reset() - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - - def test___iter___empty_eager(self): - acl = self._makeOne() - acl.loaded = True - self.assertEqual(list(acl), []) - - def test___iter___empty_lazy(self): - acl = self._makeOne() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertEqual(list(acl), []) - self.assertTrue(acl.loaded) - - def test___iter___non_empty_no_roles(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertEqual(list(acl), []) - - def test___iter___non_empty_w_roles(self): - TYPE = 'type' - ID = 'id' - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - entity.grant(ROLE) - self.assertEqual(list(acl), - [{'entity': '%s-%s' % (TYPE, ID), 'role': ROLE}]) - - def test___iter___non_empty_w_empty_role(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - entity.grant('') - self.assertEqual(list(acl), []) - - def test_entity_from_dict_allUsers_eager(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity_from_dict({'entity': 'allUsers', 'role': ROLE}) - self.assertEqual(entity.type, 'allUsers') - self.assertEqual(entity.identifier, None) - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), - [{'entity': 'allUsers', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_allAuthenticatedUsers(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity_from_dict({'entity': 'allAuthenticatedUsers', - 'role': ROLE}) - self.assertEqual(entity.type, 'allAuthenticatedUsers') - self.assertEqual(entity.identifier, None) - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), - [{'entity': 'allAuthenticatedUsers', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_string_w_hyphen(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity_from_dict({'entity': 'type-id', 'role': ROLE}) - self.assertEqual(entity.type, 'type') - self.assertEqual(entity.identifier, 'id') - self.assertEqual(entity.get_roles(), set([ROLE])) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_from_dict_string_wo_hyphen(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - self.assertRaises(ValueError, - acl.entity_from_dict, - {'entity': 'bogus', 'role': ROLE}) - self.assertEqual(list(acl.get_entities()), []) - - def test_has_entity_miss_str_eager(self): - acl = self._makeOne() - acl.loaded = True - self.assertFalse(acl.has_entity('nonesuch')) - - def test_has_entity_miss_str_lazy(self): - acl = self._makeOne() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertFalse(acl.has_entity('nonesuch')) - self.assertTrue(acl.loaded) - - def test_has_entity_miss_entity(self): - from gcloud.storage.acl import _ACLEntity - TYPE = 'type' - ID = 'id' - entity = _ACLEntity(TYPE, ID) - acl = self._makeOne() - acl.loaded = True - self.assertFalse(acl.has_entity(entity)) - - def test_has_entity_hit_str(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID))) - - def 
test_has_entity_hit_entity(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity(entity)) - - def test_get_entity_miss_str_no_default_eager(self): - acl = self._makeOne() - acl.loaded = True - self.assertEqual(acl.get_entity('nonesuch'), None) - - def test_get_entity_miss_str_no_default_lazy(self): - acl = self._makeOne() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertEqual(acl.get_entity('nonesuch'), None) - self.assertTrue(acl.loaded) - - def test_get_entity_miss_entity_no_default(self): - from gcloud.storage.acl import _ACLEntity - TYPE = 'type' - ID = 'id' - entity = _ACLEntity(TYPE, ID) - acl = self._makeOne() - acl.loaded = True - self.assertEqual(acl.get_entity(entity), None) - - def test_get_entity_miss_str_w_default(self): - DEFAULT = object() - acl = self._makeOne() - acl.loaded = True - self.assertTrue(acl.get_entity('nonesuch', DEFAULT) is DEFAULT) - - def test_get_entity_miss_entity_w_default(self): - from gcloud.storage.acl import _ACLEntity - DEFAULT = object() - TYPE = 'type' - ID = 'id' - entity = _ACLEntity(TYPE, ID) - acl = self._makeOne() - acl.loaded = True - self.assertTrue(acl.get_entity(entity, DEFAULT) is DEFAULT) - - def test_get_entity_hit_str(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID))) - - def test_get_entity_hit_entity(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.has_entity(entity)) - - def test_add_entity_miss_eager(self): - from gcloud.storage.acl import _ACLEntity - TYPE = 'type' - ID = 'id' - ROLE = 'role' - entity = _ACLEntity(TYPE, ID) - entity.grant(ROLE) - acl = self._makeOne() - acl.loaded = True - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_add_entity_miss_lazy(self): - from gcloud.storage.acl import _ACLEntity - TYPE = 'type' - ID = 'id' - ROLE = 'role' - entity = _ACLEntity(TYPE, ID) - entity.grant(ROLE) - acl = self._makeOne() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - self.assertTrue(acl.loaded) - - def test_add_entity_hit(self): - from gcloud.storage.acl import _ACLEntity - TYPE = 'type' - ID = 'id' - ENTITY_VAL = '%s-%s' % (TYPE, ID) - ROLE = 'role' - entity = _ACLEntity(TYPE, ID) - entity.grant(ROLE) - acl = self._makeOne() - acl.loaded = True - before = acl.entity(TYPE, ID) - acl.add_entity(entity) - self.assertTrue(acl.loaded) - self.assertFalse(acl.get_entity(ENTITY_VAL) is before) - self.assertTrue(acl.get_entity(ENTITY_VAL) is entity) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_miss(self): - TYPE = 'type' - ID = 'id' - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertTrue(acl.loaded) - entity.grant(ROLE) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_entity_hit(self): - TYPE = 'type' - ID = 'id' - ROLE = 'role' - acl = self._makeOne() 
- acl.loaded = True - before = acl.entity(TYPE, ID) - before.grant(ROLE) - entity = acl.entity(TYPE, ID) - self.assertTrue(entity is before) - self.assertEqual(list(acl), - [{'entity': 'type-id', 'role': ROLE}]) - self.assertEqual(list(acl.get_entities()), [entity]) - - def test_user(self): - ID = 'id' - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.user(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, 'user') - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), - [{'entity': 'user-%s' % ID, 'role': ROLE}]) - - def test_group(self): - ID = 'id' - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.group(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, 'group') - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), - [{'entity': 'group-%s' % ID, 'role': ROLE}]) - - def test_domain(self): - ID = 'id' - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.domain(ID) - entity.grant(ROLE) - self.assertEqual(entity.type, 'domain') - self.assertEqual(entity.identifier, ID) - self.assertEqual(list(acl), - [{'entity': 'domain-%s' % ID, 'role': ROLE}]) - - def test_all(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.all() - entity.grant(ROLE) - self.assertEqual(entity.type, 'allUsers') - self.assertEqual(entity.identifier, None) - self.assertEqual(list(acl), - [{'entity': 'allUsers', 'role': ROLE}]) - - def test_all_authenticated(self): - ROLE = 'role' - acl = self._makeOne() - acl.loaded = True - entity = acl.all_authenticated() - entity.grant(ROLE) - self.assertEqual(entity.type, 'allAuthenticatedUsers') - self.assertEqual(entity.identifier, None) - self.assertEqual(list(acl), - [{'entity': 'allAuthenticatedUsers', 'role': ROLE}]) - - def test_get_entities_empty_eager(self): - acl = self._makeOne() - acl.loaded = True - self.assertEqual(acl.get_entities(), []) - - def test_get_entities_empty_lazy(self): - acl = self._makeOne() - - def _reload(): - acl.loaded = True - - acl.reload = _reload - self.assertEqual(acl.get_entities(), []) - self.assertTrue(acl.loaded) - - def test_get_entities_nonempty(self): - TYPE = 'type' - ID = 'id' - acl = self._makeOne() - acl.loaded = True - entity = acl.entity(TYPE, ID) - self.assertEqual(acl.get_entities(), [entity]) - - def test_reload_missing(self): - # https://github.com/GoogleCloudPlatform/gcloud-python/issues/652 - ROLE = 'role' - connection = _Connection({}) - client = _Client(connection) - acl = self._makeOne() - acl.reload_path = '/testing/acl' - acl.loaded = True - acl.entity('allUsers', ROLE) - acl.reload(client=client) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'GET') - self.assertEqual(kw[0]['path'], '/testing/acl') - - def test_reload_empty_result_clears_local(self): - ROLE = 'role' - connection = _Connection({'items': []}) - client = _Client(connection) - acl = self._makeOne() - acl.reload_path = '/testing/acl' - acl.loaded = True - acl.entity('allUsers', ROLE) - acl.reload(client=client) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'GET') - self.assertEqual(kw[0]['path'], '/testing/acl') - - def test_reload_nonempty_result(self): - ROLE = 'role' - connection = _Connection( - {'items': [{'entity': 'allUsers', 'role': ROLE}]}) - client = _Client(connection) - acl = self._makeOne() - acl.reload_path = 
'/testing/acl' - acl.loaded = True - acl.reload(client=client) - self.assertTrue(acl.loaded) - self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'GET') - self.assertEqual(kw[0]['path'], '/testing/acl') - - def test_save_none_set_none_passed(self): - connection = _Connection() - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.save(client=client) - kw = connection._requested - self.assertEqual(len(kw), 0) - - def test_save_existing_missing_none_passed(self): - connection = _Connection({}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl.save(client=client) - self.assertEqual(list(acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': []}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - def test_save_no_acl(self): - ROLE = 'role' - AFTER = [{'entity': 'allUsers', 'role': ROLE}] - connection = _Connection({'acl': AFTER}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl.entity('allUsers').grant(ROLE) - acl.save(client=client) - self.assertEqual(list(acl), AFTER) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': AFTER}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - def test_save_w_acl(self): - ROLE1 = 'role1' - ROLE2 = 'role2' - STICKY = {'entity': 'allUsers', 'role': ROLE2} - new_acl = [{'entity': 'allUsers', 'role': ROLE1}] - connection = _Connection({'acl': [STICKY] + new_acl}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl.save(new_acl, client=client) - entries = list(acl) - self.assertEqual(len(entries), 2) - self.assertTrue(STICKY in entries) - self.assertTrue(new_acl[0] in entries) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': new_acl}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - def test_save_prefefined_invalid(self): - connection = _Connection() - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - with self.assertRaises(ValueError): - acl.save_predefined('bogus', client=client) - - def test_save_predefined_valid(self): - PREDEFINED = 'private' - connection = _Connection({'acl': []}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl.save_predefined(PREDEFINED, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': []}) - self.assertEqual(kw[0]['query_params'], - {'projection': 'full', 'predefinedAcl': PREDEFINED}) - - def test_save_predefined_w_XML_alias(self): - PREDEFINED_XML = 'project-private' - PREDEFINED_JSON = 'projectPrivate' - connection = _Connection({'acl': []}) - client = _Client(connection) - acl = self._makeOne() - 
acl.save_path = '/testing' - acl.loaded = True - acl.save_predefined(PREDEFINED_XML, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': []}) - self.assertEqual(kw[0]['query_params'], - {'projection': 'full', - 'predefinedAcl': PREDEFINED_JSON}) - - def test_save_predefined_valid_w_alternate_query_param(self): - # Cover case where subclass overrides _PREDEFINED_QUERY_PARAM - PREDEFINED = 'publicRead' - connection = _Connection({'acl': []}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl._PREDEFINED_QUERY_PARAM = 'alternate' - acl.save_predefined(PREDEFINED, client=client) - entries = list(acl) - self.assertEqual(len(entries), 0) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': []}) - self.assertEqual(kw[0]['query_params'], - {'projection': 'full', 'alternate': PREDEFINED}) - - def test_clear(self): - ROLE1 = 'role1' - ROLE2 = 'role2' - STICKY = {'entity': 'allUsers', 'role': ROLE2} - connection = _Connection({'acl': [STICKY]}) - client = _Client(connection) - acl = self._makeOne() - acl.save_path = '/testing' - acl.loaded = True - acl.entity('allUsers', ROLE1) - acl.clear(client=client) - self.assertEqual(list(acl), [STICKY]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/testing') - self.assertEqual(kw[0]['data'], {'acl': []}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - -class Test_BucketACL(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.acl import BucketACL - return BucketACL - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - NAME = 'name' - bucket = _Bucket(NAME) - acl = self._makeOne(bucket) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertTrue(acl.bucket is bucket) - self.assertEqual(acl.reload_path, '/b/%s/acl' % NAME) - self.assertEqual(acl.save_path, '/b/%s' % NAME) - - -class Test_DefaultObjectACL(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.acl import DefaultObjectACL - return DefaultObjectACL - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - NAME = 'name' - bucket = _Bucket(NAME) - acl = self._makeOne(bucket) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertTrue(acl.bucket is bucket) - self.assertEqual(acl.reload_path, '/b/%s/defaultObjectAcl' % NAME) - self.assertEqual(acl.save_path, '/b/%s' % NAME) - - -class Test_ObjectACL(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.acl import ObjectACL - return ObjectACL - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - NAME = 'name' - BLOB_NAME = 'blob-name' - bucket = _Bucket(NAME) - blob = _Blob(bucket, BLOB_NAME) - acl = self._makeOne(blob) - self.assertEqual(acl.entities, {}) - self.assertFalse(acl.loaded) - self.assertTrue(acl.blob is blob) - self.assertEqual(acl.reload_path, '/b/%s/o/%s/acl' % (NAME, BLOB_NAME)) - self.assertEqual(acl.save_path, '/b/%s/o/%s' % (NAME, BLOB_NAME)) - - 
-class _Blob(object): - - def __init__(self, bucket, blob): - self.bucket = bucket - self.blob = blob - - @property - def path(self): - return '%s/o/%s' % (self.bucket.path, self.blob) - - -class _Bucket(object): - - def __init__(self, name): - self.name = name - - @property - def path(self): - return '/b/%s' % self.name - - -class _Connection(object): - _delete_ok = False - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - self._deleted = [] - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: # pragma: NO COVER - raise NotFound('miss') - else: - return response - - -class _Client(object): - - def __init__(self, connection): - self.connection = connection diff --git a/gcloud/storage/test_batch.py b/gcloud/storage/test_batch.py deleted file mode 100644 index f50ba4fe153e..000000000000 --- a/gcloud/storage/test_batch.py +++ /dev/null @@ -1,625 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest2 - - -class TestMIMEApplicationHTTP(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.batch import MIMEApplicationHTTP - return MIMEApplicationHTTP - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_body_None(self): - METHOD = 'DELETE' - PATH = '/path/to/api' - LINES = [ - "DELETE /path/to/api HTTP/1.1", - "", - ] - mah = self._makeOne(METHOD, PATH, {}, None) - self.assertEqual(mah.get_content_type(), 'application/http') - self.assertEqual(mah.get_payload().splitlines(), LINES) - - def test_ctor_body_str(self): - METHOD = 'GET' - PATH = '/path/to/api' - BODY = 'ABC' - HEADERS = {'Content-Length': len(BODY), 'Content-Type': 'text/plain'} - LINES = [ - "GET /path/to/api HTTP/1.1", - "Content-Length: 3", - "Content-Type: text/plain", - "", - "ABC", - ] - mah = self._makeOne(METHOD, PATH, HEADERS, BODY) - self.assertEqual(mah.get_payload().splitlines(), LINES) - - def test_ctor_body_dict(self): - METHOD = 'GET' - PATH = '/path/to/api' - BODY = {'foo': 'bar'} - HEADERS = {} - LINES = [ - 'GET /path/to/api HTTP/1.1', - 'Content-Length: 14', - 'Content-Type: application/json', - '', - '{"foo": "bar"}', - ] - mah = self._makeOne(METHOD, PATH, HEADERS, BODY) - self.assertEqual(mah.get_payload().splitlines(), LINES) - - -class TestBatch(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.batch import Batch - return Batch - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - http = _HTTP() - connection = _Connection(http=http) - client = _Client(connection) - batch = self._makeOne(client) - self.assertTrue(batch._client is client) - self.assertEqual(len(batch._requests), 0) - self.assertEqual(len(batch._target_objects), 0) - - def test_current(self): - from gcloud.storage.client import Client - project = 'PROJECT' - 
credentials = _Credentials() - client = Client(project=project, credentials=credentials) - batch1 = self._makeOne(client) - self.assertTrue(batch1.current() is None) - - client._push_batch(batch1) - self.assertTrue(batch1.current() is batch1) - - batch2 = self._makeOne(client) - client._push_batch(batch2) - self.assertTrue(batch1.current() is batch2) - - def test__make_request_GET_normal(self): - from gcloud.storage.batch import _FutureDict - URL = 'http://example.com/api' - expected = _Response() - http = _HTTP((expected, '')) - connection = _Connection(http=http) - batch = self._makeOne(connection) - target = _MockObject() - response, content = batch._make_request('GET', URL, - target_object=target) - self.assertEqual(response.status, 204) - self.assertTrue(isinstance(content, _FutureDict)) - self.assertTrue(target._properties is content) - self.assertEqual(http._requests, []) - EXPECTED_HEADERS = [ - ('Accept-Encoding', 'gzip'), - ('Content-Length', '0'), - ] - solo_request, = batch._requests - self.assertEqual(solo_request[0], 'GET') - self.assertEqual(solo_request[1], URL) - headers = solo_request[2] - for key, value in EXPECTED_HEADERS: - self.assertEqual(headers[key], value) - self.assertEqual(solo_request[3], None) - - def test__make_request_POST_normal(self): - from gcloud.storage.batch import _FutureDict - URL = 'http://example.com/api' - http = _HTTP() # no requests expected - connection = _Connection(http=http) - batch = self._makeOne(connection) - target = _MockObject() - response, content = batch._make_request('POST', URL, data={'foo': 1}, - target_object=target) - self.assertEqual(response.status, 204) - self.assertTrue(isinstance(content, _FutureDict)) - self.assertTrue(target._properties is content) - self.assertEqual(http._requests, []) - EXPECTED_HEADERS = [ - ('Accept-Encoding', 'gzip'), - ('Content-Length', '10'), - ] - solo_request, = batch._requests - self.assertEqual(solo_request[0], 'POST') - self.assertEqual(solo_request[1], URL) - headers = solo_request[2] - for key, value in EXPECTED_HEADERS: - self.assertEqual(headers[key], value) - self.assertEqual(solo_request[3], {'foo': 1}) - - def test__make_request_PATCH_normal(self): - from gcloud.storage.batch import _FutureDict - URL = 'http://example.com/api' - http = _HTTP() # no requests expected - connection = _Connection(http=http) - batch = self._makeOne(connection) - target = _MockObject() - response, content = batch._make_request('PATCH', URL, data={'foo': 1}, - target_object=target) - self.assertEqual(response.status, 204) - self.assertTrue(isinstance(content, _FutureDict)) - self.assertTrue(target._properties is content) - self.assertEqual(http._requests, []) - EXPECTED_HEADERS = [ - ('Accept-Encoding', 'gzip'), - ('Content-Length', '10'), - ] - solo_request, = batch._requests - self.assertEqual(solo_request[0], 'PATCH') - self.assertEqual(solo_request[1], URL) - headers = solo_request[2] - for key, value in EXPECTED_HEADERS: - self.assertEqual(headers[key], value) - self.assertEqual(solo_request[3], {'foo': 1}) - - def test__make_request_DELETE_normal(self): - from gcloud.storage.batch import _FutureDict - URL = 'http://example.com/api' - http = _HTTP() # no requests expected - connection = _Connection(http=http) - batch = self._makeOne(connection) - target = _MockObject() - response, content = batch._make_request('DELETE', URL, - target_object=target) - self.assertEqual(response.status, 204) - self.assertTrue(isinstance(content, _FutureDict)) - self.assertTrue(target._properties is content) - 
self.assertEqual(http._requests, []) - EXPECTED_HEADERS = [ - ('Accept-Encoding', 'gzip'), - ('Content-Length', '0'), - ] - solo_request, = batch._requests - self.assertEqual(solo_request[0], 'DELETE') - self.assertEqual(solo_request[1], URL) - headers = solo_request[2] - for key, value in EXPECTED_HEADERS: - self.assertEqual(headers[key], value) - self.assertEqual(solo_request[3], None) - - def test__make_request_POST_too_many_requests(self): - URL = 'http://example.com/api' - http = _HTTP() # no requests expected - connection = _Connection(http=http) - batch = self._makeOne(connection) - batch._MAX_BATCH_SIZE = 1 - batch._requests.append(('POST', URL, {}, {'bar': 2})) - self.assertRaises(ValueError, - batch._make_request, 'POST', URL, data={'foo': 1}) - self.assertTrue(connection.http is http) - - def test_finish_empty(self): - http = _HTTP() # no requests expected - connection = _Connection(http=http) - batch = self._makeOne(connection) - self.assertRaises(ValueError, batch.finish) - self.assertTrue(connection.http is http) - - def _check_subrequest_no_payload(self, chunk, method, url): - lines = chunk.splitlines() - # blank + 2 headers + blank + request + blank + blank - self.assertEqual(len(lines), 7) - self.assertEqual(lines[0], '') - self.assertEqual(lines[1], 'Content-Type: application/http') - self.assertEqual(lines[2], 'MIME-Version: 1.0') - self.assertEqual(lines[3], '') - self.assertEqual(lines[4], '%s %s HTTP/1.1' % (method, url)) - self.assertEqual(lines[5], '') - self.assertEqual(lines[6], '') - - def _check_subrequest_payload(self, chunk, method, url, payload): - import json - lines = chunk.splitlines() - # blank + 2 headers + blank + request + 2 headers + blank + body - payload_str = json.dumps(payload) - self.assertEqual(lines[0], '') - self.assertEqual(lines[1], 'Content-Type: application/http') - self.assertEqual(lines[2], 'MIME-Version: 1.0') - self.assertEqual(lines[3], '') - self.assertEqual(lines[4], '%s %s HTTP/1.1' % (method, url)) - if method == 'GET': - self.assertEqual(len(lines), 7) - self.assertEqual(lines[5], '') - self.assertEqual(lines[6], '') - else: - self.assertEqual(len(lines), 9) - self.assertEqual(lines[5], 'Content-Length: %d' % len(payload_str)) - self.assertEqual(lines[6], 'Content-Type: application/json') - self.assertEqual(lines[7], '') - self.assertEqual(json.loads(lines[8]), payload) - - def test_finish_nonempty(self): - import httplib2 - URL = 'http://api.example.com/other_api' - expected = _Response() - expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="' - http = _HTTP((expected, _THREE_PART_MIME_RESPONSE)) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._makeOne(client) - batch.API_BASE_URL = 'http://api.example.com' - batch._do_request('POST', URL, {}, {'foo': 1, 'bar': 2}, None) - batch._do_request('PATCH', URL, {}, {'bar': 3}, None) - batch._do_request('DELETE', URL, {}, None, None) - result = batch.finish() - self.assertEqual(len(result), len(batch._requests)) - response0 = httplib2.Response({ - 'content-length': '20', - 'content-type': 'application/json; charset=UTF-8', - 'status': '200', - }) - self.assertEqual(result[0], (response0, {'foo': 1, 'bar': 2})) - response1 = response0 - self.assertEqual(result[1], (response1, {u'foo': 1, u'bar': 3})) - response2 = httplib2.Response({ - 'content-length': '0', - 'status': '204', - }) - self.assertEqual(result[2], (response2, '')) - self.assertEqual(len(http._requests), 1) - method, uri, headers, body = http._requests[0] - 
self.assertEqual(method, 'POST') - self.assertEqual(uri, 'http://api.example.com/batch') - self.assertEqual(len(headers), 2) - ctype, boundary = [x.strip() - for x in headers['Content-Type'].split(';')] - self.assertEqual(ctype, 'multipart/mixed') - self.assertTrue(boundary.startswith('boundary="==')) - self.assertTrue(boundary.endswith('=="')) - self.assertEqual(headers['MIME-Version'], '1.0') - - divider = '--' + boundary[len('boundary="'):-1] - chunks = body.split(divider)[1:-1] # discard prolog / epilog - self.assertEqual(len(chunks), 3) - - self._check_subrequest_payload(chunks[0], 'POST', URL, - {'foo': 1, 'bar': 2}) - - self._check_subrequest_payload(chunks[1], 'PATCH', URL, {'bar': 3}) - - self._check_subrequest_no_payload(chunks[2], 'DELETE', URL) - - def test_finish_responses_mismatch(self): - URL = 'http://api.example.com/other_api' - expected = _Response() - expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="' - http = _HTTP((expected, _TWO_PART_MIME_RESPONSE_WITH_FAIL)) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._makeOne(client) - batch.API_BASE_URL = 'http://api.example.com' - batch._requests.append(('GET', URL, {}, None)) - self.assertRaises(ValueError, batch.finish) - - def test_finish_nonempty_with_status_failure(self): - from gcloud.exceptions import NotFound - URL = 'http://api.example.com/other_api' - expected = _Response() - expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="' - http = _HTTP((expected, _TWO_PART_MIME_RESPONSE_WITH_FAIL)) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._makeOne(client) - batch.API_BASE_URL = 'http://api.example.com' - target1 = _MockObject() - target2 = _MockObject() - batch._do_request('GET', URL, {}, None, target1) - batch._do_request('GET', URL, {}, None, target2) - # Make sure futures are not populated. 
- self.assertEqual([future for future in batch._target_objects], - [target1, target2]) - target2_future_before = target2._properties - self.assertRaises(NotFound, batch.finish) - self.assertEqual(target1._properties, - {'foo': 1, 'bar': 2}) - self.assertTrue(target2._properties is target2_future_before) - - self.assertEqual(len(http._requests), 1) - method, uri, headers, body = http._requests[0] - self.assertEqual(method, 'POST') - self.assertEqual(uri, 'http://api.example.com/batch') - self.assertEqual(len(headers), 2) - ctype, boundary = [x.strip() - for x in headers['Content-Type'].split(';')] - self.assertEqual(ctype, 'multipart/mixed') - self.assertTrue(boundary.startswith('boundary="==')) - self.assertTrue(boundary.endswith('=="')) - self.assertEqual(headers['MIME-Version'], '1.0') - - divider = '--' + boundary[len('boundary="'):-1] - chunks = body.split(divider)[1:-1] # discard prolog / epilog - self.assertEqual(len(chunks), 2) - - self._check_subrequest_payload(chunks[0], 'GET', URL, {}) - self._check_subrequest_payload(chunks[1], 'GET', URL, {}) - - def test_finish_nonempty_non_multipart_response(self): - URL = 'http://api.example.com/other_api' - expected = _Response() - expected['content-type'] = 'text/plain' - http = _HTTP((expected, 'NOT A MIME_RESPONSE')) - connection = _Connection(http=http) - client = _Client(connection) - batch = self._makeOne(client) - batch._requests.append(('POST', URL, {}, {'foo': 1, 'bar': 2})) - batch._requests.append(('PATCH', URL, {}, {'bar': 3})) - batch._requests.append(('DELETE', URL, {}, None)) - self.assertRaises(ValueError, batch.finish) - - def test_as_context_mgr_wo_error(self): - from gcloud.storage.client import Client - URL = 'http://example.com/api' - expected = _Response() - expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="' - http = _HTTP((expected, _THREE_PART_MIME_RESPONSE)) - project = 'PROJECT' - credentials = _Credentials() - client = Client(project=project, credentials=credentials) - client._connection._http = http - - self.assertEqual(list(client._batch_stack), []) - - target1 = _MockObject() - target2 = _MockObject() - target3 = _MockObject() - with self._makeOne(client) as batch: - self.assertEqual(list(client._batch_stack), [batch]) - batch._make_request('POST', URL, {'foo': 1, 'bar': 2}, - target_object=target1) - batch._make_request('PATCH', URL, {'bar': 3}, - target_object=target2) - batch._make_request('DELETE', URL, target_object=target3) - - self.assertEqual(list(client._batch_stack), []) - self.assertEqual(len(batch._requests), 3) - self.assertEqual(batch._requests[0][0], 'POST') - self.assertEqual(batch._requests[1][0], 'PATCH') - self.assertEqual(batch._requests[2][0], 'DELETE') - self.assertEqual(batch._target_objects, [target1, target2, target3]) - self.assertEqual(target1._properties, - {'foo': 1, 'bar': 2}) - self.assertEqual(target2._properties, - {'foo': 1, 'bar': 3}) - self.assertEqual(target3._properties, '') - - def test_as_context_mgr_w_error(self): - from gcloud.storage.batch import _FutureDict - from gcloud.storage.client import Client - URL = 'http://example.com/api' - http = _HTTP() - connection = _Connection(http=http) - project = 'PROJECT' - credentials = _Credentials() - client = Client(project=project, credentials=credentials) - client._connection = connection - - self.assertEqual(list(client._batch_stack), []) - - target1 = _MockObject() - target2 = _MockObject() - target3 = _MockObject() - try: - with self._makeOne(client) as batch: - self.assertEqual(list(client._batch_stack), 
[batch]) - batch._make_request('POST', URL, {'foo': 1, 'bar': 2}, - target_object=target1) - batch._make_request('PATCH', URL, {'bar': 3}, - target_object=target2) - batch._make_request('DELETE', URL, target_object=target3) - raise ValueError() - except ValueError: - pass - - self.assertEqual(list(client._batch_stack), []) - self.assertEqual(len(http._requests), 0) - self.assertEqual(len(batch._requests), 3) - self.assertEqual(batch._target_objects, [target1, target2, target3]) - # Since the context manager fails, finish will not get called and - # the _properties will still be futures. - self.assertTrue(isinstance(target1._properties, _FutureDict)) - self.assertTrue(isinstance(target2._properties, _FutureDict)) - self.assertTrue(isinstance(target3._properties, _FutureDict)) - - -class Test__unpack_batch_response(unittest2.TestCase): - - def _callFUT(self, response, content): - from gcloud.storage.batch import _unpack_batch_response - return _unpack_batch_response(response, content) - - def _unpack_helper(self, response, content): - import httplib2 - result = list(self._callFUT(response, content)) - self.assertEqual(len(result), 3) - response0 = httplib2.Response({ - 'content-length': '20', - 'content-type': 'application/json; charset=UTF-8', - 'status': '200', - }) - self.assertEqual(result[0], (response0, {u'bar': 2, u'foo': 1})) - response1 = response0 - self.assertEqual(result[1], (response1, {u'foo': 1, u'bar': 3})) - response2 = httplib2.Response({ - 'content-length': '0', - 'status': '204', - }) - self.assertEqual(result[2], (response2, '')) - - def test_bytes(self): - RESPONSE = {'content-type': b'multipart/mixed; boundary="DEADBEEF="'} - CONTENT = _THREE_PART_MIME_RESPONSE - self._unpack_helper(RESPONSE, CONTENT) - - def test_unicode(self): - RESPONSE = {'content-type': u'multipart/mixed; boundary="DEADBEEF="'} - CONTENT = _THREE_PART_MIME_RESPONSE.decode('utf-8') - self._unpack_helper(RESPONSE, CONTENT) - - -_TWO_PART_MIME_RESPONSE_WITH_FAIL = b"""\ ---DEADBEEF= -Content-Type: application/http -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 2} - ---DEADBEEF= -Content-Type: application/http -Content-ID: - -HTTP/1.1 404 Not Found -Content-Type: application/json; charset=UTF-8 -Content-Length: 35 - -{"error": {"message": "Not Found"}} - ---DEADBEEF=-- -""" - -_THREE_PART_MIME_RESPONSE = b"""\ ---DEADBEEF= -Content-Type: application/http -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 2} - ---DEADBEEF= -Content-Type: application/http -Content-ID: - -HTTP/1.1 200 OK -Content-Type: application/json; charset=UTF-8 -Content-Length: 20 - -{"foo": 1, "bar": 3} - ---DEADBEEF= -Content-Type: application/http -Content-ID: - -HTTP/1.1 204 No Content -Content-Length: 0 - ---DEADBEEF=-- -""" - - -class Test__FutureDict(unittest2.TestCase): - - def _makeOne(self, *args, **kw): - from gcloud.storage.batch import _FutureDict - return _FutureDict(*args, **kw) - - def test_get(self): - future = self._makeOne() - self.assertRaises(KeyError, future.get, None) - - def test___getitem__(self): - future = self._makeOne() - value = orig_value = object() - with self.assertRaises(KeyError): - value = future[None] - self.assertTrue(value is orig_value) - - def test___setitem__(self): - future = self._makeOne() - with self.assertRaises(KeyError): - future[None] = None - - -class _Connection(object): - - project = 'TESTING' - - def __init__(self, **kw): - 
self.__dict__.update(kw) - - def build_api_url(self, path, **_): # pragma: NO COVER - return 'http://api.example.com%s' % path - - def _make_request(self, method, url, data=None, content_type=None, - headers=None): - if content_type is not None: # pragma: NO COVER - headers['Content-Type'] = content_type - - return self.http.request(uri=url, method=method, - headers=headers, body=data) - - def api_request(self, method, path, query_params=None, - data=None, content_type=None, - api_base_url=None, api_version=None, - expect_json=True): # pragma: NO COVER - pass - - -class _Response(dict): - - def __init__(self, status=200, **kw): - self.status = status - super(_Response, self).__init__(**kw) - - -class _HTTP(object): - - def __init__(self, *responses): - self._requests = [] - self._responses = list(responses) - - def request(self, uri, method, headers, body): - self._requests.append((method, uri, headers, body)) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _MockObject(object): - pass - - -class _Client(object): - - def __init__(self, connection): - self._connection = connection - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self diff --git a/gcloud/storage/test_blob.py b/gcloud/storage/test_blob.py deleted file mode 100644 index 2d7778357a22..000000000000 --- a/gcloud/storage/test_blob.py +++ /dev/null @@ -1,1185 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class Test_Blob(unittest2.TestCase): - - def _makeOne(self, *args, **kw): - from gcloud.storage.blob import Blob - properties = kw.pop('properties', None) - blob = Blob(*args, **kw) - blob._properties = properties or {} - return blob - - def test_ctor(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - properties = {'key': 'value'} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertTrue(blob.bucket is bucket) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob._properties, properties) - self.assertFalse(blob._acl.loaded) - self.assertTrue(blob._acl.blob is blob) - - def test_chunk_size_ctor(self): - from gcloud.storage.blob import Blob - BLOB_NAME = 'blob-name' - BUCKET = object() - chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE - blob = self._makeOne(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size) - self.assertEqual(blob._chunk_size, chunk_size) - - def test_chunk_size_getter(self): - BLOB_NAME = 'blob-name' - BUCKET = object() - blob = self._makeOne(BLOB_NAME, bucket=BUCKET) - self.assertEqual(blob.chunk_size, None) - VALUE = object() - blob._chunk_size = VALUE - self.assertTrue(blob.chunk_size is VALUE) - - def test_chunk_size_setter(self): - BLOB_NAME = 'blob-name' - BUCKET = object() - blob = self._makeOne(BLOB_NAME, bucket=BUCKET) - self.assertEqual(blob._chunk_size, None) - blob._CHUNK_SIZE_MULTIPLE = 10 - blob.chunk_size = 20 - self.assertEqual(blob._chunk_size, 20) - - def test_chunk_size_setter_bad_value(self): - BLOB_NAME = 'blob-name' - BUCKET = object() - blob = self._makeOne(BLOB_NAME, bucket=BUCKET) - self.assertEqual(blob._chunk_size, None) - blob._CHUNK_SIZE_MULTIPLE = 10 - with self.assertRaises(ValueError): - blob.chunk_size = 11 - - def test_acl_property(self): - from gcloud.storage.acl import ObjectACL - FAKE_BUCKET = _Bucket() - blob = self._makeOne(None, bucket=FAKE_BUCKET) - acl = blob.acl - self.assertTrue(isinstance(acl, ObjectACL)) - self.assertTrue(acl is blob._acl) - - def test_path_no_bucket(self): - FAKE_BUCKET = object() - NAME = 'blob-name' - blob = self._makeOne(NAME, bucket=FAKE_BUCKET) - self.assertRaises(AttributeError, getattr, blob, 'path') - - def test_path_no_name(self): - bucket = _Bucket() - blob = self._makeOne(None, bucket=bucket) - self.assertRaises(ValueError, getattr, blob, 'path') - - def test_path_normal(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME) - - def test_path_w_slash_in_name(self): - BLOB_NAME = 'parent/child' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.path, '/b/name/o/parent%2Fchild') - - def test_public_url(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.public_url, - 'https://storage.googleapis.com/name/%s' % - BLOB_NAME) - - def test_public_url_w_slash_in_name(self): - BLOB_NAME = 'parent/child' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual( - blob.public_url, - 'https://storage.googleapis.com/name/parent%2Fchild') - - def _basic_generate_signed_url_helper(self, credentials=None): - from gcloud._testing import _Monkey - from gcloud.storage import blob as MUT - - BLOB_NAME = 'blob-name' - EXPIRATION = '2014-10-16T20:34:37.000Z' - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - URI = 
('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37.000Z') - - SIGNER = _Signer() - with _Monkey(MUT, generate_signed_url=SIGNER): - signed_uri = blob.generate_signed_url(EXPIRATION, - credentials=credentials) - self.assertEqual(signed_uri, URI) - - PATH = '/name/%s' % (BLOB_NAME,) - if credentials is None: - EXPECTED_ARGS = (_Connection.credentials,) - else: - EXPECTED_ARGS = (credentials,) - EXPECTED_KWARGS = { - 'api_access_endpoint': 'https://storage.googleapis.com', - 'expiration': EXPIRATION, - 'method': 'GET', - 'resource': PATH, - 'content_type': None, - 'response_type': None, - 'response_disposition': None, - 'generation': None, - } - self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)]) - - def test_generate_signed_url_w_default_method(self): - self._basic_generate_signed_url_helper() - - def test_generate_signed_url_w_content_type(self): - from gcloud._testing import _Monkey - from gcloud.storage import blob as MUT - - BLOB_NAME = 'blob-name' - EXPIRATION = '2014-10-16T20:34:37.000Z' - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37.000Z') - - SIGNER = _Signer() - CONTENT_TYPE = "text/html" - with _Monkey(MUT, generate_signed_url=SIGNER): - signed_url = blob.generate_signed_url(EXPIRATION, - content_type=CONTENT_TYPE) - self.assertEqual(signed_url, URI) - - PATH = '/name/%s' % (BLOB_NAME,) - EXPECTED_ARGS = (_Connection.credentials,) - EXPECTED_KWARGS = { - 'api_access_endpoint': 'https://storage.googleapis.com', - 'expiration': EXPIRATION, - 'method': 'GET', - 'resource': PATH, - 'content_type': CONTENT_TYPE, - 'response_type': None, - 'response_disposition': None, - 'generation': None, - } - self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)]) - - def test_generate_signed_url_w_credentials(self): - credentials = object() - self._basic_generate_signed_url_helper(credentials=credentials) - - def test_generate_signed_url_w_slash_in_name(self): - from gcloud._testing import _Monkey - from gcloud.storage import blob as MUT - - BLOB_NAME = 'parent/child' - EXPIRATION = '2014-10-16T20:34:37.000Z' - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37.000Z') - - SIGNER = _Signer() - with _Monkey(MUT, generate_signed_url=SIGNER): - signed_url = blob.generate_signed_url(EXPIRATION) - self.assertEqual(signed_url, URI) - - EXPECTED_ARGS = (_Connection.credentials,) - EXPECTED_KWARGS = { - 'api_access_endpoint': 'https://storage.googleapis.com', - 'expiration': EXPIRATION, - 'method': 'GET', - 'resource': '/name/parent%2Fchild', - 'content_type': None, - 'response_type': None, - 'response_disposition': None, - 'generation': None, - } - self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)]) - - def test_generate_signed_url_w_method_arg(self): - from gcloud._testing import _Monkey - from gcloud.storage import blob as MUT - - BLOB_NAME = 'blob-name' - EXPIRATION = '2014-10-16T20:34:37.000Z' - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' - '&Expiration=2014-10-16T20:34:37.000Z') - - 
SIGNER = _Signer() - with _Monkey(MUT, generate_signed_url=SIGNER): - signed_uri = blob.generate_signed_url(EXPIRATION, method='POST') - self.assertEqual(signed_uri, URI) - - PATH = '/name/%s' % (BLOB_NAME,) - EXPECTED_ARGS = (_Connection.credentials,) - EXPECTED_KWARGS = { - 'api_access_endpoint': 'https://storage.googleapis.com', - 'expiration': EXPIRATION, - 'method': 'POST', - 'resource': PATH, - 'content_type': None, - 'response_type': None, - 'response_disposition': None, - 'generation': None, - } - self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)]) - - def test_exists_miss(self): - from six.moves.http_client import NOT_FOUND - NONESUCH = 'nonesuch' - not_found_response = ({'status': NOT_FOUND}, b'') - connection = _Connection(not_found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(NONESUCH, bucket=bucket) - self.assertFalse(blob.exists()) - - def test_exists_hit(self): - from six.moves.http_client import OK - BLOB_NAME = 'blob-name' - found_response = ({'status': OK}, b'') - connection = _Connection(found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - bucket._blobs[BLOB_NAME] = 1 - self.assertTrue(blob.exists()) - - def test_delete(self): - from six.moves.http_client import NOT_FOUND - BLOB_NAME = 'blob-name' - not_found_response = ({'status': NOT_FOUND}, b'') - connection = _Connection(not_found_response) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - bucket._blobs[BLOB_NAME] = 1 - blob.delete() - self.assertFalse(blob.exists()) - self.assertEqual(bucket._deleted, [(BLOB_NAME, None)]) - - def test_download_to_file_wo_media_link(self): - from six.moves.http_client import OK - from six.moves.http_client import PARTIAL_CONTENT - from io import BytesIO - BLOB_NAME = 'blob-name' - MEDIA_LINK = 'http://example.com/media/' - chunk1_response = {'status': PARTIAL_CONTENT, - 'content-range': 'bytes 0-2/6'} - chunk2_response = {'status': OK, - 'content-range': 'bytes 3-5/6'} - connection = _Connection( - (chunk1_response, b'abc'), - (chunk2_response, b'def'), - ) - # Only the 'reload' request hits on this side: the others are done - # through the 'http' object. 
- reload_response = {'status': OK, 'content-type': 'application/json'} - connection._responses = [(reload_response, {"mediaLink": MEDIA_LINK})] - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - fh = BytesIO() - blob.download_to_file(fh) - self.assertEqual(fh.getvalue(), b'abcdef') - self.assertEqual(blob.media_link, MEDIA_LINK) - - def _download_to_file_helper(self, chunk_size=None): - from six.moves.http_client import OK - from six.moves.http_client import PARTIAL_CONTENT - from io import BytesIO - BLOB_NAME = 'blob-name' - chunk1_response = {'status': PARTIAL_CONTENT, - 'content-range': 'bytes 0-2/6'} - chunk2_response = {'status': OK, - 'content-range': 'bytes 3-5/6'} - connection = _Connection( - (chunk1_response, b'abc'), - (chunk2_response, b'def'), - ) - client = _Client(connection) - bucket = _Bucket(client) - MEDIA_LINK = 'http://example.com/media/' - properties = {'mediaLink': MEDIA_LINK} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - if chunk_size is not None: - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = chunk_size - fh = BytesIO() - blob.download_to_file(fh) - self.assertEqual(fh.getvalue(), b'abcdef') - - def test_download_to_file_default(self): - self._download_to_file_helper() - - def test_download_to_file_with_chunk_size(self): - self._download_to_file_helper(chunk_size=3) - - def test_download_to_filename(self): - import os - import time - from six.moves.http_client import OK - from six.moves.http_client import PARTIAL_CONTENT - from gcloud._testing import _NamedTemporaryFile - - BLOB_NAME = 'blob-name' - chunk1_response = {'status': PARTIAL_CONTENT, - 'content-range': 'bytes 0-2/6'} - chunk2_response = {'status': OK, - 'content-range': 'bytes 3-5/6'} - connection = _Connection( - (chunk1_response, b'abc'), - (chunk2_response, b'def'), - ) - client = _Client(connection) - bucket = _Bucket(client) - MEDIA_LINK = 'http://example.com/media/' - properties = {'mediaLink': MEDIA_LINK, - 'updated': '2014-12-06T13:13:50.690Z'} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 3 - - with _NamedTemporaryFile() as temp: - blob.download_to_filename(temp.name) - with open(temp.name, 'rb') as file_obj: - wrote = file_obj.read() - mtime = os.path.getmtime(temp.name) - updatedTime = time.mktime(blob.updated.timetuple()) - - self.assertEqual(wrote, b'abcdef') - self.assertEqual(mtime, updatedTime) - - def test_download_as_string(self): - from six.moves.http_client import OK - from six.moves.http_client import PARTIAL_CONTENT - BLOB_NAME = 'blob-name' - chunk1_response = {'status': PARTIAL_CONTENT, - 'content-range': 'bytes 0-2/6'} - chunk2_response = {'status': OK, - 'content-range': 'bytes 3-5/6'} - connection = _Connection( - (chunk1_response, b'abc'), - (chunk2_response, b'def'), - ) - client = _Client(connection) - bucket = _Bucket(client) - MEDIA_LINK = 'http://example.com/media/' - properties = {'mediaLink': MEDIA_LINK} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 3 - fetched = blob.download_as_string() - self.assertEqual(fetched, b'abcdef') - - def test_upload_from_file_size_failure(self): - BLOB_NAME = 'blob-name' - connection = _Connection() - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - file_obj = object() - with self.assertRaises(ValueError): - blob.upload_from_file(file_obj, 
size=None) - - def _upload_from_file_simple_test_helper(self, properties=None, - content_type_arg=None, - expected_content_type=None, - chunk_size=5): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud._testing import _NamedTemporaryFile - - BLOB_NAME = 'blob-name' - DATA = b'ABCDEF' - response = {'status': OK} - connection = _Connection( - (response, b'{}'), - ) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = chunk_size - - with _NamedTemporaryFile() as temp: - with open(temp.name, 'wb') as file_obj: - file_obj.write(DATA) - - with open(temp.name, 'rb') as file_obj: - blob.upload_from_file(file_obj, rewind=True, - content_type=content_type_arg) - - rq = connection.http._requested - self.assertEqual(len(rq), 1) - self.assertEqual(rq[0]['method'], 'POST') - uri = rq[0]['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': BLOB_NAME}) - headers = dict( - [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) - self.assertEqual(headers['Content-Length'], '6') - self.assertEqual(headers['Content-Type'], expected_content_type) - - def test_upload_from_file_simple(self): - self._upload_from_file_simple_test_helper( - expected_content_type='application/octet-stream') - - def test_upload_from_file_simple_w_chunk_size_None(self): - self._upload_from_file_simple_test_helper( - expected_content_type='application/octet-stream', - chunk_size=None) - - def test_upload_from_file_simple_with_content_type(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - self._upload_from_file_simple_test_helper( - properties={'contentType': EXPECTED_CONTENT_TYPE}, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_file_simple_with_content_type_passed(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - self._upload_from_file_simple_test_helper( - content_type_arg=EXPECTED_CONTENT_TYPE, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_file_simple_both_content_type_sources(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - ALT_CONTENT_TYPE = 'foo/baz' - self._upload_from_file_simple_test_helper( - properties={'contentType': ALT_CONTENT_TYPE}, - content_type_arg=EXPECTED_CONTENT_TYPE, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_file_resumable(self): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud._testing import _Monkey - from gcloud._testing import _NamedTemporaryFile - from gcloud.streaming import http_wrapper - from gcloud.streaming import transfer - - BLOB_NAME = 'blob-name' - UPLOAD_URL = 'http://example.com/upload/name/key' - DATA = b'ABCDEF' - loc_response = {'status': OK, 'location': UPLOAD_URL} - chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE, - 'range': 'bytes 0-4'} - chunk2_response = {'status': OK} - # Need valid JSON on last response, since resumable. 
- connection = _Connection( - (loc_response, b''), - (chunk1_response, b''), - (chunk2_response, b'{}'), - ) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 5 - - # Set the threshold low enough that we force a resumable upload. - with _Monkey(transfer, RESUMABLE_UPLOAD_THRESHOLD=5): - with _NamedTemporaryFile() as temp: - with open(temp.name, 'wb') as file_obj: - file_obj.write(DATA) - with open(temp.name, 'rb') as file_obj: - blob.upload_from_file(file_obj, rewind=True) - - rq = connection.http._requested - self.assertEqual(len(rq), 3) - - # Requested[0] - headers = dict( - [(x.title(), str(y)) for x, y in rq[0].pop('headers').items()]) - self.assertEqual(headers['X-Upload-Content-Length'], '6') - self.assertEqual(headers['X-Upload-Content-Type'], - 'application/octet-stream') - - uri = rq[0].pop('uri') - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'resumable', 'name': BLOB_NAME}) - self.assertEqual(rq[0], { - 'method': 'POST', - 'body': '', - 'connection_type': None, - 'redirections': 5, - }) - - # Requested[1] - headers = dict( - [(x.title(), str(y)) for x, y in rq[1].pop('headers').items()]) - self.assertEqual(headers['Content-Range'], 'bytes 0-4/6') - self.assertEqual(rq[1], { - 'method': 'PUT', - 'uri': UPLOAD_URL, - 'body': DATA[:5], - 'connection_type': None, - 'redirections': 5, - }) - - # Requested[2] - headers = dict( - [(x.title(), str(y)) for x, y in rq[2].pop('headers').items()]) - self.assertEqual(headers['Content-Range'], 'bytes 5-5/6') - self.assertEqual(rq[2], { - 'method': 'PUT', - 'uri': UPLOAD_URL, - 'body': DATA[5:], - 'connection_type': None, - 'redirections': 5, - }) - - def test_upload_from_file_w_slash_in_name(self): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud._testing import _NamedTemporaryFile - from gcloud.streaming import http_wrapper - - BLOB_NAME = 'parent/child' - UPLOAD_URL = 'http://example.com/upload/name/parent%2Fchild' - DATA = b'ABCDEF' - loc_response = {'status': OK, 'location': UPLOAD_URL} - chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE, - 'range': 'bytes 0-4'} - chunk2_response = {'status': OK} - connection = _Connection( - (loc_response, '{}'), - (chunk1_response, ''), - (chunk2_response, ''), - ) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 5 - - with _NamedTemporaryFile() as temp: - with open(temp.name, 'wb') as file_obj: - file_obj.write(DATA) - with open(temp.name, 'rb') as file_obj: - blob.upload_from_file(file_obj, rewind=True) - self.assertEqual(file_obj.tell(), len(DATA)) - - rq = connection.http._requested - self.assertEqual(len(rq), 1) - self.assertEqual(rq[0]['redirections'], 5) - self.assertEqual(rq[0]['body'], DATA) - self.assertEqual(rq[0]['connection_type'], None) - self.assertEqual(rq[0]['method'], 'POST') - uri = rq[0]['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': 'parent/child'}) - headers = dict( - [(x.title(), str(y)) for x, y in
rq[0]['headers'].items()]) - self.assertEqual(headers['Content-Length'], '6') - self.assertEqual(headers['Content-Type'], 'application/octet-stream') - - def _upload_from_filename_test_helper(self, properties=None, - content_type_arg=None, - expected_content_type=None): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud._testing import _NamedTemporaryFile - from gcloud.streaming import http_wrapper - - BLOB_NAME = 'blob-name' - UPLOAD_URL = 'http://example.com/upload/name/key' - DATA = b'ABCDEF' - loc_response = {'status': OK, 'location': UPLOAD_URL} - chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE, - 'range': 'bytes 0-4'} - chunk2_response = {'status': OK} - connection = _Connection( - (loc_response, '{}'), - (chunk1_response, ''), - (chunk2_response, ''), - ) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket, - properties=properties) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 5 - - with _NamedTemporaryFile(suffix='.jpeg') as temp: - with open(temp.name, 'wb') as file_obj: - file_obj.write(DATA) - blob.upload_from_filename(temp.name, - content_type=content_type_arg) - - rq = connection.http._requested - self.assertEqual(len(rq), 1) - self.assertEqual(rq[0]['method'], 'POST') - uri = rq[0]['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': BLOB_NAME}) - headers = dict( - [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) - self.assertEqual(headers['Content-Length'], '6') - self.assertEqual(headers['Content-Type'], expected_content_type) - - def test_upload_from_filename(self): - self._upload_from_filename_test_helper( - expected_content_type='image/jpeg') - - def test_upload_from_filename_with_content_type(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - self._upload_from_filename_test_helper( - properties={'contentType': EXPECTED_CONTENT_TYPE}, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_filename_with_content_type_passed(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - self._upload_from_filename_test_helper( - content_type_arg=EXPECTED_CONTENT_TYPE, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_filename_both_content_type_sources(self): - EXPECTED_CONTENT_TYPE = 'foo/bar' - ALT_CONTENT_TYPE = 'foo/baz' - self._upload_from_filename_test_helper( - properties={'contentType': ALT_CONTENT_TYPE}, - content_type_arg=EXPECTED_CONTENT_TYPE, - expected_content_type=EXPECTED_CONTENT_TYPE) - - def test_upload_from_string_w_bytes(self): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud.streaming import http_wrapper - BLOB_NAME = 'blob-name' - UPLOAD_URL = 'http://example.com/upload/name/key' - DATA = b'ABCDEF' - loc_response = {'status': OK, 'location': UPLOAD_URL} - chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE, - 'range': 'bytes 0-4'} - chunk2_response = {'status': OK} - connection = _Connection( - (loc_response, '{}'), - (chunk1_response, ''), - (chunk2_response, ''), - ) - client = _Client(connection) - bucket = _Bucket(client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 5 - blob.upload_from_string(DATA) - rq = 
connection.http._requested - self.assertEqual(len(rq), 1) - self.assertEqual(rq[0]['method'], 'POST') - uri = rq[0]['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': BLOB_NAME}) - headers = dict( - [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) - self.assertEqual(headers['Content-Length'], '6') - self.assertEqual(headers['Content-Type'], 'text/plain') - self.assertEqual(rq[0]['body'], DATA) - - def test_upload_from_string_w_text(self): - from six.moves.http_client import OK - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - from gcloud.streaming import http_wrapper - BLOB_NAME = 'blob-name' - UPLOAD_URL = 'http://example.com/upload/name/key' - DATA = u'ABCDEF\u1234' - ENCODED = DATA.encode('utf-8') - loc_response = {'status': OK, 'location': UPLOAD_URL} - chunk1_response = {'status': http_wrapper.RESUME_INCOMPLETE, - 'range': 'bytes 0-4'} - chunk2_response = {'status': OK} - connection = _Connection( - (loc_response, '{}'), - (chunk1_response, ''), - (chunk2_response, ''), - ) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - blob._CHUNK_SIZE_MULTIPLE = 1 - blob.chunk_size = 5 - blob.upload_from_string(DATA) - rq = connection.http._requested - self.assertEqual(len(rq), 1) - self.assertEqual(rq[0]['method'], 'POST') - uri = rq[0]['uri'] - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual(scheme, 'http') - self.assertEqual(netloc, 'example.com') - self.assertEqual(path, '/b/name/o') - self.assertEqual(dict(parse_qsl(qs)), - {'uploadType': 'media', 'name': BLOB_NAME}) - headers = dict( - [(x.title(), str(y)) for x, y in rq[0]['headers'].items()]) - self.assertEqual(headers['Content-Length'], str(len(ENCODED))) - self.assertEqual(headers['Content-Type'], 'text/plain') - self.assertEqual(rq[0]['body'], ENCODED) - - def test_make_public(self): - from six.moves.http_client import OK - from gcloud.storage.acl import _ACLEntity - BLOB_NAME = 'blob-name' - permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] - after = ({'status': OK}, {'acl': permissive}) - connection = _Connection(after) - client = _Client(connection) - bucket = _Bucket(client=client) - blob = self._makeOne(BLOB_NAME, bucket=bucket) - blob.acl.loaded = True - blob.make_public() - self.assertEqual(list(blob.acl), permissive) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME) - self.assertEqual(kw[0]['data'], {'acl': permissive}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - def test_cache_control_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CACHE_CONTROL = 'no-cache' - properties = {'cacheControl': CACHE_CONTROL} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.cache_control, CACHE_CONTROL) - - def test_cache_control_setter(self): - BLOB_NAME = 'blob-name' - CACHE_CONTROL = 'no-cache' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.cache_control, None) - blob.cache_control = CACHE_CONTROL - self.assertEqual(blob.cache_control, CACHE_CONTROL) - - def test_component_count(self): - BUCKET = object() - COMPONENT_COUNT = 42 - blob = 
self._makeOne('blob-name', bucket=BUCKET, - properties={'componentCount': COMPONENT_COUNT}) - self.assertEqual(blob.component_count, COMPONENT_COUNT) - - def test_component_count_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.component_count, None) - - def test_component_count_string_val(self): - BUCKET = object() - COMPONENT_COUNT = 42 - blob = self._makeOne( - 'blob-name', bucket=BUCKET, - properties={'componentCount': str(COMPONENT_COUNT)}) - self.assertEqual(blob.component_count, COMPONENT_COUNT) - - def test_content_disposition_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CONTENT_DISPOSITION = 'Attachment; filename=example.jpg' - properties = {'contentDisposition': CONTENT_DISPOSITION} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) - - def test_content_disposition_setter(self): - BLOB_NAME = 'blob-name' - CONTENT_DISPOSITION = 'Attachment; filename=example.jpg' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.content_disposition, None) - blob.content_disposition = CONTENT_DISPOSITION - self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) - - def test_content_encoding_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CONTENT_ENCODING = 'gzip' - properties = {'contentEncoding': CONTENT_ENCODING} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_encoding, CONTENT_ENCODING) - - def test_content_encoding_setter(self): - BLOB_NAME = 'blob-name' - CONTENT_ENCODING = 'gzip' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.content_encoding, None) - blob.content_encoding = CONTENT_ENCODING - self.assertEqual(blob.content_encoding, CONTENT_ENCODING) - - def test_content_language_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CONTENT_LANGUAGE = 'pt-BR' - properties = {'contentLanguage': CONTENT_LANGUAGE} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_language, CONTENT_LANGUAGE) - - def test_content_language_setter(self): - BLOB_NAME = 'blob-name' - CONTENT_LANGUAGE = 'pt-BR' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.content_language, None) - blob.content_language = CONTENT_LANGUAGE - self.assertEqual(blob.content_language, CONTENT_LANGUAGE) - - def test_content_type_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CONTENT_TYPE = 'image/jpeg' - properties = {'contentType': CONTENT_TYPE} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.content_type, CONTENT_TYPE) - - def test_content_type_setter(self): - BLOB_NAME = 'blob-name' - CONTENT_TYPE = 'image/jpeg' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.content_type, None) - blob.content_type = CONTENT_TYPE - self.assertEqual(blob.content_type, CONTENT_TYPE) - - def test_crc32c_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - CRC32C = 'DEADBEEF' - properties = {'crc32c': CRC32C} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.crc32c, CRC32C) - - def test_crc32c_setter(self): - BLOB_NAME = 'blob-name' - CRC32C = 'DEADBEEF' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.crc32c, None) 
- blob.crc32c = CRC32C - self.assertEqual(blob.crc32c, CRC32C) - - def test_etag(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - ETAG = 'ETAG' - properties = {'etag': ETAG} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.etag, ETAG) - - def test_generation(self): - BUCKET = object() - GENERATION = 42 - blob = self._makeOne('blob-name', bucket=BUCKET, - properties={'generation': GENERATION}) - self.assertEqual(blob.generation, GENERATION) - - def test_generation_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.generation, None) - - def test_generation_string_val(self): - BUCKET = object() - GENERATION = 42 - blob = self._makeOne('blob-name', bucket=BUCKET, - properties={'generation': str(GENERATION)}) - self.assertEqual(blob.generation, GENERATION) - - def test_id(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - ID = 'ID' - properties = {'id': ID} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.id, ID) - - def test_md5_hash_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - MD5_HASH = 'DEADBEEF' - properties = {'md5Hash': MD5_HASH} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.md5_hash, MD5_HASH) - - def test_md5_hash_setter(self): - BLOB_NAME = 'blob-name' - MD5_HASH = 'DEADBEEF' - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.md5_hash, None) - blob.md5_hash = MD5_HASH - self.assertEqual(blob.md5_hash, MD5_HASH) - - def test_media_link(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - MEDIA_LINK = 'http://example.com/media/' - properties = {'mediaLink': MEDIA_LINK} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.media_link, MEDIA_LINK) - - def test_metadata_getter(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - METADATA = {'foo': 'Foo'} - properties = {'metadata': METADATA} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.metadata, METADATA) - - def test_metadata_setter(self): - BLOB_NAME = 'blob-name' - METADATA = {'foo': 'Foo'} - bucket = _Bucket() - blob = self._makeOne(BLOB_NAME, bucket=bucket) - self.assertEqual(blob.metadata, None) - blob.metadata = METADATA - self.assertEqual(blob.metadata, METADATA) - - def test_metageneration(self): - BUCKET = object() - METAGENERATION = 42 - blob = self._makeOne('blob-name', bucket=BUCKET, - properties={'metageneration': METAGENERATION}) - self.assertEqual(blob.metageneration, METAGENERATION) - - def test_metageneration_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.metageneration, None) - - def test_metageneration_string_val(self): - BUCKET = object() - METAGENERATION = 42 - blob = self._makeOne( - 'blob-name', bucket=BUCKET, - properties={'metageneration': str(METAGENERATION)}) - self.assertEqual(blob.metageneration, METAGENERATION) - - def test_owner(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'} - properties = {'owner': OWNER} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - owner = blob.owner - self.assertEqual(owner['entity'], 'project-owner-12345') - self.assertEqual(owner['entityId'], '23456') - - def test_self_link(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - SELF_LINK = 
'http://example.com/self/' - properties = {'selfLink': SELF_LINK} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.self_link, SELF_LINK) - - def test_size(self): - BUCKET = object() - SIZE = 42 - blob = self._makeOne('blob-name', bucket=BUCKET, - properties={'size': SIZE}) - self.assertEqual(blob.size, SIZE) - - def test_size_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.size, None) - - def test_size_string_val(self): - BUCKET = object() - SIZE = 42 - blob = self._makeOne('blob-name', bucket=BUCKET, - properties={'size': str(SIZE)}) - self.assertEqual(blob.size, SIZE) - - def test_storage_class(self): - BLOB_NAME = 'blob-name' - bucket = _Bucket() - STORAGE_CLASS = 'http://example.com/self/' - properties = {'storageClass': STORAGE_CLASS} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.storage_class, STORAGE_CLASS) - - def test_time_deleted(self): - import datetime - from gcloud._helpers import _RFC3339_MICROS - from gcloud._helpers import UTC - BLOB_NAME = 'blob-name' - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {'timeDeleted': TIME_DELETED} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.time_deleted, TIMESTAMP) - - def test_time_deleted_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.time_deleted, None) - - def test_updated(self): - import datetime - from gcloud._helpers import _RFC3339_MICROS - from gcloud._helpers import UTC - BLOB_NAME = 'blob-name' - bucket = _Bucket() - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {'updated': UPDATED} - blob = self._makeOne(BLOB_NAME, bucket=bucket, properties=properties) - self.assertEqual(blob.updated, TIMESTAMP) - - def test_updated_unset(self): - BUCKET = object() - blob = self._makeOne('blob-name', bucket=BUCKET) - self.assertEqual(blob.updated, None) - - -class _Responder(object): - - def __init__(self, *responses): - self._responses = responses[:] - self._requested = [] - - def _respond(self, **kw): - self._requested.append(kw) - response, self._responses = self._responses[0], self._responses[1:] - return response - - -class _Connection(_Responder): - - API_BASE_URL = 'http://example.com' - USER_AGENT = 'testing 1.2.3' - credentials = object() - - def __init__(self, *responses): - super(_Connection, self).__init__(*responses) - self._signed = [] - self.http = _HTTP(*responses) - - def api_request(self, **kw): - from six.moves.http_client import NOT_FOUND - from gcloud.exceptions import NotFound - info, content = self._respond(**kw) - if info.get('status') == NOT_FOUND: - raise NotFound(info) - return content - - def build_api_url(self, path, query_params=None, - api_base_url=API_BASE_URL): - from six.moves.urllib.parse import urlencode - from six.moves.urllib.parse import urlsplit - from six.moves.urllib.parse import urlunsplit - # Mimic the build_api_url interface. - qs = urlencode(query_params or {}) - scheme, netloc, _, _, _ = urlsplit(api_base_url) - return urlunsplit((scheme, netloc, path, qs, '')) - - -class _HTTP(_Responder): - - connections = {} # For google-apitools debugging. 
- - def request(self, uri, method, headers, body, **kw): - if hasattr(body, 'read'): - body = body.read() - return self._respond(uri=uri, method=method, headers=headers, - body=body, **kw) - - -class _Bucket(object): - path = '/b/name' - name = 'name' - - def __init__(self, client=None): - if client is None: - connection = _Connection() - client = _Client(connection) - self.client = client - self._blobs = {} - self._copied = [] - self._deleted = [] - - def delete_blob(self, blob_name, client=None): - del self._blobs[blob_name] - self._deleted.append((blob_name, client)) - - -class _Signer(object): - - def __init__(self): - self._signed = [] - - def __call__(self, *args, **kwargs): - self._signed.append((args, kwargs)) - return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF' - '&Expiration=%s' % kwargs.get('expiration')) - - -class _Client(object): - - def __init__(self, connection): - self._connection = connection - - @property - def connection(self): - return self._connection diff --git a/gcloud/storage/test_bucket.py b/gcloud/storage/test_bucket.py deleted file mode 100644 index d7f127c44898..000000000000 --- a/gcloud/storage/test_bucket.py +++ /dev/null @@ -1,1041 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest2 - - -class Test__BlobIterator(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.bucket import _BlobIterator - return _BlobIterator - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - connection = _Connection() - client = _Client(connection) - bucket = _Bucket() - iterator = self._makeOne(bucket, client=client) - self.assertTrue(iterator.bucket is bucket) - self.assertTrue(iterator.client is client) - self.assertEqual(iterator.path, '%s/o' % bucket.path) - self.assertEqual(iterator.page_number, 0) - self.assertEqual(iterator.next_page_token, None) - self.assertEqual(iterator.prefixes, set()) - - def test_get_items_from_response_empty(self): - connection = _Connection() - client = _Client(connection) - bucket = _Bucket() - iterator = self._makeOne(bucket, client=client) - blobs = list(iterator.get_items_from_response({})) - self.assertEqual(blobs, []) - self.assertEqual(iterator.prefixes, set()) - - def test_get_items_from_response_non_empty(self): - from gcloud.storage.blob import Blob - BLOB_NAME = 'blob-name' - response = {'items': [{'name': BLOB_NAME}], 'prefixes': ['foo']} - connection = _Connection() - client = _Client(connection) - bucket = _Bucket() - iterator = self._makeOne(bucket, client=client) - blobs = list(iterator.get_items_from_response(response)) - self.assertEqual(len(blobs), 1) - blob = blobs[0] - self.assertTrue(isinstance(blob, Blob)) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(iterator.prefixes, set(['foo'])) - - def test_get_items_from_response_cumulative_prefixes(self): - from gcloud.storage.blob import Blob - BLOB_NAME = 'blob-name1' - response1 = {'items': [{'name': BLOB_NAME}], 'prefixes': ['foo']} - response2 = { - 'items': [], - 'prefixes': ['foo', 'bar'], - } - connection = _Connection() - client = _Client(connection) - bucket = _Bucket() - iterator = self._makeOne(bucket, client=client) - # Parse first response. - blobs = list(iterator.get_items_from_response(response1)) - self.assertEqual(len(blobs), 1) - blob = blobs[0] - self.assertTrue(isinstance(blob, Blob)) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(iterator.prefixes, set(['foo'])) - # Parse second response. 
- blobs = list(iterator.get_items_from_response(response2)) - self.assertEqual(len(blobs), 0) - self.assertEqual(iterator.prefixes, set(['foo', 'bar'])) - - -class Test_Bucket(unittest2.TestCase): - - def _makeOne(self, client=None, name=None, properties=None): - from gcloud.storage.bucket import Bucket - if client is None: - connection = _Connection() - client = _Client(connection) - bucket = Bucket(client, name=name) - bucket._properties = properties or {} - return bucket - - def test_ctor(self): - NAME = 'name' - properties = {'key': 'value'} - bucket = self._makeOne(name=NAME, properties=properties) - self.assertEqual(bucket.name, NAME) - self.assertEqual(bucket._properties, properties) - self.assertFalse(bucket._acl.loaded) - self.assertTrue(bucket._acl.bucket is bucket) - self.assertFalse(bucket._default_object_acl.loaded) - self.assertTrue(bucket._default_object_acl.bucket is bucket) - - def test_blob(self): - from gcloud.storage.blob import Blob - - BUCKET_NAME = 'BUCKET_NAME' - BLOB_NAME = 'BLOB_NAME' - CHUNK_SIZE = 1024 * 1024 - - bucket = self._makeOne(name=BUCKET_NAME) - blob = bucket.blob(BLOB_NAME, chunk_size=CHUNK_SIZE) - self.assertTrue(isinstance(blob, Blob)) - self.assertTrue(blob.bucket is bucket) - self.assertTrue(blob.client is bucket.client) - self.assertEqual(blob.name, BLOB_NAME) - self.assertEqual(blob.chunk_size, CHUNK_SIZE) - - def test_exists_miss(self): - from gcloud.exceptions import NotFound - - class _FakeConnection(object): - - _called_with = [] - - @classmethod - def api_request(cls, *args, **kwargs): - cls._called_with.append((args, kwargs)) - raise NotFound(args) - - BUCKET_NAME = 'bucket-name' - bucket = self._makeOne(name=BUCKET_NAME) - client = _Client(_FakeConnection) - self.assertFalse(bucket.exists(client=client)) - expected_called_kwargs = { - 'method': 'GET', - 'path': bucket.path, - 'query_params': { - 'fields': 'name', - }, - '_target_object': None, - } - expected_cw = [((), expected_called_kwargs)] - self.assertEqual(_FakeConnection._called_with, expected_cw) - - def test_exists_hit(self): - class _FakeConnection(object): - - _called_with = [] - - @classmethod - def api_request(cls, *args, **kwargs): - cls._called_with.append((args, kwargs)) - # exists() does not use the return value - return object() - - BUCKET_NAME = 'bucket-name' - bucket = self._makeOne(name=BUCKET_NAME) - client = _Client(_FakeConnection) - self.assertTrue(bucket.exists(client=client)) - expected_called_kwargs = { - 'method': 'GET', - 'path': bucket.path, - 'query_params': { - 'fields': 'name', - }, - '_target_object': None, - } - expected_cw = [((), expected_called_kwargs)] - self.assertEqual(_FakeConnection._called_with, expected_cw) - - def test_create_hit(self): - BUCKET_NAME = 'bucket-name' - DATA = {'name': BUCKET_NAME} - connection = _Connection(DATA) - PROJECT = 'PROJECT' - client = _Client(connection, project=PROJECT) - bucket = self._makeOne(client=client, name=BUCKET_NAME) - bucket.create() - - kw, = connection._requested - self.assertEqual(kw['method'], 'POST') - self.assertEqual(kw['path'], '/b') - self.assertEqual(kw['query_params'], {'project': PROJECT}) - self.assertEqual(kw['data'], DATA) - - def test_create_w_extra_properties(self): - BUCKET_NAME = 'bucket-name' - PROJECT = 'PROJECT' - CORS = [{ - 'maxAgeSeconds': 60, - 'methods': ['*'], - 'origin': ['https://example.com/frontend'], - 'responseHeader': ['X-Custom-Header'], - }] - LIFECYCLE_RULES = [{ - "action": {"type": "Delete"}, - "condition": {"age": 365} - }] - LOCATION = 'eu' - STORAGE_CLASS = 
'NEARLINE' - DATA = { - 'name': BUCKET_NAME, - 'cors': CORS, - 'lifecycle': {'rule': LIFECYCLE_RULES}, - 'location': LOCATION, - 'storageClass': STORAGE_CLASS, - 'versioning': {'enabled': True}, - } - connection = _Connection(DATA) - client = _Client(connection, project=PROJECT) - bucket = self._makeOne(client=client, name=BUCKET_NAME) - bucket.cors = CORS - bucket.lifecycle_rules = LIFECYCLE_RULES - bucket.location = LOCATION - bucket.storage_class = STORAGE_CLASS - bucket.versioning_enabled = True - bucket.create() - - kw, = connection._requested - self.assertEqual(kw['method'], 'POST') - self.assertEqual(kw['path'], '/b') - self.assertEqual(kw['query_params'], {'project': PROJECT}) - self.assertEqual(kw['data'], DATA) - - def test_acl_property(self): - from gcloud.storage.acl import BucketACL - bucket = self._makeOne() - acl = bucket.acl - self.assertTrue(isinstance(acl, BucketACL)) - self.assertTrue(acl is bucket._acl) - - def test_default_object_acl_property(self): - from gcloud.storage.acl import DefaultObjectACL - bucket = self._makeOne() - acl = bucket.default_object_acl - self.assertTrue(isinstance(acl, DefaultObjectACL)) - self.assertTrue(acl is bucket._default_object_acl) - - def test_path_no_name(self): - bucket = self._makeOne() - self.assertRaises(ValueError, getattr, bucket, 'path') - - def test_path_w_name(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - self.assertEqual(bucket.path, '/b/%s' % NAME) - - def test_get_blob_miss(self): - NAME = 'name' - NONESUCH = 'nonesuch' - connection = _Connection() - client = _Client(connection) - bucket = self._makeOne(name=NAME) - result = bucket.get_blob(NONESUCH, client=client) - self.assertTrue(result is None) - kw, = connection._requested - self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - - def test_get_blob_hit(self): - NAME = 'name' - BLOB_NAME = 'blob-name' - connection = _Connection({'name': BLOB_NAME}) - client = _Client(connection) - bucket = self._makeOne(name=NAME) - blob = bucket.get_blob(BLOB_NAME, client=client) - self.assertTrue(blob.bucket is bucket) - self.assertEqual(blob.name, BLOB_NAME) - kw, = connection._requested - self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - - def test_list_blobs_defaults(self): - NAME = 'name' - connection = _Connection({'items': []}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - iterator = bucket.list_blobs() - blobs = list(iterator) - self.assertEqual(blobs, []) - kw, = connection._requested - self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o' % NAME) - self.assertEqual(kw['query_params'], {'projection': 'noAcl'}) - - def test_list_blobs_w_all_arguments(self): - NAME = 'name' - MAX_RESULTS = 10 - PAGE_TOKEN = 'ABCD' - PREFIX = 'subfolder' - DELIMITER = '/' - VERSIONS = True - PROJECTION = 'full' - FIELDS = 'items/contentLanguage,nextPageToken' - EXPECTED = { - 'maxResults': 10, - 'pageToken': PAGE_TOKEN, - 'prefix': PREFIX, - 'delimiter': DELIMITER, - 'versions': VERSIONS, - 'projection': PROJECTION, - 'fields': FIELDS, - } - connection = _Connection({'items': []}) - client = _Client(connection) - bucket = self._makeOne(name=NAME) - iterator = bucket.list_blobs( - max_results=MAX_RESULTS, - page_token=PAGE_TOKEN, - prefix=PREFIX, - delimiter=DELIMITER, - versions=VERSIONS, - projection=PROJECTION, - fields=FIELDS, - client=client, - ) - blobs = list(iterator) - self.assertEqual(blobs, []) - kw, = 
connection._requested - self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o' % NAME) - self.assertEqual(kw['query_params'], EXPECTED) - - def test_list_blobs(self): - NAME = 'name' - connection = _Connection({'items': []}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - iterator = bucket.list_blobs() - blobs = list(iterator) - self.assertEqual(blobs, []) - kw, = connection._requested - self.assertEqual(kw['method'], 'GET') - self.assertEqual(kw['path'], '/b/%s/o' % NAME) - self.assertEqual(kw['query_params'], {'projection': 'noAcl'}) - - def test_delete_miss(self): - from gcloud.exceptions import NotFound - NAME = 'name' - connection = _Connection() - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete) - expected_cw = [{ - 'method': 'DELETE', - 'path': bucket.path, - '_target_object': None, - }] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_hit(self): - NAME = 'name' - GET_BLOBS_RESP = {'items': []} - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - result = bucket.delete(force=True) - self.assertTrue(result is None) - expected_cw = [{ - 'method': 'DELETE', - 'path': bucket.path, - '_target_object': None, - }] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_force_delete_blobs(self): - NAME = 'name' - BLOB_NAME1 = 'blob-name1' - BLOB_NAME2 = 'blob-name2' - GET_BLOBS_RESP = { - 'items': [ - {'name': BLOB_NAME1}, - {'name': BLOB_NAME2}, - ], - } - DELETE_BLOB1_RESP = DELETE_BLOB2_RESP = {} - connection = _Connection(GET_BLOBS_RESP, DELETE_BLOB1_RESP, - DELETE_BLOB2_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - result = bucket.delete(force=True) - self.assertTrue(result is None) - expected_cw = [{ - 'method': 'DELETE', - 'path': bucket.path, - '_target_object': None, - }] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_force_miss_blobs(self): - NAME = 'name' - BLOB_NAME = 'blob-name1' - GET_BLOBS_RESP = {'items': [{'name': BLOB_NAME}]} - # Note the connection does not have a response for the blob. - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - result = bucket.delete(force=True) - self.assertTrue(result is None) - expected_cw = [{ - 'method': 'DELETE', - 'path': bucket.path, - '_target_object': None, - }] - self.assertEqual(connection._deleted_buckets, expected_cw) - - def test_delete_too_many(self): - NAME = 'name' - BLOB_NAME1 = 'blob-name1' - BLOB_NAME2 = 'blob-name2' - GET_BLOBS_RESP = { - 'items': [ - {'name': BLOB_NAME1}, - {'name': BLOB_NAME2}, - ], - } - connection = _Connection(GET_BLOBS_RESP) - connection._delete_bucket = True - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - - # Make the Bucket refuse to delete with 2 objects. 
- bucket._MAX_OBJECTS_FOR_ITERATION = 1 - self.assertRaises(ValueError, bucket.delete, force=True) - self.assertEqual(connection._deleted_buckets, []) - - def test_delete_blob_miss(self): - from gcloud.exceptions import NotFound - NAME = 'name' - NONESUCH = 'nonesuch' - connection = _Connection() - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete_blob, NONESUCH) - kw, = connection._requested - self.assertEqual(kw['method'], 'DELETE') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - - def test_delete_blob_hit(self): - NAME = 'name' - BLOB_NAME = 'blob-name' - connection = _Connection({}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - result = bucket.delete_blob(BLOB_NAME) - self.assertTrue(result is None) - kw, = connection._requested - self.assertEqual(kw['method'], 'DELETE') - self.assertEqual(kw['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - - def test_delete_blobs_empty(self): - NAME = 'name' - connection = _Connection() - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.delete_blobs([]) - self.assertEqual(connection._requested, []) - - def test_delete_blobs_hit(self): - NAME = 'name' - BLOB_NAME = 'blob-name' - connection = _Connection({}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.delete_blobs([BLOB_NAME]) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - - def test_delete_blobs_miss_no_on_error(self): - from gcloud.exceptions import NotFound - NAME = 'name' - BLOB_NAME = 'blob-name' - NONESUCH = 'nonesuch' - connection = _Connection({}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - self.assertRaises(NotFound, bucket.delete_blobs, [BLOB_NAME, NONESUCH]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - self.assertEqual(kw[1]['method'], 'DELETE') - self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - - def test_delete_blobs_miss_w_on_error(self): - NAME = 'name' - BLOB_NAME = 'blob-name' - NONESUCH = 'nonesuch' - connection = _Connection({}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - errors = [] - bucket.delete_blobs([BLOB_NAME, NONESUCH], errors.append) - self.assertEqual(errors, [NONESUCH]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]['method'], 'DELETE') - self.assertEqual(kw[0]['path'], '/b/%s/o/%s' % (NAME, BLOB_NAME)) - self.assertEqual(kw[1]['method'], 'DELETE') - self.assertEqual(kw[1]['path'], '/b/%s/o/%s' % (NAME, NONESUCH)) - - def test_copy_blobs_wo_name(self): - SOURCE = 'source' - DEST = 'dest' - BLOB_NAME = 'blob-name' - - class _Blob(object): - name = BLOB_NAME - path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME) - - connection = _Connection({}) - client = _Client(connection) - source = self._makeOne(client=client, name=SOURCE) - dest = self._makeOne(client=client, name=DEST) - blob = _Blob() - new_blob = source.copy_blob(blob, dest) - self.assertTrue(new_blob.bucket is dest) - self.assertEqual(new_blob.name, BLOB_NAME) - kw, = connection._requested - COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME, - DEST, BLOB_NAME) - self.assertEqual(kw['method'], 'POST') - 
self.assertEqual(kw['path'], COPY_PATH) - - def test_copy_blobs_w_name(self): - SOURCE = 'source' - DEST = 'dest' - BLOB_NAME = 'blob-name' - NEW_NAME = 'new_name' - - class _Blob(object): - name = BLOB_NAME - path = '/b/%s/o/%s' % (SOURCE, BLOB_NAME) - - connection = _Connection({}) - client = _Client(connection) - source = self._makeOne(client=client, name=SOURCE) - dest = self._makeOne(client=client, name=DEST) - blob = _Blob() - new_blob = source.copy_blob(blob, dest, NEW_NAME) - self.assertTrue(new_blob.bucket is dest) - self.assertEqual(new_blob.name, NEW_NAME) - kw, = connection._requested - COPY_PATH = '/b/%s/o/%s/copyTo/b/%s/o/%s' % (SOURCE, BLOB_NAME, - DEST, NEW_NAME) - self.assertEqual(kw['method'], 'POST') - self.assertEqual(kw['path'], COPY_PATH) - - def test_rename_blob(self): - BUCKET_NAME = 'BUCKET_NAME' - BLOB_NAME = 'blob-name' - NEW_BLOB_NAME = 'new-blob-name' - - DATA = {'name': NEW_BLOB_NAME} - connection = _Connection(DATA) - client = _Client(connection) - bucket = self._makeOne(client=client, name=BUCKET_NAME) - - class _Blob(object): - - def __init__(self, name, bucket_name): - self.name = name - self.path = '/b/%s/o/%s' % (bucket_name, name) - self._deleted = [] - - def delete(self, client=None): - self._deleted.append(client) - - blob = _Blob(BLOB_NAME, BUCKET_NAME) - renamed_blob = bucket.rename_blob(blob, NEW_BLOB_NAME, client=client) - self.assertTrue(renamed_blob.bucket is bucket) - self.assertEqual(renamed_blob.name, NEW_BLOB_NAME) - self.assertEqual(blob._deleted, [client]) - - def test_etag(self): - ETAG = 'ETAG' - properties = {'etag': ETAG} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.etag, ETAG) - - def test_id(self): - ID = 'ID' - properties = {'id': ID} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.id, ID) - - def test_location_getter(self): - NAME = 'name' - before = {'location': 'AS'} - bucket = self._makeOne(name=NAME, properties=before) - self.assertEqual(bucket.location, 'AS') - - def test_location_setter(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - self.assertEqual(bucket.location, None) - bucket.location = 'AS' - self.assertEqual(bucket.location, 'AS') - self.assertTrue('location' in bucket._changes) - - def test_lifecycle_rules_getter(self): - NAME = 'name' - LC_RULE = {'action': {'type': 'Delete'}, 'condition': {'age': 42}} - rules = [LC_RULE] - properties = {'lifecycle': {'rule': rules}} - bucket = self._makeOne(name=NAME, properties=properties) - self.assertEqual(bucket.lifecycle_rules, rules) - # Make sure it's a copy - self.assertFalse(bucket.lifecycle_rules is rules) - - def test_lifecycle_rules_setter(self): - NAME = 'name' - LC_RULE = {'action': {'type': 'Delete'}, 'condition': {'age': 42}} - rules = [LC_RULE] - bucket = self._makeOne(name=NAME) - self.assertEqual(bucket.lifecycle_rules, []) - bucket.lifecycle_rules = rules - self.assertEqual(bucket.lifecycle_rules, rules) - self.assertTrue('lifecycle' in bucket._changes) - - def test_cors_getter(self): - NAME = 'name' - CORS_ENTRY = { - 'maxAgeSeconds': 1234, - 'method': ['OPTIONS', 'GET'], - 'origin': ['127.0.0.1'], - 'responseHeader': ['Content-Type'], - } - properties = {'cors': [CORS_ENTRY, {}]} - bucket = self._makeOne(name=NAME, properties=properties) - entries = bucket.cors - self.assertEqual(len(entries), 2) - self.assertEqual(entries[0], CORS_ENTRY) - self.assertEqual(entries[1], {}) - # Make sure it was a copy, not the same object. 
- self.assertFalse(entries[0] is CORS_ENTRY) - - def test_cors_setter(self): - NAME = 'name' - CORS_ENTRY = { - 'maxAgeSeconds': 1234, - 'method': ['OPTIONS', 'GET'], - 'origin': ['127.0.0.1'], - 'responseHeader': ['Content-Type'], - } - bucket = self._makeOne(name=NAME) - - self.assertEqual(bucket.cors, []) - bucket.cors = [CORS_ENTRY] - self.assertEqual(bucket.cors, [CORS_ENTRY]) - self.assertTrue('cors' in bucket._changes) - - def test_get_logging_w_prefix(self): - NAME = 'name' - LOG_BUCKET = 'logs' - LOG_PREFIX = 'pfx' - before = { - 'logging': { - 'logBucket': LOG_BUCKET, - 'logObjectPrefix': LOG_PREFIX, - }, - } - bucket = self._makeOne(name=NAME, properties=before) - info = bucket.get_logging() - self.assertEqual(info['logBucket'], LOG_BUCKET) - self.assertEqual(info['logObjectPrefix'], LOG_PREFIX) - - def test_enable_logging_defaults(self): - NAME = 'name' - LOG_BUCKET = 'logs' - before = {'logging': None} - bucket = self._makeOne(name=NAME, properties=before) - self.assertTrue(bucket.get_logging() is None) - bucket.enable_logging(LOG_BUCKET) - info = bucket.get_logging() - self.assertEqual(info['logBucket'], LOG_BUCKET) - self.assertEqual(info['logObjectPrefix'], '') - - def test_enable_logging(self): - NAME = 'name' - LOG_BUCKET = 'logs' - LOG_PFX = 'pfx' - before = {'logging': None} - bucket = self._makeOne(name=NAME, properties=before) - self.assertTrue(bucket.get_logging() is None) - bucket.enable_logging(LOG_BUCKET, LOG_PFX) - info = bucket.get_logging() - self.assertEqual(info['logBucket'], LOG_BUCKET) - self.assertEqual(info['logObjectPrefix'], LOG_PFX) - - def test_disable_logging(self): - NAME = 'name' - before = {'logging': {'logBucket': 'logs', 'logObjectPrefix': 'pfx'}} - bucket = self._makeOne(name=NAME, properties=before) - self.assertTrue(bucket.get_logging() is not None) - bucket.disable_logging() - self.assertTrue(bucket.get_logging() is None) - - def test_metageneration(self): - METAGENERATION = 42 - properties = {'metageneration': METAGENERATION} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.metageneration, METAGENERATION) - - def test_metageneration_unset(self): - bucket = self._makeOne() - self.assertEqual(bucket.metageneration, None) - - def test_metageneration_string_val(self): - METAGENERATION = 42 - properties = {'metageneration': str(METAGENERATION)} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.metageneration, METAGENERATION) - - def test_owner(self): - OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'} - properties = {'owner': OWNER} - bucket = self._makeOne(properties=properties) - owner = bucket.owner - self.assertEqual(owner['entity'], 'project-owner-12345') - self.assertEqual(owner['entityId'], '23456') - - def test_project_number(self): - PROJECT_NUMBER = 12345 - properties = {'projectNumber': PROJECT_NUMBER} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.project_number, PROJECT_NUMBER) - - def test_project_number_unset(self): - bucket = self._makeOne() - self.assertEqual(bucket.project_number, None) - - def test_project_number_string_val(self): - PROJECT_NUMBER = 12345 - properties = {'projectNumber': str(PROJECT_NUMBER)} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.project_number, PROJECT_NUMBER) - - def test_self_link(self): - SELF_LINK = 'http://example.com/self/' - properties = {'selfLink': SELF_LINK} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.self_link, SELF_LINK) - - def 
test_storage_class_getter(self): - STORAGE_CLASS = 'http://example.com/self/' - properties = {'storageClass': STORAGE_CLASS} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.storage_class, STORAGE_CLASS) - - def test_storage_class_setter_invalid(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - with self.assertRaises(ValueError): - bucket.storage_class = 'BOGUS' - self.assertFalse('storageClass' in bucket._changes) - - def test_storage_class_setter_STANDARD(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - bucket.storage_class = 'STANDARD' - self.assertEqual(bucket.storage_class, 'STANDARD') - self.assertTrue('storageClass' in bucket._changes) - - def test_storage_class_setter_NEARLINE(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - bucket.storage_class = 'NEARLINE' - self.assertEqual(bucket.storage_class, 'NEARLINE') - self.assertTrue('storageClass' in bucket._changes) - - def test_storage_class_setter_DURABLE_REDUCED_AVAILABILITY(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - bucket.storage_class = 'DURABLE_REDUCED_AVAILABILITY' - self.assertEqual(bucket.storage_class, 'DURABLE_REDUCED_AVAILABILITY') - self.assertTrue('storageClass' in bucket._changes) - - def test_time_created(self): - import datetime - from gcloud._helpers import _RFC3339_MICROS - from gcloud._helpers import UTC - TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) - TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) - properties = {'timeCreated': TIME_CREATED} - bucket = self._makeOne(properties=properties) - self.assertEqual(bucket.time_created, TIMESTAMP) - - def test_time_created_unset(self): - bucket = self._makeOne() - self.assertEqual(bucket.time_created, None) - - def test_versioning_enabled_getter_missing(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - self.assertEqual(bucket.versioning_enabled, False) - - def test_versioning_enabled_getter(self): - NAME = 'name' - before = {'versioning': {'enabled': True}} - bucket = self._makeOne(name=NAME, properties=before) - self.assertEqual(bucket.versioning_enabled, True) - - def test_versioning_enabled_setter(self): - NAME = 'name' - bucket = self._makeOne(name=NAME) - self.assertFalse(bucket.versioning_enabled) - bucket.versioning_enabled = True - self.assertTrue(bucket.versioning_enabled) - - def test_configure_website_defaults(self): - NAME = 'name' - UNSET = {'website': {'mainPageSuffix': None, - 'notFoundPage': None}} - bucket = self._makeOne(name=NAME) - bucket.configure_website() - self.assertEqual(bucket._properties, UNSET) - - def test_configure_website(self): - NAME = 'name' - WEBSITE_VAL = {'website': {'mainPageSuffix': 'html', - 'notFoundPage': '404.html'}} - bucket = self._makeOne(name=NAME) - bucket.configure_website('html', '404.html') - self.assertEqual(bucket._properties, WEBSITE_VAL) - - def test_disable_website(self): - NAME = 'name' - UNSET = {'website': {'mainPageSuffix': None, - 'notFoundPage': None}} - bucket = self._makeOne(name=NAME) - bucket.disable_website() - self.assertEqual(bucket._properties, UNSET) - - def test_make_public_defaults(self): - from gcloud.storage.acl import _ACLEntity - NAME = 'name' - permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] - after = {'acl': permissive, 'defaultObjectAcl': []} - connection = _Connection(after) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - bucket.make_public() - 
self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), []) - kw = connection._requested - self.assertEqual(len(kw), 1) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/%s' % NAME) - self.assertEqual(kw[0]['data'], {'acl': after['acl']}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - - def _make_public_w_future_helper(self, default_object_acl_loaded=True): - from gcloud.storage.acl import _ACLEntity - NAME = 'name' - permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] - after1 = {'acl': permissive, 'defaultObjectAcl': []} - after2 = {'acl': permissive, 'defaultObjectAcl': permissive} - if default_object_acl_loaded: - num_requests = 2 - connection = _Connection(after1, after2) - else: - num_requests = 3 - # We return the same value for default_object_acl.reload() - # to consume. - connection = _Connection(after1, after1, after2) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = default_object_acl_loaded - bucket.make_public(future=True) - self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), permissive) - kw = connection._requested - self.assertEqual(len(kw), num_requests) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/%s' % NAME) - self.assertEqual(kw[0]['data'], {'acl': permissive}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - if not default_object_acl_loaded: - self.assertEqual(kw[1]['method'], 'GET') - self.assertEqual(kw[1]['path'], '/b/%s/defaultObjectAcl' % NAME) - # Last could be 1 or 2 depending on `default_object_acl_loaded`. - self.assertEqual(kw[-1]['method'], 'PATCH') - self.assertEqual(kw[-1]['path'], '/b/%s' % NAME) - self.assertEqual(kw[-1]['data'], {'defaultObjectAcl': permissive}) - self.assertEqual(kw[-1]['query_params'], {'projection': 'full'}) - - def test_make_public_w_future(self): - self._make_public_w_future_helper(default_object_acl_loaded=True) - - def test_make_public_w_future_reload_default(self): - self._make_public_w_future_helper(default_object_acl_loaded=False) - - def test_make_public_recursive(self): - from gcloud.storage.acl import _ACLEntity - from gcloud.storage.bucket import _BlobIterator - _saved = [] - - class _Blob(object): - _granted = False - - def __init__(self, bucket, name): - self._bucket = bucket - self._name = name - - @property - def acl(self): - return self - - # Faux ACL methods - def all(self): - return self - - def grant_read(self): - self._granted = True - - def save(self, client=None): - _saved.append( - (self._bucket, self._name, self._granted, client)) - - class _Iterator(_BlobIterator): - def get_items_from_response(self, response): - for item in response.get('items', []): - yield _Blob(self.bucket, item['name']) - - NAME = 'name' - BLOB_NAME = 'blob-name' - permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] - after = {'acl': permissive, 'defaultObjectAcl': []} - connection = _Connection(after, {'items': [{'name': BLOB_NAME}]}) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - bucket._iterator_class = _Iterator - bucket.make_public(recursive=True) - self.assertEqual(list(bucket.acl), permissive) - self.assertEqual(list(bucket.default_object_acl), []) - self.assertEqual(_saved, [(bucket, BLOB_NAME, 
True, None)]) - kw = connection._requested - self.assertEqual(len(kw), 2) - self.assertEqual(kw[0]['method'], 'PATCH') - self.assertEqual(kw[0]['path'], '/b/%s' % NAME) - self.assertEqual(kw[0]['data'], {'acl': permissive}) - self.assertEqual(kw[0]['query_params'], {'projection': 'full'}) - self.assertEqual(kw[1]['method'], 'GET') - self.assertEqual(kw[1]['path'], '/b/%s/o' % NAME) - max_results = bucket._MAX_OBJECTS_FOR_ITERATION + 1 - self.assertEqual(kw[1]['query_params'], - {'maxResults': max_results, 'projection': 'full'}) - - def test_make_public_recursive_too_many(self): - from gcloud.storage.acl import _ACLEntity - - PERMISSIVE = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}] - AFTER = {'acl': PERMISSIVE, 'defaultObjectAcl': []} - - NAME = 'name' - BLOB_NAME1 = 'blob-name1' - BLOB_NAME2 = 'blob-name2' - GET_BLOBS_RESP = { - 'items': [ - {'name': BLOB_NAME1}, - {'name': BLOB_NAME2}, - ], - } - connection = _Connection(AFTER, GET_BLOBS_RESP) - client = _Client(connection) - bucket = self._makeOne(client=client, name=NAME) - bucket.acl.loaded = True - bucket.default_object_acl.loaded = True - - # Make the Bucket refuse to make_public with 2 objects. - bucket._MAX_OBJECTS_FOR_ITERATION = 1 - self.assertRaises(ValueError, bucket.make_public, recursive=True) - - -class _Connection(object): - _delete_bucket = False - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - self._deleted_buckets = [] - - @staticmethod - def _is_bucket_path(path): - if not path.startswith('/b/'): # pragma: NO COVER - return False - # Now just ensure the path only has /b/ and one more segment. - return path.count('/') == 2 - - def api_request(self, **kw): - from gcloud.exceptions import NotFound - self._requested.append(kw) - - method = kw.get('method') - path = kw.get('path', '') - if method == 'DELETE' and self._is_bucket_path(path): - self._deleted_buckets.append(kw) - if self._delete_bucket: - return - else: - raise NotFound('miss') - - try: - response, self._responses = self._responses[0], self._responses[1:] - except: - raise NotFound('miss') - else: - return response - - -class _Bucket(object): - path = '/b/name' - name = 'name' - - def __init__(self, client=None): - self.client = client - - -class _Client(object): - - def __init__(self, connection, project=None): - self.connection = connection - self.project = project diff --git a/gcloud/storage/test_client.py b/gcloud/storage/test_client.py deleted file mode 100644 index 472bf786a348..000000000000 --- a/gcloud/storage/test_client.py +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
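# --- Editorial sketch, not part of the original patch ---
# The TestClient cases in the deleted gcloud/storage/test_client.py below
# never touch the network: they swap ``client.connection._http`` for a tiny
# fake that records the request and hands back a canned httplib2-style
# response (see the ``_Http`` fixture near the end of that file).  A
# self-contained version of that pattern, assuming only httplib2 is
# available, looks roughly like this; ``FakeHttp`` is a hypothetical name
# used purely for illustration.

import httplib2


class FakeHttp(object):
    """Record the request kwargs and return a canned response."""

    def __init__(self, headers, content):
        self._response = httplib2.Response(headers)
        self._content = content
        self.called_with = None

    def request(self, **kwargs):
        # Capturing **kwargs mirrors the original ``_Http`` fixture, which
        # only needs the keyword arguments the connection passes along.
        self.called_with = kwargs
        return self._response, self._content


# Typical wiring inside a test, mirroring the fixtures that follow:
#
#     http = client.connection._http = FakeHttp(
#         {'status': '200', 'content-type': 'application/json'},
#         b'{"name": "bucket-name"}')
#     bucket = client.get_bucket('bucket-name')
#     assert http.called_with['method'] == 'GET'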
- -import unittest2 - - -class TestClient(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.client import Client - return Client - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_connection_type(self): - from gcloud.storage.connection import Connection - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - self.assertEqual(client.project, PROJECT) - self.assertTrue(isinstance(client.connection, Connection)) - self.assertTrue(client.connection.credentials is CREDENTIALS) - self.assertTrue(client.current_batch is None) - self.assertEqual(list(client._batch_stack), []) - - def test__push_batch_and__pop_batch(self): - from gcloud.storage.batch import Batch - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - batch1 = Batch(client) - batch2 = Batch(client) - client._push_batch(batch1) - self.assertEqual(list(client._batch_stack), [batch1]) - self.assertTrue(client.current_batch is batch1) - client._push_batch(batch2) - self.assertTrue(client.current_batch is batch2) - # list(_LocalStack) returns in reverse order. - self.assertEqual(list(client._batch_stack), [batch2, batch1]) - self.assertTrue(client._pop_batch() is batch2) - self.assertEqual(list(client._batch_stack), [batch1]) - self.assertTrue(client._pop_batch() is batch1) - self.assertEqual(list(client._batch_stack), []) - - def test_connection_setter(self): - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - client._connection = None # Unset the value from the constructor - client.connection = connection = object() - self.assertTrue(client._connection is connection) - - def test_connection_setter_when_set(self): - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - self.assertRaises(ValueError, setattr, client, 'connection', None) - - def test_connection_getter_no_batch(self): - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - self.assertTrue(client.connection is client._connection) - self.assertTrue(client.current_batch is None) - - def test_connection_getter_with_batch(self): - from gcloud.storage.batch import Batch - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - batch = Batch(client) - client._push_batch(batch) - self.assertTrue(client.connection is not client._connection) - self.assertTrue(client.connection is batch) - self.assertTrue(client.current_batch is batch) - - def test_bucket(self): - from gcloud.storage.bucket import Bucket - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - BUCKET_NAME = 'BUCKET_NAME' - - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - bucket = client.bucket(BUCKET_NAME) - self.assertTrue(isinstance(bucket, Bucket)) - self.assertTrue(bucket.client is client) - self.assertEqual(bucket.name, BUCKET_NAME) - - def test_batch(self): - from gcloud.storage.batch import Batch - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - batch = client.batch() - self.assertTrue(isinstance(batch, Batch)) - self.assertTrue(batch._client is client) - - def test_get_bucket_miss(self): - from gcloud.exceptions import NotFound - - PROJECT = 
'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - NONESUCH = 'nonesuch' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b', - 'nonesuch?projection=noAcl', - ]) - http = client.connection._http = _Http( - {'status': '404', 'content-type': 'application/json'}, - b'{}', - ) - self.assertRaises(NotFound, client.get_bucket, NONESUCH) - self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['uri'], URI) - - def test_get_bucket_hit(self): - from gcloud.storage.bucket import Bucket - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - BLOB_NAME = 'blob-name' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b', - '%s?projection=noAcl' % (BLOB_NAME,), - ]) - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - '{{"name": "{0}"}}'.format(BLOB_NAME).encode('utf-8'), - ) - - bucket = client.get_bucket(BLOB_NAME) - self.assertTrue(isinstance(bucket, Bucket)) - self.assertEqual(bucket.name, BLOB_NAME) - self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['uri'], URI) - - def test_lookup_bucket_miss(self): - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - NONESUCH = 'nonesuch' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b', - 'nonesuch?projection=noAcl', - ]) - http = client.connection._http = _Http( - {'status': '404', 'content-type': 'application/json'}, - b'{}', - ) - bucket = client.lookup_bucket(NONESUCH) - self.assertEqual(bucket, None) - self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['uri'], URI) - - def test_lookup_bucket_hit(self): - from gcloud.storage.bucket import Bucket - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - BLOB_NAME = 'blob-name' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b', - '%s?projection=noAcl' % (BLOB_NAME,), - ]) - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - '{{"name": "{0}"}}'.format(BLOB_NAME).encode('utf-8'), - ) - - bucket = client.lookup_bucket(BLOB_NAME) - self.assertTrue(isinstance(bucket, Bucket)) - self.assertEqual(bucket.name, BLOB_NAME) - self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['uri'], URI) - - def test_create_bucket_conflict(self): - from gcloud.exceptions import Conflict - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - BLOB_NAME = 'blob-name' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b?project=%s' % (PROJECT,), - ]) - http = client.connection._http = _Http( - {'status': '409', 'content-type': 'application/json'}, - '{"error": {"message": "Conflict"}}', - ) - - self.assertRaises(Conflict, client.create_bucket, BLOB_NAME) - self.assertEqual(http._called_with['method'], 'POST') - self.assertEqual(http._called_with['uri'], URI) - - def test_create_bucket_success(self): - from gcloud.storage.bucket import Bucket - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - 
client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - BLOB_NAME = 'blob-name' - URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b?project=%s' % (PROJECT,), - ]) - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - '{{"name": "{0}"}}'.format(BLOB_NAME).encode('utf-8'), - ) - - bucket = client.create_bucket(BLOB_NAME) - self.assertTrue(isinstance(bucket, Bucket)) - self.assertEqual(bucket.name, BLOB_NAME) - self.assertEqual(http._called_with['method'], 'POST') - self.assertEqual(http._called_with['uri'], URI) - - def test_list_buckets_empty(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlparse - - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - EXPECTED_QUERY = { - 'project': [PROJECT], - 'projection': ['noAcl'], - } - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - b'{}', - ) - buckets = list(client.list_buckets()) - self.assertEqual(len(buckets), 0) - self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['body'], None) - - BASE_URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b', - ]) - URI = http._called_with['uri'] - self.assertTrue(URI.startswith(BASE_URI)) - uri_parts = urlparse(URI) - self.assertEqual(parse_qs(uri_parts.query), EXPECTED_QUERY) - - def test_list_buckets_non_empty(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlencode - from six.moves.urllib.parse import urlparse - PROJECT = 'PROJECT' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - BUCKET_NAME = 'bucket-name' - query_params = urlencode({'project': PROJECT, 'projection': 'noAcl'}) - BASE_URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - ]) - URI = '/'.join([BASE_URI, 'b?%s' % (query_params,)]) - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - '{{"items": [{{"name": "{0}"}}]}}'.format(BUCKET_NAME) - .encode('utf-8'), - ) - buckets = list(client.list_buckets()) - self.assertEqual(len(buckets), 1) - self.assertEqual(buckets[0].name, BUCKET_NAME) - self.assertEqual(http._called_with['method'], 'GET') - self.assertTrue(http._called_with['uri'].startswith(BASE_URI)) - self.assertEqual(parse_qs(urlparse(http._called_with['uri']).query), - parse_qs(urlparse(URI).query)) - - def test_list_buckets_all_arguments(self): - from six.moves.urllib.parse import parse_qs - from six.moves.urllib.parse import urlparse - - PROJECT = 'foo-bar' - CREDENTIALS = _Credentials() - client = self._makeOne(project=PROJECT, credentials=CREDENTIALS) - - MAX_RESULTS = 10 - PAGE_TOKEN = 'ABCD' - PREFIX = 'subfolder' - PROJECTION = 'full' - FIELDS = 'items/id,nextPageToken' - EXPECTED_QUERY = { - 'project': [PROJECT], - 'maxResults': [str(MAX_RESULTS)], - 'pageToken': [PAGE_TOKEN], - 'prefix': [PREFIX], - 'projection': [PROJECTION], - 'fields': [FIELDS], - } - - http = client.connection._http = _Http( - {'status': '200', 'content-type': 'application/json'}, - '{"items": []}', - ) - iterator = client.list_buckets( - max_results=MAX_RESULTS, - page_token=PAGE_TOKEN, - prefix=PREFIX, - projection=PROJECTION, - fields=FIELDS, - ) - buckets = list(iterator) - self.assertEqual(buckets, []) - 
self.assertEqual(http._called_with['method'], 'GET') - self.assertEqual(http._called_with['body'], None) - - BASE_URI = '/'.join([ - client.connection.API_BASE_URL, - 'storage', - client.connection.API_VERSION, - 'b' - ]) - URI = http._called_with['uri'] - self.assertTrue(URI.startswith(BASE_URI)) - uri_parts = urlparse(URI) - self.assertEqual(parse_qs(uri_parts.query), EXPECTED_QUERY) - - -class Test__BucketIterator(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.client import _BucketIterator - return _BucketIterator - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - connection = object() - client = _Client(connection) - iterator = self._makeOne(client) - self.assertEqual(iterator.path, '/b') - self.assertEqual(iterator.page_number, 0) - self.assertEqual(iterator.next_page_token, None) - self.assertTrue(iterator.client is client) - - def test_get_items_from_response_empty(self): - connection = object() - client = _Client(connection) - iterator = self._makeOne(client) - self.assertEqual(list(iterator.get_items_from_response({})), []) - - def test_get_items_from_response_non_empty(self): - from gcloud.storage.bucket import Bucket - BLOB_NAME = 'blob-name' - response = {'items': [{'name': BLOB_NAME}]} - connection = object() - client = _Client(connection) - iterator = self._makeOne(client) - buckets = list(iterator.get_items_from_response(response)) - self.assertEqual(len(buckets), 1) - bucket = buckets[0] - self.assertTrue(isinstance(bucket, Bucket)) - self.assertEqual(bucket.name, BLOB_NAME) - - -class _Credentials(object): - - _scopes = None - - @staticmethod - def create_scoped_required(): - return True - - def create_scoped(self, scope): - self._scopes = scope - return self - - -class _Http(object): - - _called_with = None - - def __init__(self, headers, content): - from httplib2 import Response - self._response = Response(headers) - self._content = content - - def request(self, **kw): - self._called_with = kw - return self._response, self._content - - -class _Client(object): - - def __init__(self, connection): - self.connection = connection diff --git a/gcloud/storage/test_connection.py b/gcloud/storage/test_connection.py deleted file mode 100644 index 30ddef173b42..000000000000 --- a/gcloud/storage/test_connection.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
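# --- Editorial sketch, not part of the original patch ---
# The TestConnection cases in the deleted gcloud/storage/test_connection.py
# below pin down how ``Connection.build_api_url`` composes request URLs:
# the class-level ``API_BASE_URL`` and ``API_VERSION`` are joined with a
# fixed ``storage`` segment and the caller's path, and any extra query
# parameters are appended as a query string.  The helper here is
# hypothetical (illustration only, not an API of the library) and simply
# restates the layout those tests assert.

from six.moves.urllib.parse import urlencode


def expected_storage_url(conn, path, query_params=None):
    """Build the URL the deleted tests expect from ``conn.build_api_url``."""
    url = '/'.join([conn.API_BASE_URL, 'storage', conn.API_VERSION,
                    path.lstrip('/')])
    if query_params:
        url += '?' + urlencode(query_params)
    return url

# e.g. ``expected_storage_url(conn, '/foo')`` should equal
# ``conn.build_api_url('/foo')``, matching
# ``test_build_api_url_no_extra_query_params`` below.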
- -import unittest2 - - -class TestConnection(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.storage.connection import Connection - return Connection - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_build_api_url_no_extra_query_params(self): - conn = self._makeOne() - URI = '/'.join([ - conn.API_BASE_URL, - 'storage', - conn.API_VERSION, - 'foo', - ]) - self.assertEqual(conn.build_api_url('/foo'), URI) - - def test_build_api_url_w_extra_query_params(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit - conn = self._makeOne() - uri = conn.build_api_url('/foo', {'bar': 'baz'}) - scheme, netloc, path, qs, _ = urlsplit(uri) - self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL) - self.assertEqual(path, - '/'.join(['', 'storage', conn.API_VERSION, 'foo'])) - parms = dict(parse_qsl(qs)) - self.assertEqual(parms['bar'], 'baz') diff --git a/gcloud/streaming/__init__.py b/gcloud/streaming/__init__.py deleted file mode 100644 index 2017be678403..000000000000 --- a/gcloud/streaming/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Vendored-in for from google-apitools 0.4.11 diff --git a/gcloud/streaming/buffered_stream.py b/gcloud/streaming/buffered_stream.py deleted file mode 100644 index 6ce9299e701d..000000000000 --- a/gcloud/streaming/buffered_stream.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Small helper class to provide a small slice of a stream. - -This class reads ahead to detect if we are at the end of the stream. -""" - - -class BufferedStream(object): - """Buffers a stream, reading ahead to determine if we're at the end. - - :type stream: readable file-like object - :param stream: the stream to be buffered - - :type start: integer - :param start: the starting point in the stream - - :type size: integer - :param size: the size of the buffer - """ - def __init__(self, stream, start, size): - self._stream = stream - self._start_pos = start - self._buffer_pos = 0 - self._buffered_data = self._stream.read(size) - self._stream_at_end = len(self._buffered_data) < size - self._end_pos = self._start_pos + len(self._buffered_data) - - def __repr__(self): - return ('Buffered stream %s from position %s-%s with %s ' - 'bytes remaining' % (self._stream, self._start_pos, - self._end_pos, self._bytes_remaining)) - - def __len__(self): - return len(self._buffered_data) - - @property - def stream_exhausted(self): - """Does the stream have bytes remaining beyond the buffer - - :rtype: boolean - """ - return self._stream_at_end - - @property - def stream_end_position(self): - """Point to which stream was read into the buffer - - :rtype: integer - """ - return self._end_pos - - @property - def _bytes_remaining(self): - """Bytes remaining to be read from the buffer - - :rtype: integer - """ - return len(self._buffered_data) - self._buffer_pos - - def read(self, size=None): - """Read bytes from the buffer. - - :type size: integer or None - :param size: How many bytes to read (defaults to all remaining bytes). - """ - if size is None or size < 0: - raise ValueError( - 'Illegal read of size %s requested on BufferedStream. ' - 'Wrapped stream %s is at position %s-%s, ' - '%s bytes remaining.' 
% - (size, self._stream, self._start_pos, self._end_pos, - self._bytes_remaining)) - - if not self._bytes_remaining: - return b'' - - size = min(size, self._bytes_remaining) - data = self._buffered_data[self._buffer_pos:self._buffer_pos + size] - self._buffer_pos += size - return data diff --git a/gcloud/streaming/exceptions.py b/gcloud/streaming/exceptions.py deleted file mode 100644 index 1b3a4f43286a..000000000000 --- a/gcloud/streaming/exceptions.py +++ /dev/null @@ -1,106 +0,0 @@ -"""Exceptions for generated client libraries.""" - - -class Error(Exception): - """Base class for all exceptions.""" - - -class CommunicationError(Error): - """Any communication error talking to an API server.""" - - -class HttpError(CommunicationError): - """Error making a request. Soon to be HttpError. - - :type response: dict - :param response: headers from the response which returned the error - - :type content: bytes - :param content: payload of the response which returned the error - - :type url: string - :param url: URL of the response which returned the error - """ - def __init__(self, response, content, url): - super(HttpError, self).__init__() - self.response = response - self.content = content - self.url = url - - def __str__(self): - content = self.content.decode('ascii', 'replace') - return 'HttpError accessing <%s>: response: <%s>, content <%s>' % ( - self.url, self.response, content) - - @property - def status_code(self): - """Status code for the response. - - :rtype: integer - :returns: the code - """ - return int(self.response['status']) - - @classmethod - def from_response(cls, http_response): - """Factory: construct an exception from a response. - - :type http_response: :class:`gcloud.streaming.http_wrapper.Response` - :param http_response: the response which returned the error - - :rtype: :class:`HttpError` - """ - return cls(http_response.info, http_response.content, - http_response.request_url) - - -class TransferError(CommunicationError): - """Errors related to transfers.""" - - -class TransferRetryError(TransferError): - """Retryable errors related to transfers.""" - - -class TransferInvalidError(TransferError): - """The given transfer is invalid.""" - - -class RequestError(CommunicationError): - """The request was not successful.""" - - -class RetryAfterError(HttpError): - """The response contained a retry-after header. - - :type response: dict - :param response: headers from the response which returned the error - - :type content: bytes - :param content: payload of the response which returned the error - - :type url: string - :param url: URL of the response which returned the error - - :type retry_after: integer - :param retry_after: seconds to wait before retrying - """ - def __init__(self, response, content, url, retry_after): - super(RetryAfterError, self).__init__(response, content, url) - self.retry_after = int(retry_after) - - @classmethod - def from_response(cls, http_response): - """Factory: construct an exception from a response. 
- - :type http_response: :class:`gcloud.streaming.http_wrapper.Response` - :param http_response: the response which returned the error - - :rtype: :class:`RetryAfterError` - """ - return cls(http_response.info, http_response.content, - http_response.request_url, http_response.retry_after) - - -class BadStatusCodeError(HttpError): - """The request completed but returned a bad status code.""" diff --git a/gcloud/streaming/http_wrapper.py b/gcloud/streaming/http_wrapper.py deleted file mode 100644 index 17c3b67171db..000000000000 --- a/gcloud/streaming/http_wrapper.py +++ /dev/null @@ -1,445 +0,0 @@ -"""HTTP wrapper for apitools. - -This library wraps the underlying http library we use, which is -currently :mod:`httplib2`. -""" - -import collections -import contextlib -import logging -import socket -import time - -import httplib2 -import six -from six.moves import http_client -from six.moves.urllib import parse - -from gcloud.streaming.exceptions import BadStatusCodeError -from gcloud.streaming.exceptions import RequestError -from gcloud.streaming.exceptions import RetryAfterError -from gcloud.streaming.util import calculate_wait_for_retry - - -# 308 and 429 don't have names in httplib. -RESUME_INCOMPLETE = 308 -TOO_MANY_REQUESTS = 429 - - -_REDIRECT_STATUS_CODES = ( - http_client.MOVED_PERMANENTLY, - http_client.FOUND, - http_client.SEE_OTHER, - http_client.TEMPORARY_REDIRECT, - RESUME_INCOMPLETE, -) - - -_RETRYABLE_EXCEPTIONS = ( - http_client.BadStatusLine, - http_client.IncompleteRead, - http_client.ResponseNotReady, - socket.error, - httplib2.ServerNotFoundError, - ValueError, - RequestError, - BadStatusCodeError, - RetryAfterError, -) - - -class _ExceptionRetryArgs( - collections.namedtuple( - '_ExceptionRetryArgs', - ['http', 'http_request', 'exc', 'num_retries', 'max_retry_wait'])): - """Bundle of information for retriable exceptions. - - :type http: :class:`httplib2.Http` (or conforming alternative) - :param http: instance used to perform requests. - - :type http_request: :class:`Request` - :param http_request: the request whose response was a retriable error - - :type exc: :class:`Exception` subclass - :param exc: the exception being raised. - - :type num_retries: integer - :param num_retries: Number of retries consumed; used for exponential - backoff. - """ - - -@contextlib.contextmanager -def _httplib2_debug_level(http_request, level, http=None): - """Temporarily change the value of httplib2.debuglevel, if necessary. - - If http_request has a `loggable_body` distinct from `body`, then we - need to prevent httplib2 from logging the full body. This sets - httplib2.debuglevel for the duration of the `with` block; however, - that alone won't change the value of existing HTTP connections. If - an httplib2.Http object is provided, we'll also change the level on - any cached connections attached to it. - - :type http_request: :class:`Request` - :param http_request: the request to be logged. - - :type level: integer - :param level: the debuglevel for logging. - - :type http: :class:`httplib2.Http`, or ``None`` - :param http: the instance on whose connections to set the debuglevel. - """ - if http_request.loggable_body is None: - yield - return - old_level = httplib2.debuglevel - http_levels = {} - httplib2.debuglevel = level - if http is not None: - for connection_key, connection in http.connections.items(): - # httplib2 stores two kinds of values in this dict, connection - # classes and instances. 
Since the connection types are all - # old-style classes, we can't easily distinguish by connection - # type -- so instead we use the key pattern. - if ':' not in connection_key: - continue - http_levels[connection_key] = connection.debuglevel - connection.set_debuglevel(level) - yield - httplib2.debuglevel = old_level - if http is not None: - for connection_key, old_level in http_levels.items(): - http.connections[connection_key].set_debuglevel(old_level) - - -class Request(object): - """Encapsulates the data for an HTTP request. - - :type url: string - :param url: the URL for the request - - :type http_method: string - :param http_method: the HTTP method to use for the request - - :type headers: mapping or None - :param headers: headers to be sent with the request - - :type body: string - :param body: body to be sent with the request - """ - def __init__(self, url='', http_method='GET', headers=None, body=''): - self.url = url - self.http_method = http_method - self.headers = headers or {} - self.__body = None - self.__loggable_body = None - self.body = body - - @property - def loggable_body(self): - """Request body for logging purposes - - :rtype: string - """ - return self.__loggable_body - - @loggable_body.setter - def loggable_body(self, value): - """Update request body for logging purposes - - :type value: string - :param value: updated body - - :raises: :exc:`RequestError` if the request does not have a body. - """ - if self.body is None: - raise RequestError( - 'Cannot set loggable body on request with no body') - self.__loggable_body = value - - @property - def body(self): - """Request body - - :rtype: string - """ - return self.__body - - @body.setter - def body(self, value): - """Update the request body - - Handles logging and length measurement. - - :type value: string - :param value: updated body - """ - self.__body = value - if value is not None: - # Avoid calling len() which cannot exceed 4GiB in 32-bit python. - body_length = getattr( - self.__body, 'length', None) or len(self.__body) - self.headers['content-length'] = str(body_length) - else: - self.headers.pop('content-length', None) - # This line ensures we don't try to print large requests. - if not isinstance(value, (type(None), six.string_types)): - self.loggable_body = '' - - -def _process_content_range(content_range): - """Convert a 'Content-Range' header into a length for the response. - - Helper for :meth:`Response.length`. - - :type content_range: string - :param content_range: the header value being parsed. - - :rtype: integer - :returns: the length of the response chunk. - """ - _, _, range_spec = content_range.partition(' ') - byte_range, _, _ = range_spec.partition('/') - start, _, end = byte_range.partition('-') - return int(end) - int(start) + 1 - - -# Note: currently the order of fields here is important, since we want -# to be able to pass in the result from httplib2.request. -_ResponseTuple = collections.namedtuple( - 'HttpResponse', ['info', 'content', 'request_url']) - - -class Response(_ResponseTuple): - """Encapsulates data for an HTTP response. - """ - __slots__ = () - - def __len__(self): - return self.length - - @property - def length(self): - """Length of this response. - - Exposed as an attribute since using ``len()`` directly can fail - for responses larger than ``sys.maxint``. 
- - :rtype: integer or long - """ - if 'content-encoding' in self.info and 'content-range' in self.info: - # httplib2 rewrites content-length in the case of a compressed - # transfer; we can't trust the content-length header in that - # case, but we *can* trust content-range, if it's present. - return _process_content_range(self.info['content-range']) - elif 'content-length' in self.info: - return int(self.info.get('content-length')) - elif 'content-range' in self.info: - return _process_content_range(self.info['content-range']) - return len(self.content) - - @property - def status_code(self): - """HTTP status code - - :rtype: integer - """ - return int(self.info['status']) - - @property - def retry_after(self): - """Retry interval (if set). - - :rtype: integer - :returns: interval in seconds - """ - if 'retry-after' in self.info: - return int(self.info['retry-after']) - - @property - def is_redirect(self): - """Does this response contain a redirect - - :rtype: boolean - :returns: True if the status code indicates a redirect and the - 'location' header is present. - """ - return (self.status_code in _REDIRECT_STATUS_CODES and - 'location' in self.info) - - -def _check_response(response): - """Validate a response - - :type response: :class:`Response` - :param response: the response to validate - - :raises: :exc:`gcloud.streaming.exceptions.RequestError` if response is - None, :exc:`gcloud.streaming.exceptions.BadStatusCodeError` if - response status code indicates an error, or - :exc:`gcloud.streaming.exceptions.RetryAfterError` if response - indicates a retry interval. - """ - if response is None: - # Caller shouldn't call us if the response is None, but handle anyway. - raise RequestError( - 'Request did not return a response.') - elif (response.status_code >= 500 or - response.status_code == TOO_MANY_REQUESTS): - raise BadStatusCodeError.from_response(response) - elif response.retry_after: - raise RetryAfterError.from_response(response) - - -def _reset_http_connections(http): - """Rebuild all http connections in the httplib2.Http instance. - - httplib2 overloads the map in http.connections to contain two different - types of values: - { scheme string: connection class } and - { scheme + authority string : actual http connection } - Here we remove all of the entries for actual connections so that on the - next request httplib2 will rebuild them from the connection types. - - :type http: :class:`httplib2.Http` - :param http: the instance whose connections are to be rebuilt - """ - if getattr(http, 'connections', None): - for conn_key in list(http.connections.keys()): - if ':' in conn_key: - del http.connections[conn_key] - - -def _make_api_request_no_retry(http, http_request, redirections=5, - check_response_func=_check_response): - """Send an HTTP request via the given http instance. - - This wrapper exists to handle translation between the plain httplib2 - request/response types and the Request and Response types above. - - :type http: :class:`httplib2.Http` - :param http: an instance which impelements the `Http` API. - - :type http_request: :class:`Request` - :param http_request: the request to send. - - :type redirections: integer - :param redirections: Number of redirects to follow. - - :type check_response_func: function taking (response, content, url). - :param check_response_func: Function to validate the HTTP response. 
- - :rtype: :class:`Response` - :returns: an object representing the server's response - - :raises: :exc:`gcloud.streaming.exceptions.RequestError` if no response - could be parsed. - """ - connection_type = None - # Handle overrides for connection types. This is used if the caller - # wants control over the underlying connection for managing callbacks - # or hash digestion. - if getattr(http, 'connections', None): - url_scheme = parse.urlsplit(http_request.url).scheme - if url_scheme and url_scheme in http.connections: - connection_type = http.connections[url_scheme] - - # Custom printing only at debuglevel 4 - new_debuglevel = 4 if httplib2.debuglevel == 4 else 0 - with _httplib2_debug_level(http_request, new_debuglevel, http=http): - info, content = http.request( - str(http_request.url), method=str(http_request.http_method), - body=http_request.body, headers=http_request.headers, - redirections=redirections, connection_type=connection_type) - - if info is None: - raise RequestError() - - response = Response(info, content, http_request.url) - check_response_func(response) - return response - - -def make_api_request(http, http_request, - retries=7, - max_retry_wait=60, - redirections=5, - check_response_func=_check_response, - wo_retry_func=_make_api_request_no_retry): - """Send an HTTP request via the given http, performing error/retry handling. - - :type http: :class:`httplib2.Http` - :param http: an instance which implements the `Http` API. - - :type http_request: :class:`Request` - :param http_request: the request to send. - - :type retries: integer - :param retries: Number of retries to attempt on retryable - responses (such as 429 or 5XX). - - :type max_retry_wait: integer - :param max_retry_wait: Maximum number of seconds to wait when retrying. - - :type redirections: integer - :param redirections: Number of redirects to follow. - - :type check_response_func: function taking (response, content, url). - :param check_response_func: Function to validate the HTTP response. - - :type wo_retry_func: function taking - (http, request, redirections, check_response_func) - :param wo_retry_func: Function to make HTTP request without retries. - - :rtype: :class:`Response` - :returns: an object representing the server's response - - :raises: :exc:`gcloud.streaming.exceptions.RequestError` if no response - could be parsed. - """ - retry = 0 - while True: - try: - return wo_retry_func( - http, http_request, redirections=redirections, - check_response_func=check_response_func) - except _RETRYABLE_EXCEPTIONS as exc: - retry += 1 - if retry >= retries: - raise - retry_after = getattr(exc, 'retry_after', None) - if retry_after is None: - retry_after = calculate_wait_for_retry(retry, max_retry_wait) - - _reset_http_connections(http) - logging.debug('Retrying request to url %s after exception %s', - http_request.url, type(exc).__name__) - time.sleep(retry_after) - - -_HTTP_FACTORIES = [] - - -def _register_http_factory(factory): - """Register a custom HTTP factory. - - :type factory: callable taking keyword arguments, returning an Http - instance (or an instance implementing the same API). - :param factory: the new factory (it may return ``None`` to defer to - a later factory or the default). - """ - _HTTP_FACTORIES.append(factory) - - -def get_http(**kwds): - """Construct an Http instance. - - :param kwds: keyword arguments to pass to factories.
- - :rtype: :class:`httplib2.Http` (or a workalike) - """ - for factory in _HTTP_FACTORIES: - http = factory(**kwds) - if http is not None: - return http - return httplib2.Http(**kwds) diff --git a/gcloud/streaming/stream_slice.py b/gcloud/streaming/stream_slice.py deleted file mode 100644 index ada3c66e2169..000000000000 --- a/gcloud/streaming/stream_slice.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Small helper class to provide a small slice of a stream.""" - -from six.moves import http_client - - -class StreamSlice(object): - """Provides a slice-like object for streams. - - :type stream: readable file-like object - :param stream: the stream to be buffered - - :type max_bytes: integer - :param max_bytes: maximum number of bytes to return in the slice - """ - def __init__(self, stream, max_bytes): - self._stream = stream - self._remaining_bytes = max_bytes - self._max_bytes = max_bytes - - def __repr__(self): - return 'Slice of stream %s with %s/%s bytes not yet read' % ( - self._stream, self._remaining_bytes, self._max_bytes) - - def __len__(self): - return self._max_bytes - - def __nonzero__(self): - # For 32-bit python2.x, len() cannot exceed a 32-bit number; avoid - # accidental len() calls from httplib in the form of "if this_object:". - return bool(self._max_bytes) - - @property - def length(self): - """Maximum number of bytes to return in the slice. - - .. note:: - - For 32-bit python2.x, len() cannot exceed a 32-bit number. - - :rtype: integer - """ - return self._max_bytes - - def read(self, size=None): - """Read bytes from the slice. - - Compared to other streams, there is one case where we may - unexpectedly raise an exception on read: if the underlying stream - is exhausted (i.e. returns no bytes on read), and the size of this - slice indicates we should still be able to read more bytes, we - raise :exc:`IncompleteRead`. - - :type size: integer or None - :param size: If provided, read no more than size bytes from the stream. - - :rtype: bytes - :returns: bytes read from this slice. 
- - :raises: :exc:`IncompleteRead` - """ - if size is not None: - read_size = min(size, self._remaining_bytes) - else: - read_size = self._remaining_bytes - data = self._stream.read(read_size) - if read_size > 0 and not data: - raise http_client.IncompleteRead( - self._max_bytes - self._remaining_bytes, self._max_bytes) - self._remaining_bytes -= len(data) - return data diff --git a/gcloud/streaming/test_buffered_stream.py b/gcloud/streaming/test_buffered_stream.py deleted file mode 100644 index e18a9e6698dd..000000000000 --- a/gcloud/streaming/test_buffered_stream.py +++ /dev/null @@ -1,103 +0,0 @@ -import unittest2 - - -class Test_BufferedStream(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.buffered_stream import BufferedStream - return BufferedStream - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_start_zero_longer_than_buffer(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 0 - BUFSIZE = 4 - stream = BytesIO(CONTENT) - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertTrue(bufstream._stream is stream) - self.assertEqual(bufstream._start_pos, START) - self.assertEqual(bufstream._buffer_pos, 0) - self.assertEqual(bufstream._buffered_data, CONTENT[:BUFSIZE]) - self.assertEqual(len(bufstream), BUFSIZE) - self.assertFalse(bufstream.stream_exhausted) - self.assertEqual(bufstream.stream_end_position, BUFSIZE) - - def test_ctor_start_nonzero_shorter_than_buffer(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 8 - BUFSIZE = 10 - stream = BytesIO(CONTENT) - stream.read(START) # already consumed - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertTrue(bufstream._stream is stream) - self.assertEqual(bufstream._start_pos, START) - self.assertEqual(bufstream._buffer_pos, 0) - self.assertEqual(bufstream._buffered_data, CONTENT[START:]) - self.assertEqual(len(bufstream), len(CONTENT) - START) - self.assertTrue(bufstream.stream_exhausted) - self.assertEqual(bufstream.stream_end_position, len(CONTENT)) - - def test__bytes_remaining_start_zero_longer_than_buffer(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 0 - BUFSIZE = 4 - stream = BytesIO(CONTENT) - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertEqual(bufstream._bytes_remaining, BUFSIZE) - - def test__bytes_remaining_start_zero_shorter_than_buffer(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 8 - BUFSIZE = 10 - stream = BytesIO(CONTENT) - stream.read(START) # already consumed - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertEqual(bufstream._bytes_remaining, len(CONTENT) - START) - - def test_read_w_none(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 0 - BUFSIZE = 4 - stream = BytesIO(CONTENT) - bufstream = self._makeOne(stream, START, BUFSIZE) - with self.assertRaises(ValueError): - bufstream.read(None) - - def test_read_w_negative_size(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 0 - BUFSIZE = 4 - stream = BytesIO(CONTENT) - bufstream = self._makeOne(stream, START, BUFSIZE) - with self.assertRaises(ValueError): - bufstream.read(-2) - - def test_read_from_start(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - START = 0 - BUFSIZE = 4 - stream = BytesIO(CONTENT) - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertEqual(bufstream.read(4), CONTENT[:4]) - - def test_read_exhausted(self): - from io import BytesIO - CONTENT = 
b'CONTENT GOES HERE' - START = len(CONTENT) - BUFSIZE = 10 - stream = BytesIO(CONTENT) - stream.read(START) # already consumed - bufstream = self._makeOne(stream, START, BUFSIZE) - self.assertTrue(bufstream.stream_exhausted) - self.assertEqual(bufstream.stream_end_position, len(CONTENT)) - self.assertEqual(bufstream._bytes_remaining, 0) - self.assertEqual(bufstream.read(10), b'') diff --git a/gcloud/streaming/test_exceptions.py b/gcloud/streaming/test_exceptions.py deleted file mode 100644 index 4f7ffc35a61e..000000000000 --- a/gcloud/streaming/test_exceptions.py +++ /dev/null @@ -1,87 +0,0 @@ -import unittest2 - - -class Test_HttpError(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.exceptions import HttpError - return HttpError - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - RESPONSE = {'status': '404'} - CONTENT = b'CONTENT' - URL = 'http://www.example.com' - exception = self._makeOne(RESPONSE, CONTENT, URL) - self.assertEqual(exception.response, RESPONSE) - self.assertEqual(exception.content, CONTENT) - self.assertEqual(exception.url, URL) - self.assertEqual(exception.status_code, 404) - self.assertEqual( - str(exception), - "HttpError accessing : " - "response: <{'status': '404'}>, content ") - - def test_from_response(self): - RESPONSE = {'status': '404'} - CONTENT = b'CONTENT' - URL = 'http://www.example.com' - - class _Response(object): - info = RESPONSE - content = CONTENT - request_url = URL - - klass = self._getTargetClass() - exception = klass.from_response(_Response()) - self.assertTrue(isinstance(exception, klass)) - self.assertEqual(exception.response, RESPONSE) - self.assertEqual(exception.content, CONTENT) - self.assertEqual(exception.url, URL) - - -class Test_RetryAfterError(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.exceptions import RetryAfterError - return RetryAfterError - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - RESPONSE = {'status': '404'} - CONTENT = b'CONTENT' - URL = 'http://www.example.com' - RETRY_AFTER = 60 - exception = self._makeOne(RESPONSE, CONTENT, URL, RETRY_AFTER) - self.assertEqual(exception.response, RESPONSE) - self.assertEqual(exception.content, CONTENT) - self.assertEqual(exception.url, URL) - self.assertEqual(exception.retry_after, RETRY_AFTER) - self.assertEqual( - str(exception), - "HttpError accessing : " - "response: <{'status': '404'}>, content ") - - def test_from_response(self): - RESPONSE = {'status': '404'} - CONTENT = b'CONTENT' - URL = 'http://www.example.com' - RETRY_AFTER = 60 - - class _Response(object): - info = RESPONSE - content = CONTENT - request_url = URL - retry_after = RETRY_AFTER - - klass = self._getTargetClass() - exception = klass.from_response(_Response()) - self.assertTrue(isinstance(exception, klass)) - self.assertEqual(exception.response, RESPONSE) - self.assertEqual(exception.content, CONTENT) - self.assertEqual(exception.url, URL) - self.assertEqual(exception.retry_after, RETRY_AFTER) diff --git a/gcloud/streaming/test_http_wrapper.py b/gcloud/streaming/test_http_wrapper.py deleted file mode 100644 index eb9fb02beb10..000000000000 --- a/gcloud/streaming/test_http_wrapper.py +++ /dev/null @@ -1,556 +0,0 @@ -import unittest2 - - -class Test__httplib2_debug_level(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.http_wrapper import _httplib2_debug_level - return _httplib2_debug_level - - def _makeOne(self, 
*args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_wo_loggable_body_wo_http(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - - request = _Request() - LEVEL = 1 - _httplib2 = _Dummy(debuglevel=0) - with _Monkey(MUT, httplib2=_httplib2): - with self._makeOne(request, LEVEL): - self.assertEqual(_httplib2.debuglevel, 0) - - def test_w_loggable_body_wo_http(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - - request = _Request(loggable_body=object()) - LEVEL = 1 - _httplib2 = _Dummy(debuglevel=0) - with _Monkey(MUT, httplib2=_httplib2): - with self._makeOne(request, LEVEL): - self.assertEqual(_httplib2.debuglevel, LEVEL) - self.assertEqual(_httplib2.debuglevel, 0) - - def test_w_loggable_body_w_http(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - - class _Connection(object): - debuglevel = 0 - - def set_debuglevel(self, value): - self.debuglevel = value - - request = _Request(loggable_body=object()) - LEVEL = 1 - _httplib2 = _Dummy(debuglevel=0) - update_me = _Connection() - skip_me = _Connection() - connections = {'update:me': update_me, 'skip_me': skip_me} - _http = _Dummy(connections=connections) - with _Monkey(MUT, httplib2=_httplib2): - with self._makeOne(request, LEVEL, _http): - self.assertEqual(_httplib2.debuglevel, LEVEL) - self.assertEqual(update_me.debuglevel, LEVEL) - self.assertEqual(skip_me.debuglevel, 0) - self.assertEqual(_httplib2.debuglevel, 0) - self.assertEqual(update_me.debuglevel, 0) - self.assertEqual(skip_me.debuglevel, 0) - - -class Test_Request(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.http_wrapper import Request - return Request - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - request = self._makeOne() - self.assertEqual(request.url, '') - self.assertEqual(request.http_method, 'GET') - self.assertEqual(request.headers, {'content-length': '0'}) - self.assertEqual(request.body, '') - self.assertEqual(request.loggable_body, None) - - def test_loggable_body_setter_w_body_None(self): - from gcloud.streaming.exceptions import RequestError - request = self._makeOne(body=None) - with self.assertRaises(RequestError): - request.loggable_body = 'abc' - - def test_body_setter_w_None(self): - request = self._makeOne() - request.loggable_body = 'abc' - request.body = None - self.assertEqual(request.headers, {}) - self.assertEqual(request.body, None) - self.assertEqual(request.loggable_body, 'abc') - - def test_body_setter_w_non_string(self): - request = self._makeOne() - request.loggable_body = 'abc' - request.body = body = _Dummy(length=123) - self.assertEqual(request.headers, {'content-length': '123'}) - self.assertTrue(request.body is body) - self.assertEqual(request.loggable_body, '') - - -class Test_Response(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.http_wrapper import Response - return Response - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - info = {'status': '200'} - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(len(response), len(CONTENT)) - self.assertEqual(response.status_code, 200) - self.assertEqual(response.retry_after, None) - self.assertFalse(response.is_redirect) - - def test_length_w_content_encoding_w_content_range(self): - CONTENT = 
'CONTENT' - URL = 'http://example.com/api' - RANGE = 'bytes 0-122/5678' - info = { - 'status': '200', - 'content-length': len(CONTENT), - 'content-encoding': 'testing', - 'content-range': RANGE, - } - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(len(response), 123) - - def test_length_w_content_encoding_wo_content_range(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - info = { - 'status': '200', - 'content-length': len(CONTENT), - 'content-encoding': 'testing', - } - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(len(response), len(CONTENT)) - - def test_length_w_content_length_w_content_range(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - RANGE = 'bytes 0-12/5678' - info = { - 'status': '200', - 'content-length': len(CONTENT) * 2, - 'content-range': RANGE, - } - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(len(response), len(CONTENT) * 2) - - def test_length_wo_content_length_w_content_range(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - RANGE = 'bytes 0-122/5678' - info = { - 'status': '200', - 'content-range': RANGE, - } - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(len(response), 123) - - def test_retry_after_w_header(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - info = { - 'status': '200', - 'retry-after': '123', - } - response = self._makeOne(info, CONTENT, URL) - self.assertEqual(response.retry_after, 123) - - def test_is_redirect_w_code_wo_location(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - info = { - 'status': '301', - } - response = self._makeOne(info, CONTENT, URL) - self.assertFalse(response.is_redirect) - - def test_is_redirect_w_code_w_location(self): - CONTENT = 'CONTENT' - URL = 'http://example.com/api' - info = { - 'status': '301', - 'location': 'http://example.com/other', - } - response = self._makeOne(info, CONTENT, URL) - self.assertTrue(response.is_redirect) - - -class Test__check_response(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import _check_response - return _check_response(*args, **kw) - - def test_w_none(self): - from gcloud.streaming.exceptions import RequestError - with self.assertRaises(RequestError): - self._callFUT(None) - - def test_w_TOO_MANY_REQUESTS(self): - from gcloud.streaming.exceptions import BadStatusCodeError - from gcloud.streaming.http_wrapper import TOO_MANY_REQUESTS - - with self.assertRaises(BadStatusCodeError): - self._callFUT(_Response(TOO_MANY_REQUESTS)) - - def test_w_50x(self): - from gcloud.streaming.exceptions import BadStatusCodeError - - with self.assertRaises(BadStatusCodeError): - self._callFUT(_Response(500)) - - with self.assertRaises(BadStatusCodeError): - self._callFUT(_Response(503)) - - def test_w_retry_after(self): - from gcloud.streaming.exceptions import RetryAfterError - - with self.assertRaises(RetryAfterError): - self._callFUT(_Response(200, 20)) - - def test_pass(self): - self._callFUT(_Response(200)) - - -class Test__reset_http_connections(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import _reset_http_connections - return _reset_http_connections(*args, **kw) - - def test_wo_connections(self): - http = object() - self._callFUT(http) - - def test_w_connections(self): - connections = {'delete:me': object(), 'skip_me': object()} - http = _Dummy(connections=connections) - self._callFUT(http) - self.assertFalse('delete:me' in connections) - 
self.assertTrue('skip_me' in connections) - - -class Test___make_api_request_no_retry(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import _make_api_request_no_retry - return _make_api_request_no_retry(*args, **kw) - - def _verify_requested(self, http, request, - redirections=5, connection_type=None): - self.assertEqual(len(http._requested), 1) - url, kw = http._requested[0] - self.assertEqual(url, request.url) - self.assertEqual(kw['method'], request.http_method) - self.assertEqual(kw['body'], request.body) - self.assertEqual(kw['headers'], request.headers) - self.assertEqual(kw['redirections'], redirections) - self.assertEqual(kw['connection_type'], connection_type) - - def test_defaults_wo_connections(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - INFO = {'status': '200'} - CONTENT = 'CONTENT' - _http = _Http((INFO, CONTENT)) - _httplib2 = _Dummy(debuglevel=1) - _request = _Request() - _checked = [] - with _Monkey(MUT, httplib2=_httplib2): - response = self._callFUT(_http, _request, - check_response_func=_checked.append) - self.assertTrue(isinstance(response, MUT.Response)) - self.assertEqual(response.info, INFO) - self.assertEqual(response.content, CONTENT) - self.assertEqual(response.request_url, _request.url) - self.assertEqual(_checked, [response]) - self._verify_requested(_http, _request) - - def test_w_explicit_redirections(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - INFO = {'status': '200'} - CONTENT = 'CONTENT' - _http = _Http((INFO, CONTENT)) - _httplib2 = _Dummy(debuglevel=1) - _request = _Request() - _checked = [] - with _Monkey(MUT, httplib2=_httplib2): - response = self._callFUT(_http, _request, - redirections=10, - check_response_func=_checked.append) - self.assertTrue(isinstance(response, MUT.Response)) - self.assertEqual(response.info, INFO) - self.assertEqual(response.content, CONTENT) - self.assertEqual(response.request_url, _request.url) - self.assertEqual(_checked, [response]) - self._verify_requested(_http, _request, redirections=10) - - def test_w_http_connections_miss(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - INFO = {'status': '200'} - CONTENT = 'CONTENT' - CONN_TYPE = object() - _http = _Http((INFO, CONTENT)) - _http.connections = {'https': CONN_TYPE} - _httplib2 = _Dummy(debuglevel=1) - _request = _Request() - _checked = [] - with _Monkey(MUT, httplib2=_httplib2): - response = self._callFUT(_http, _request, - check_response_func=_checked.append) - self.assertTrue(isinstance(response, MUT.Response)) - self.assertEqual(response.info, INFO) - self.assertEqual(response.content, CONTENT) - self.assertEqual(response.request_url, _request.url) - self.assertEqual(_checked, [response]) - self._verify_requested(_http, _request) - - def test_w_http_connections_hit(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - INFO = {'status': '200'} - CONTENT = 'CONTENT' - CONN_TYPE = object() - _http = _Http((INFO, CONTENT)) - _http.connections = {'http': CONN_TYPE} - _httplib2 = _Dummy(debuglevel=1) - _request = _Request() - _checked = [] - with _Monkey(MUT, httplib2=_httplib2): - response = self._callFUT(_http, _request, - check_response_func=_checked.append) - self.assertTrue(isinstance(response, MUT.Response)) - self.assertEqual(response.info, INFO) - self.assertEqual(response.content, CONTENT) - 
self.assertEqual(response.request_url, _request.url) - self.assertEqual(_checked, [response]) - self._verify_requested(_http, _request, connection_type=CONN_TYPE) - - def test_w_request_returning_None(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - from gcloud.streaming.exceptions import RequestError - INFO = None - CONTENT = None - CONN_TYPE = object() - _http = _Http((INFO, CONTENT)) - _http.connections = {'http': CONN_TYPE} - _httplib2 = _Dummy(debuglevel=1) - _request = _Request() - with _Monkey(MUT, httplib2=_httplib2): - with self.assertRaises(RequestError): - self._callFUT(_http, _request) - self._verify_requested(_http, _request, connection_type=CONN_TYPE) - - -class Test_make_api_request(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import make_api_request - return make_api_request(*args, **kw) - - def test_wo_exception(self): - HTTP, REQUEST, RESPONSE = object(), object(), object() - _created, _checked = [], [] - - def _wo_exception(*args, **kw): - _created.append((args, kw)) - return RESPONSE - - response = self._callFUT(HTTP, REQUEST, - wo_retry_func=_wo_exception, - check_response_func=_checked.append) - - self.assertTrue(response is RESPONSE) - expected_kw = { - 'redirections': 5, - 'check_response_func': _checked.append, - } - self.assertEqual(_created, [((HTTP, REQUEST), expected_kw)]) - self.assertEqual(_checked, []) # not called by '_wo_exception' - - def test_w_exceptions_lt_max_retries(self): - from gcloud.streaming.exceptions import RetryAfterError - HTTP, RESPONSE = object(), object() - REQUEST = _Request() - WAIT = 10, - _created, _checked = [], [] - _counter = [None] * 4 - - def _wo_exception(*args, **kw): - _created.append((args, kw)) - if _counter: - _counter.pop() - raise RetryAfterError(RESPONSE, '', REQUEST.url, 0.1) - return RESPONSE - - response = self._callFUT(HTTP, REQUEST, - retries=5, - max_retry_wait=WAIT, - wo_retry_func=_wo_exception, - check_response_func=_checked.append) - - self.assertTrue(response is RESPONSE) - self.assertEqual(len(_created), 5) - expected_kw = { - 'redirections': 5, - 'check_response_func': _checked.append, - } - for attempt in _created: - self.assertEqual(attempt, ((HTTP, REQUEST), expected_kw)) - self.assertEqual(_checked, []) # not called by '_wo_exception' - - def test_w_exceptions_gt_max_retries(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - HTTP = object() - REQUEST = _Request() - WAIT = 10, - _created, _checked = [], [] - - def _wo_exception(*args, **kw): - _created.append((args, kw)) - raise ValueError('Retryable') - - with _Monkey(MUT, calculate_wait_for_retry=lambda *ignored: 0.1): - with self.assertRaises(ValueError): - self._callFUT(HTTP, REQUEST, - retries=3, - max_retry_wait=WAIT, - wo_retry_func=_wo_exception, - check_response_func=_checked.append) - - self.assertEqual(len(_created), 3) - expected_kw = { - 'redirections': 5, - 'check_response_func': _checked.append, - } - for attempt in _created: - self.assertEqual(attempt, ((HTTP, REQUEST), expected_kw)) - self.assertEqual(_checked, []) # not called by '_wo_exception' - - -class Test__register_http_factory(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import _register_http_factory - return _register_http_factory(*args, **kw) - - def test_it(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - _factories = [] - - FACTORY = 
object() - - with _Monkey(MUT, _HTTP_FACTORIES=_factories): - self._callFUT(FACTORY) - self.assertEqual(_factories, [FACTORY]) - - -class Test_get_http(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.http_wrapper import get_http - return get_http(*args, **kw) - - def test_wo_registered_factories(self): - from httplib2 import Http - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - _factories = [] - - with _Monkey(MUT, _HTTP_FACTORIES=_factories): - http = self._callFUT() - - self.assertTrue(isinstance(http, Http)) - - def test_w_registered_factories(self): - from gcloud._testing import _Monkey - from gcloud.streaming import http_wrapper as MUT - - FOUND = object() - - _misses = [] - - def _miss(**kw): - _misses.append(kw) - return None - - _hits = [] - - def _hit(**kw): - _hits.append(kw) - return FOUND - - _factories = [_miss, _hit] - - with _Monkey(MUT, _HTTP_FACTORIES=_factories): - http = self._callFUT(foo='bar') - - self.assertTrue(http is FOUND) - self.assertEqual(_misses, [{'foo': 'bar'}]) - self.assertEqual(_hits, [{'foo': 'bar'}]) - - -class _Dummy(object): - def __init__(self, **kw): - self.__dict__.update(kw) - - -class _Request(object): - __slots__ = ('url', 'http_method', 'body', 'headers', 'loggable_body',) - URL = 'http://example.com/api' - - def __init__(self, url=URL, http_method='GET', body='', - loggable_body=None): - self.url = url - self.http_method = http_method - self.body = body - self.headers = {} - self.loggable_body = loggable_body - - -class _Response(object): - content = '' - request_url = _Request.URL - - def __init__(self, status_code, retry_after=None): - self.info = {'status': status_code} - self.status_code = status_code - self.retry_after = retry_after - - -class _Http(object): - - def __init__(self, *responses): - self._responses = responses - self._requested = [] - - def request(self, url, **kw): - self._requested.append((url, kw)) - response, self._responses = self._responses[0], self._responses[1:] - return response diff --git a/gcloud/streaming/test_stream_slice.py b/gcloud/streaming/test_stream_slice.py deleted file mode 100644 index 52ec591d2e55..000000000000 --- a/gcloud/streaming/test_stream_slice.py +++ /dev/null @@ -1,68 +0,0 @@ -import unittest2 - - -class Test_StreamSlice(unittest2.TestCase): - - def _getTargetClass(self): - from gcloud.streaming.stream_slice import StreamSlice - return StreamSlice - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - MAXSIZE = 4 - stream = BytesIO(CONTENT) - stream_slice = self._makeOne(stream, MAXSIZE) - self.assertTrue(stream_slice._stream is stream) - self.assertEqual(stream_slice._remaining_bytes, MAXSIZE) - self.assertEqual(stream_slice._max_bytes, MAXSIZE) - self.assertEqual(len(stream_slice), MAXSIZE) - self.assertEqual(stream_slice.length, MAXSIZE) - - def test___nonzero___empty(self): - from io import BytesIO - CONTENT = b'' - MAXSIZE = 0 - stream = BytesIO(CONTENT) - stream_slice = self._makeOne(stream, MAXSIZE) - self.assertFalse(stream_slice) - - def test___nonzero___nonempty(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - MAXSIZE = 4 - stream = BytesIO(CONTENT) - stream_slice = self._makeOne(stream, MAXSIZE) - self.assertTrue(stream_slice) - - def test_read_exhausted(self): - from io import BytesIO - from six.moves import http_client - CONTENT = b'' - MAXSIZE = 4 - stream = BytesIO(CONTENT) - 
stream_slice = self._makeOne(stream, MAXSIZE) - with self.assertRaises(http_client.IncompleteRead): - stream_slice.read() - - def test_read_implicit_size(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - MAXSIZE = 4 - stream = BytesIO(CONTENT) - stream_slice = self._makeOne(stream, MAXSIZE) - self.assertEqual(stream_slice.read(), CONTENT[:MAXSIZE]) - self.assertEqual(stream_slice._remaining_bytes, 0) - - def test_read_explicit_size(self): - from io import BytesIO - CONTENT = b'CONTENT GOES HERE' - MAXSIZE = 4 - SIZE = 3 - stream = BytesIO(CONTENT) - stream_slice = self._makeOne(stream, MAXSIZE) - self.assertEqual(stream_slice.read(SIZE), CONTENT[:SIZE]) - self.assertEqual(stream_slice._remaining_bytes, MAXSIZE - SIZE) diff --git a/gcloud/streaming/test_transfer.py b/gcloud/streaming/test_transfer.py deleted file mode 100644 index d705822ea330..000000000000 --- a/gcloud/streaming/test_transfer.py +++ /dev/null @@ -1,1889 +0,0 @@ -# pylint: disable=C0302 -import unittest2 - - -class Test__Transfer(unittest2.TestCase): - URL = 'http://example.com/api' - - def _getTargetClass(self): - from gcloud.streaming.transfer import _Transfer - return _Transfer - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - from gcloud.streaming.transfer import _DEFAULT_CHUNKSIZE - stream = _Stream() - xfer = self._makeOne(stream) - self.assertTrue(xfer.stream is stream) - self.assertFalse(xfer.close_stream) - self.assertEqual(xfer.chunksize, _DEFAULT_CHUNKSIZE) - self.assertTrue(xfer.auto_transfer) - self.assertTrue(xfer.bytes_http is None) - self.assertTrue(xfer.http is None) - self.assertEqual(xfer.num_retries, 5) - self.assertTrue(xfer.url is None) - self.assertFalse(xfer.initialized) - - def test_ctor_explicit(self): - stream = _Stream() - HTTP = object() - CHUNK_SIZE = 1 << 18 - NUM_RETRIES = 8 - xfer = self._makeOne(stream, - close_stream=True, - chunksize=CHUNK_SIZE, - auto_transfer=False, - http=HTTP, - num_retries=NUM_RETRIES) - self.assertTrue(xfer.stream is stream) - self.assertTrue(xfer.close_stream) - self.assertEqual(xfer.chunksize, CHUNK_SIZE) - self.assertFalse(xfer.auto_transfer) - self.assertTrue(xfer.bytes_http is HTTP) - self.assertTrue(xfer.http is HTTP) - self.assertEqual(xfer.num_retries, NUM_RETRIES) - - def test_bytes_http_fallback_to_http(self): - stream = _Stream() - HTTP = object() - xfer = self._makeOne(stream, http=HTTP) - self.assertTrue(xfer.bytes_http is HTTP) - - def test_bytes_http_setter(self): - stream = _Stream() - HTTP = object() - BYTES_HTTP = object() - xfer = self._makeOne(stream, http=HTTP) - xfer.bytes_http = BYTES_HTTP - self.assertTrue(xfer.bytes_http is BYTES_HTTP) - - def test_num_retries_setter_invalid(self): - stream = _Stream() - xfer = self._makeOne(stream) - with self.assertRaises(ValueError): - xfer.num_retries = object() - - def test_num_retries_setter_negative(self): - stream = _Stream() - xfer = self._makeOne(stream) - with self.assertRaises(ValueError): - xfer.num_retries = -1 - - def test__initialize_not_already_initialized_w_http(self): - HTTP = object() - stream = _Stream() - xfer = self._makeOne(stream) - xfer._initialize(HTTP, self.URL) - self.assertTrue(xfer.initialized) - self.assertTrue(xfer.http is HTTP) - self.assertTrue(xfer.url is self.URL) - - def test__initialize_not_already_initialized_wo_http(self): - from httplib2 import Http - stream = _Stream() - xfer = self._makeOne(stream) - xfer._initialize(None, self.URL) - self.assertTrue(xfer.initialized) - 
self.assertTrue(isinstance(xfer.http, Http)) - self.assertTrue(xfer.url is self.URL) - - def test__initialize_w_existing_http(self): - HTTP_1, HTTP_2 = object(), object() - stream = _Stream() - xfer = self._makeOne(stream, http=HTTP_1) - xfer._initialize(HTTP_2, self.URL) - self.assertTrue(xfer.initialized) - self.assertTrue(xfer.http is HTTP_1) - self.assertTrue(xfer.url is self.URL) - - def test__initialize_already_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - URL_2 = 'http://example.com/other' - HTTP_1, HTTP_2 = object(), object() - stream = _Stream() - xfer = self._makeOne(stream) - xfer._initialize(HTTP_1, self.URL) - with self.assertRaises(TransferInvalidError): - xfer._initialize(HTTP_2, URL_2) - - def test__ensure_initialized_hit(self): - HTTP = object() - stream = _Stream() - xfer = self._makeOne(stream) - xfer._initialize(HTTP, self.URL) - xfer._ensure_initialized() # no raise - - def test__ensure_initialized_miss(self): - from gcloud.streaming.exceptions import TransferInvalidError - stream = _Stream() - xfer = self._makeOne(stream) - with self.assertRaises(TransferInvalidError): - xfer._ensure_initialized() - - def test__ensure_uninitialized_hit(self): - stream = _Stream() - xfer = self._makeOne(stream) - xfer._ensure_uninitialized() # no raise - - def test__ensure_uninitialized_miss(self): - from gcloud.streaming.exceptions import TransferInvalidError - stream = _Stream() - HTTP = object() - xfer = self._makeOne(stream) - xfer._initialize(HTTP, self.URL) - with self.assertRaises(TransferInvalidError): - xfer._ensure_uninitialized() - - def test___del___closes_stream(self): - - stream = _Stream() - xfer = self._makeOne(stream, close_stream=True) - - self.assertFalse(stream._closed) - del xfer - self.assertTrue(stream._closed) - - -class Test_Download(unittest2.TestCase): - URL = "http://example.com/api" - - def _getTargetClass(self): - from gcloud.streaming.transfer import Download - return Download - - def _makeOne(self, *args, **kw): - return self._getTargetClass()(*args, **kw) - - def test_ctor_defaults(self): - stream = _Stream() - download = self._makeOne(stream) - self.assertTrue(download.stream is stream) - self.assertTrue(download._initial_response is None) - self.assertEqual(download.progress, 0) - self.assertTrue(download.total_size is None) - self.assertTrue(download.encoding is None) - - def test_ctor_w_kwds(self): - stream = _Stream() - CHUNK_SIZE = 123 - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - self.assertTrue(download.stream is stream) - self.assertEqual(download.chunksize, CHUNK_SIZE) - - def test_ctor_w_total_size(self): - stream = _Stream() - SIZE = 123 - download = self._makeOne(stream, total_size=SIZE) - self.assertTrue(download.stream is stream) - self.assertEqual(download.total_size, SIZE) - - def test_from_file_w_existing_file_no_override(self): - import os - klass = self._getTargetClass() - with _tempdir() as tempdir: - filename = os.path.join(tempdir, 'file.out') - with open(filename, 'w') as fileobj: - fileobj.write('EXISTING FILE') - with self.assertRaises(ValueError): - klass.from_file(filename) - - def test_from_file_w_existing_file_w_override_wo_auto_transfer(self): - import os - klass = self._getTargetClass() - with _tempdir() as tempdir: - filename = os.path.join(tempdir, 'file.out') - with open(filename, 'w') as fileobj: - fileobj.write('EXISTING FILE') - download = klass.from_file(filename, overwrite=True, - auto_transfer=False) - self.assertFalse(download.auto_transfer) - del download # 
closes stream - with open(filename, 'rb') as fileobj: - self.assertEqual(fileobj.read(), b'') - - def test_from_stream_defaults(self): - stream = _Stream() - klass = self._getTargetClass() - download = klass.from_stream(stream) - self.assertTrue(download.stream is stream) - self.assertTrue(download.auto_transfer) - self.assertTrue(download.total_size is None) - - def test_from_stream_explicit(self): - CHUNK_SIZE = 1 << 18 - SIZE = 123 - stream = _Stream() - klass = self._getTargetClass() - download = klass.from_stream(stream, auto_transfer=False, - total_size=SIZE, chunksize=CHUNK_SIZE) - self.assertTrue(download.stream is stream) - self.assertFalse(download.auto_transfer) - self.assertEqual(download.total_size, SIZE) - self.assertEqual(download.chunksize, CHUNK_SIZE) - - def test_configure_request(self): - CHUNK_SIZE = 100 - download = self._makeOne(_Stream(), chunksize=CHUNK_SIZE) - request = _Dummy(headers={}) - url_builder = _Dummy(query_params={}) - download.configure_request(request, url_builder) - self.assertEqual(request.headers, {'Range': 'bytes=0-99'}) - self.assertEqual(url_builder.query_params, {'alt': 'media'}) - - def test__set_total_wo_content_range_wo_existing_total(self): - info = {} - download = self._makeOne(_Stream()) - download._set_total(info) - self.assertEqual(download.total_size, 0) - - def test__set_total_wo_content_range_w_existing_total(self): - SIZE = 123 - info = {} - download = self._makeOne(_Stream(), total_size=SIZE) - download._set_total(info) - self.assertEqual(download.total_size, SIZE) - - def test__set_total_w_content_range_w_existing_total(self): - SIZE = 123 - info = {'content-range': 'bytes 123-234/4567'} - download = self._makeOne(_Stream(), total_size=SIZE) - download._set_total(info) - self.assertEqual(download.total_size, 4567) - - def test__set_total_w_content_range_w_asterisk_total(self): - info = {'content-range': 'bytes 123-234/*'} - download = self._makeOne(_Stream()) - download._set_total(info) - self.assertEqual(download.total_size, 0) - - def test_initialize_download_already_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - request = _Request() - download = self._makeOne(_Stream()) - download._initialize(None, self.URL) - with self.assertRaises(TransferInvalidError): - download.initialize_download(request, http=object()) - - def test_initialize_download_wo_autotransfer(self): - request = _Request() - http = object() - download = self._makeOne(_Stream(), auto_transfer=False) - download.initialize_download(request, http) - self.assertTrue(download.http is http) - self.assertEqual(download.url, request.url) - - def test_initialize_download_w_autotransfer_failing(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import HttpError - request = _Request() - http = object() - download = self._makeOne(_Stream(), auto_transfer=True) - - response = _makeResponse(http_client.BAD_REQUEST) - requester = _MakeRequest(response) - - with _Monkey(MUT, make_api_request=requester): - with self.assertRaises(HttpError): - download.initialize_download(request, http) - - self.assertTrue(len(requester._requested), 1) - self.assertTrue(requester._requested[0][0] is request) - - def test_initialize_download_w_autotransfer_w_content_location(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - REDIRECT_URL = 'http://example.com/other' - request = 
_Request() - http = object() - info = {'content-location': REDIRECT_URL} - download = self._makeOne(_Stream(), auto_transfer=True) - - response = _makeResponse(http_client.NO_CONTENT, info) - requester = _MakeRequest(response) - - with _Monkey(MUT, make_api_request=requester): - download.initialize_download(request, http) - - self.assertTrue(download._initial_response is None) - self.assertEqual(download.total_size, 0) - self.assertTrue(download.http is http) - self.assertEqual(download.url, REDIRECT_URL) - self.assertTrue(len(requester._requested), 1) - self.assertTrue(requester._requested[0][0] is request) - - def test__normalize_start_end_w_end_w_start_lt_0(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - - with self.assertRaises(TransferInvalidError): - download._normalize_start_end(-1, 0) - - def test__normalize_start_end_w_end_w_start_gt_total(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - download._set_total({'content-range': 'bytes 0-1/2'}) - - with self.assertRaises(TransferInvalidError): - download._normalize_start_end(3, 0) - - def test__normalize_start_end_w_end_lt_start(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - download._set_total({'content-range': 'bytes 0-1/2'}) - - with self.assertRaises(TransferInvalidError): - download._normalize_start_end(1, 0) - - def test__normalize_start_end_w_end_gt_start(self): - download = self._makeOne(_Stream()) - download._set_total({'content-range': 'bytes 0-1/2'}) - self.assertEqual(download._normalize_start_end(1, 2), (1, 1)) - - def test__normalize_start_end_wo_end_w_start_lt_0(self): - download = self._makeOne(_Stream()) - download._set_total({'content-range': 'bytes 0-1/2'}) - self.assertEqual(download._normalize_start_end(-2), (0, 1)) - self.assertEqual(download._normalize_start_end(-1), (1, 1)) - - def test__normalize_start_end_wo_end_w_start_ge_0(self): - download = self._makeOne(_Stream()) - download._set_total({'content-range': 'bytes 0-1/100'}) - self.assertEqual(download._normalize_start_end(0), (0, 99)) - self.assertEqual(download._normalize_start_end(1), (1, 99)) - - def test__set_range_header_w_start_lt_0(self): - request = _Request() - download = self._makeOne(_Stream()) - download._set_range_header(request, -1) - self.assertEqual(request.headers['range'], 'bytes=-1') - - def test__set_range_header_w_start_ge_0_wo_end(self): - request = _Request() - download = self._makeOne(_Stream()) - download._set_range_header(request, 0) - self.assertEqual(request.headers['range'], 'bytes=0-') - - def test__set_range_header_w_start_ge_0_w_end(self): - request = _Request() - download = self._makeOne(_Stream()) - download._set_range_header(request, 0, 1) - self.assertEqual(request.headers['range'], 'bytes=0-1') - - def test__compute_end_byte_w_start_lt_0_w_end(self): - download = self._makeOne(_Stream()) - self.assertEqual(download._compute_end_byte(-1, 1), 1) - - def test__compute_end_byte_w_start_ge_0_wo_end_w_use_chunks(self): - CHUNK_SIZE = 5 - download = self._makeOne(_Stream(), chunksize=CHUNK_SIZE) - self.assertEqual(download._compute_end_byte(0, use_chunks=True), 4) - - def test__compute_end_byte_w_start_ge_0_w_end_w_use_chunks(self): - CHUNK_SIZE = 5 - download = self._makeOne(_Stream(), chunksize=CHUNK_SIZE) - self.assertEqual(download._compute_end_byte(0, 3, use_chunks=True), 3) - self.assertEqual(download._compute_end_byte(0, 5, 
use_chunks=True), 4) - - def test__compute_end_byte_w_start_ge_0_w_end_w_total_size(self): - CHUNK_SIZE = 50 - download = self._makeOne(_Stream(), chunksize=CHUNK_SIZE) - download._set_total({'content-range': 'bytes 0-1/10'}) - self.assertEqual(download._compute_end_byte(0, 100, use_chunks=False), - 9) - self.assertEqual(download._compute_end_byte(0, 8, use_chunks=False), 8) - - def test__compute_end_byte_w_start_ge_0_wo_end_w_total_size(self): - CHUNK_SIZE = 50 - download = self._makeOne(_Stream(), chunksize=CHUNK_SIZE) - download._set_total({'content-range': 'bytes 0-1/10'}) - self.assertEqual(download._compute_end_byte(0, use_chunks=False), 9) - - def test__get_chunk_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - - with self.assertRaises(TransferInvalidError): - download._get_chunk(0, 10) - - def test__get_chunk(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - http = object() - download = self._makeOne(_Stream()) - download._initialize(http, self.URL) - response = _makeResponse(http_client.OK) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - found = download._get_chunk(0, 10) - - self.assertTrue(found is response) - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers['range'], 'bytes=0-10') - - def test__process_response_w_FORBIDDEN(self): - from gcloud.streaming.exceptions import HttpError - from six.moves import http_client - download = self._makeOne(_Stream()) - response = _makeResponse(http_client.FORBIDDEN) - with self.assertRaises(HttpError): - download._process_response(response) - - def test__process_response_w_NOT_FOUND(self): - from gcloud.streaming.exceptions import HttpError - from six.moves import http_client - download = self._makeOne(_Stream()) - response = _makeResponse(http_client.NOT_FOUND) - with self.assertRaises(HttpError): - download._process_response(response) - - def test__process_response_w_other_error(self): - from gcloud.streaming.exceptions import TransferRetryError - from six.moves import http_client - download = self._makeOne(_Stream()) - response = _makeResponse(http_client.BAD_REQUEST) - with self.assertRaises(TransferRetryError): - download._process_response(response) - - def test__process_response_w_OK_wo_encoding(self): - from six.moves import http_client - stream = _Stream() - download = self._makeOne(stream) - response = _makeResponse(http_client.OK, content='OK') - found = download._process_response(response) - self.assertTrue(found is response) - self.assertEqual(stream._written, ['OK']) - self.assertEqual(download.progress, 2) - self.assertEqual(download.encoding, None) - - def test__process_response_w_PARTIAL_CONTENT_w_encoding(self): - from six.moves import http_client - stream = _Stream() - download = self._makeOne(stream) - info = {'content-encoding': 'blah'} - response = _makeResponse(http_client.OK, info, 'PARTIAL') - found = download._process_response(response) - self.assertTrue(found is response) - self.assertEqual(stream._written, ['PARTIAL']) - self.assertEqual(download.progress, 7) - self.assertEqual(download.encoding, 'blah') - - def test__process_response_w_REQUESTED_RANGE_NOT_SATISFIABLE(self): - from six.moves import http_client - stream = _Stream() - download = self._makeOne(stream) - response = _makeResponse( - 
http_client.REQUESTED_RANGE_NOT_SATISFIABLE) - found = download._process_response(response) - self.assertTrue(found is response) - self.assertEqual(stream._written, []) - self.assertEqual(download.progress, 0) - self.assertEqual(download.encoding, None) - - def test__process_response_w_NO_CONTENT(self): - from six.moves import http_client - stream = _Stream() - download = self._makeOne(stream) - response = _makeResponse(status_code=http_client.NO_CONTENT) - found = download._process_response(response) - self.assertTrue(found is response) - self.assertEqual(stream._written, ['']) - self.assertEqual(download.progress, 0) - self.assertEqual(download.encoding, None) - - def test_get_range_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - with self.assertRaises(TransferInvalidError): - download.get_range(0, 10) - - def test_get_range_wo_total_size_complete(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - REQ_RANGE = 'bytes=0-%d' % (LEN,) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN) - http = object() - stream = _Stream() - download = self._makeOne(stream) - download._initialize(http, self.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.get_range(0, LEN) - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, [CONTENT]) - self.assertEqual(download.total_size, LEN) - - def test_get_range_wo_total_size_wo_end(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - START = 5 - CHUNK_SIZE = 123 - REQ_RANGE = 'bytes=%d-%d' % (START, START + CHUNK_SIZE - 1,) - RESP_RANGE = 'bytes %d-%d/%d' % (START, LEN - 1, LEN) - http = object() - stream = _Stream() - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - download._initialize(http, self.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT[START:]) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.get_range(START) - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, [CONTENT[START:]]) - self.assertEqual(download.total_size, LEN) - - def test_get_range_w_total_size_partial(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - PARTIAL_LEN = 5 - REQ_RANGE = 'bytes=0-%d' % (PARTIAL_LEN,) - RESP_RANGE = 'bytes 0-%d/%d' % (PARTIAL_LEN, LEN,) - http = object() - stream = _Stream() - download = self._makeOne(stream, total_size=LEN) - download._initialize(http, self.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT[:PARTIAL_LEN]) - response.length = LEN - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.get_range(0, PARTIAL_LEN) - - 
self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, [CONTENT[:PARTIAL_LEN]]) - self.assertEqual(download.total_size, LEN) - - def test_get_range_w_empty_chunk(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import TransferRetryError - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - START = 5 - CHUNK_SIZE = 123 - REQ_RANGE = 'bytes=%d-%d' % (START, START + CHUNK_SIZE - 1,) - RESP_RANGE = 'bytes %d-%d/%d' % (START, LEN - 1, LEN) - http = object() - stream = _Stream() - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - download._initialize(http, self.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - with self.assertRaises(TransferRetryError): - download.get_range(START) - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, ['']) - self.assertEqual(download.total_size, LEN) - - def test_get_range_w_total_size_wo_use_chunks(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - CHUNK_SIZE = 3 - REQ_RANGE = 'bytes=0-%d' % (LEN - 1,) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN,) - http = object() - stream = _Stream() - download = self._makeOne(stream, total_size=LEN, chunksize=CHUNK_SIZE) - download._initialize(http, self.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.get_range(0, use_chunks=False) - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, [CONTENT]) - self.assertEqual(download.total_size, LEN) - - def test_get_range_w_multiple_chunks(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDE' - LEN = len(CONTENT) - CHUNK_SIZE = 3 - REQ_RANGE_1 = 'bytes=0-%d' % (CHUNK_SIZE - 1,) - RESP_RANGE_1 = 'bytes 0-%d/%d' % (CHUNK_SIZE - 1, LEN) - REQ_RANGE_2 = 'bytes=%d-%d' % (CHUNK_SIZE, LEN - 1) - RESP_RANGE_2 = 'bytes %d-%d/%d' % (CHUNK_SIZE, LEN - 1, LEN) - http = object() - stream = _Stream() - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - download._initialize(http, self.URL) - info_1 = {'content-range': RESP_RANGE_1} - response_1 = _makeResponse(http_client.PARTIAL_CONTENT, info_1, - CONTENT[:CHUNK_SIZE]) - info_2 = {'content-range': RESP_RANGE_2} - response_2 = _makeResponse(http_client.OK, info_2, - CONTENT[CHUNK_SIZE:]) - requester = _MakeRequest(response_1, response_2) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.get_range(0) - - self.assertTrue(len(requester._requested), 2) - request_1 = requester._requested[0][0] - self.assertEqual(request_1.headers, {'range': REQ_RANGE_1}) - request_2 = requester._requested[1][0] - self.assertEqual(request_2.headers, {'range': REQ_RANGE_2}) - 
self.assertEqual(stream._written, [b'ABC', b'DE']) - self.assertEqual(download.total_size, LEN) - - def test_stream_file_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - download = self._makeOne(_Stream()) - - with self.assertRaises(TransferInvalidError): - download.stream_file() - - def test_stream_file_w_initial_response_complete(self): - from six.moves import http_client - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN,) - stream = _Stream() - download = self._makeOne(stream, total_size=LEN) - info = {'content-range': RESP_RANGE} - download._initial_response = _makeResponse( - http_client.OK, info, CONTENT) - http = object() - download._initialize(http, _Request.URL) - - download.stream_file() - - self.assertEqual(stream._written, [CONTENT]) - self.assertEqual(download.total_size, LEN) - - def test_stream_file_w_initial_response_incomplete(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CHUNK_SIZE = 3 - CONTENT = b'ABCDEF' - LEN = len(CONTENT) - RESP_RANGE_1 = 'bytes 0-%d/%d' % (CHUNK_SIZE - 1, LEN,) - REQ_RANGE_2 = 'bytes=%d-%d' % (CHUNK_SIZE, LEN - 1) - RESP_RANGE_2 = 'bytes %d-%d/%d' % (CHUNK_SIZE, LEN - 1, LEN,) - stream = _Stream() - http = object() - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - info_1 = {'content-range': RESP_RANGE_1} - download._initial_response = _makeResponse( - http_client.PARTIAL_CONTENT, info_1, CONTENT[:CHUNK_SIZE]) - info_2 = {'content-range': RESP_RANGE_2} - response_2 = _makeResponse( - http_client.OK, info_2, CONTENT[CHUNK_SIZE:]) - requester = _MakeRequest(response_2) - - download._initialize(http, _Request.URL) - - request = _Request() - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.stream_file() - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE_2}) - self.assertEqual(stream._written, - [CONTENT[:CHUNK_SIZE], CONTENT[CHUNK_SIZE:]]) - self.assertEqual(download.total_size, LEN) - - def test_stream_file_wo_initial_response_wo_total_size(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - CHUNK_SIZE = 123 - REQ_RANGE = 'bytes=0-%d' % (CHUNK_SIZE - 1) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN,) - stream = _Stream() - http = object() - download = self._makeOne(stream, chunksize=CHUNK_SIZE) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT) - requester = _MakeRequest(response) - download._initialize(http, _Request.URL) - - request = _Request() - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - download.stream_file() - - self.assertTrue(len(requester._requested), 1) - request = requester._requested[0][0] - self.assertEqual(request.headers, {'range': REQ_RANGE}) - self.assertEqual(stream._written, [CONTENT]) - self.assertEqual(download.total_size, LEN) - - -class Test_Upload(unittest2.TestCase): - URL = "http://example.com/api" - MIME_TYPE = 'application/octet-stream' - UPLOAD_URL = 'http://example.com/upload/id=foobar' - - def _getTargetClass(self): - from gcloud.streaming.transfer import Upload - return Upload - - def _makeOne(self, stream, mime_type=MIME_TYPE, *args, **kw): - return self._getTargetClass()(stream, mime_type, *args, **kw) - - def 
test_ctor_defaults(self): - from gcloud.streaming.transfer import _DEFAULT_CHUNKSIZE - stream = _Stream() - upload = self._makeOne(stream) - self.assertTrue(upload.stream is stream) - self.assertTrue(upload._final_response is None) - self.assertTrue(upload._server_chunk_granularity is None) - self.assertFalse(upload.complete) - self.assertEqual(upload.mime_type, self.MIME_TYPE) - self.assertEqual(upload.progress, 0) - self.assertTrue(upload.strategy is None) - self.assertTrue(upload.total_size is None) - self.assertEqual(upload.chunksize, _DEFAULT_CHUNKSIZE) - - def test_ctor_w_kwds(self): - stream = _Stream() - CHUNK_SIZE = 123 - upload = self._makeOne(stream, chunksize=CHUNK_SIZE) - self.assertTrue(upload.stream is stream) - self.assertEqual(upload.mime_type, self.MIME_TYPE) - self.assertEqual(upload.chunksize, CHUNK_SIZE) - - def test_from_file_w_nonesuch_file(self): - klass = self._getTargetClass() - filename = '~nosuchuser/file.txt' - with self.assertRaises(OSError): - klass.from_file(filename) - - def test_from_file_wo_mimetype_w_unguessable_filename(self): - import os - klass = self._getTargetClass() - CONTENT = b'EXISTING FILE W/ UNGUESSABLE MIMETYPE' - with _tempdir() as tempdir: - filename = os.path.join(tempdir, 'file.unguessable') - with open(filename, 'wb') as fileobj: - fileobj.write(CONTENT) - with self.assertRaises(ValueError): - klass.from_file(filename) - - def test_from_file_wo_mimetype_w_guessable_filename(self): - import os - klass = self._getTargetClass() - CONTENT = b'EXISTING FILE W/ GUESSABLE MIMETYPE' - with _tempdir() as tempdir: - filename = os.path.join(tempdir, 'file.txt') - with open(filename, 'wb') as fileobj: - fileobj.write(CONTENT) - upload = klass.from_file(filename) - self.assertEqual(upload.mime_type, 'text/plain') - self.assertTrue(upload.auto_transfer) - self.assertEqual(upload.total_size, len(CONTENT)) - upload._stream.close() - - def test_from_file_w_mimetype_w_auto_transfer_w_kwds(self): - import os - klass = self._getTargetClass() - CONTENT = b'EXISTING FILE W/ GUESSABLE MIMETYPE' - CHUNK_SIZE = 3 - with _tempdir() as tempdir: - filename = os.path.join(tempdir, 'file.unguessable') - with open(filename, 'wb') as fileobj: - fileobj.write(CONTENT) - upload = klass.from_file( - filename, - mime_type=self.MIME_TYPE, - auto_transfer=False, - chunksize=CHUNK_SIZE) - self.assertEqual(upload.mime_type, self.MIME_TYPE) - self.assertFalse(upload.auto_transfer) - self.assertEqual(upload.total_size, len(CONTENT)) - self.assertEqual(upload.chunksize, CHUNK_SIZE) - upload._stream.close() - - def test_from_stream_wo_mimetype(self): - klass = self._getTargetClass() - stream = _Stream() - with self.assertRaises(ValueError): - klass.from_stream(stream, mime_type=None) - - def test_from_stream_defaults(self): - klass = self._getTargetClass() - stream = _Stream() - upload = klass.from_stream(stream, mime_type=self.MIME_TYPE) - self.assertEqual(upload.mime_type, self.MIME_TYPE) - self.assertTrue(upload.auto_transfer) - self.assertEqual(upload.total_size, None) - - def test_from_stream_explicit(self): - klass = self._getTargetClass() - stream = _Stream() - SIZE = 10 - CHUNK_SIZE = 3 - upload = klass.from_stream( - stream, - mime_type=self.MIME_TYPE, - auto_transfer=False, - total_size=SIZE, - chunksize=CHUNK_SIZE) - self.assertEqual(upload.mime_type, self.MIME_TYPE) - self.assertFalse(upload.auto_transfer) - self.assertEqual(upload.total_size, SIZE) - self.assertEqual(upload.chunksize, CHUNK_SIZE) - - def test_strategy_setter_invalid(self): - upload = 
self._makeOne(_Stream()) - with self.assertRaises(ValueError): - upload.strategy = object() - with self.assertRaises(ValueError): - upload.strategy = 'unknown' - - def test_strategy_setter_SIMPLE_UPLOAD(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - upload = self._makeOne(_Stream()) - upload.strategy = SIMPLE_UPLOAD - self.assertEqual(upload.strategy, SIMPLE_UPLOAD) - - def test_strategy_setter_RESUMABLE_UPLOAD(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - upload = self._makeOne(_Stream()) - upload.strategy = RESUMABLE_UPLOAD - self.assertEqual(upload.strategy, RESUMABLE_UPLOAD) - - def test_total_size_setter_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - SIZE = 123 - upload = self._makeOne(_Stream) - http = object() - upload._initialize(http, _Request.URL) - with self.assertRaises(TransferInvalidError): - upload.total_size = SIZE - - def test_total_size_setter_not_initialized(self): - SIZE = 123 - upload = self._makeOne(_Stream) - upload.total_size = SIZE - self.assertEqual(upload.total_size, SIZE) - - def test__set_default_strategy_w_existing_strategy(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - config = _Dummy( - resumable_path='/resumable/endpoint', - simple_multipart=True, - simple_path='/upload/endpoint', - ) - request = _Request() - upload = self._makeOne(_Stream) - upload.strategy = RESUMABLE_UPLOAD - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, RESUMABLE_UPLOAD) - - def test__set_default_strategy_wo_resumable_path(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - config = _Dummy( - resumable_path=None, - simple_multipart=True, - simple_path='/upload/endpoint', - ) - request = _Request() - upload = self._makeOne(_Stream()) - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, SIMPLE_UPLOAD) - - def test__set_default_strategy_w_total_size_gt_threshhold(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD_THRESHOLD - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - config = _UploadConfig() - request = _Request() - upload = self._makeOne( - _Stream(), total_size=RESUMABLE_UPLOAD_THRESHOLD + 1) - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, RESUMABLE_UPLOAD) - - def test__set_default_strategy_w_body_wo_multipart(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - config = _UploadConfig() - config.simple_multipart = False - request = _Request(body=CONTENT) - upload = self._makeOne(_Stream(), total_size=len(CONTENT)) - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, RESUMABLE_UPLOAD) - - def test__set_default_strategy_w_body_w_multipart_wo_simple_path(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - config = _UploadConfig() - config.simple_path = None - request = _Request(body=CONTENT) - upload = self._makeOne(_Stream(), total_size=len(CONTENT)) - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, RESUMABLE_UPLOAD) - - def test__set_default_strategy_w_body_w_multipart_w_simple_path(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - config = _UploadConfig() - request = _Request(body=CONTENT) - upload = self._makeOne(_Stream(), total_size=len(CONTENT)) - upload._set_default_strategy(config, request) - self.assertEqual(upload.strategy, SIMPLE_UPLOAD) - - def 
test_configure_request_w_total_size_gt_max_size(self): - MAX_SIZE = 1000 - config = _UploadConfig() - config.max_size = MAX_SIZE - request = _Request() - url_builder = _Dummy() - upload = self._makeOne(_Stream(), total_size=MAX_SIZE + 1) - with self.assertRaises(ValueError): - upload.configure_request(config, request, url_builder) - - def test_configure_request_w_invalid_mimetype(self): - config = _UploadConfig() - config.accept = ('text/*',) - request = _Request() - url_builder = _Dummy() - upload = self._makeOne(_Stream()) - with self.assertRaises(ValueError): - upload.configure_request(config, request, url_builder) - - def test_configure_request_w_simple_wo_body(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - CONTENT = b'CONTENT' - config = _UploadConfig() - request = _Request() - url_builder = _Dummy(query_params={}) - upload = self._makeOne(_Stream(CONTENT)) - upload.strategy = SIMPLE_UPLOAD - - upload.configure_request(config, request, url_builder) - - self.assertEqual(url_builder.query_params, {'uploadType': 'media'}) - self.assertEqual(url_builder.relative_path, config.simple_path) - - self.assertEqual(request.headers, {'content-type': self.MIME_TYPE}) - self.assertEqual(request.body, CONTENT) - self.assertEqual(request.loggable_body, '') - - def test_configure_request_w_simple_w_body(self): - from email.parser import Parser - from gcloud.streaming.transfer import SIMPLE_UPLOAD - CONTENT = b'CONTENT' - BODY = b'BODY' - config = _UploadConfig() - request = _Request(body=BODY) - request.headers['content-type'] = 'text/plain' - url_builder = _Dummy(query_params={}) - upload = self._makeOne(_Stream(CONTENT)) - upload.strategy = SIMPLE_UPLOAD - - upload.configure_request(config, request, url_builder) - - self.assertEqual(url_builder.query_params, {'uploadType': 'multipart'}) - self.assertEqual(url_builder.relative_path, config.simple_path) - - parser = Parser() - self.assertEqual(list(request.headers), ['content-type']) - ctype, boundary = [x.strip() - for x in request.headers['content-type'].split(';')] - self.assertEqual(ctype, 'multipart/related') - self.assertTrue(boundary.startswith('boundary="==')) - self.assertTrue(boundary.endswith('=="')) - - divider = '--' + boundary[len('boundary="'):-1] - chunks = request.body.split(divider)[1:-1] # discard prolog / epilog - self.assertEqual(len(chunks), 2) - - text_msg = parser.parsestr(chunks[0].strip()) - self.assertEqual(dict(text_msg._headers), - {'Content-Type': 'text/plain', - 'MIME-Version': '1.0'}) - self.assertEqual(text_msg._payload, BODY.decode('ascii')) - - app_msg = parser.parsestr(chunks[1].strip()) - self.assertEqual(dict(app_msg._headers), - {'Content-Type': self.MIME_TYPE, - 'Content-Transfer-Encoding': 'binary', - 'MIME-Version': '1.0'}) - self.assertEqual(app_msg._payload, CONTENT.decode('ascii')) - self.assertTrue('' in request.loggable_body) - - def test_configure_request_w_resumable_wo_total_size(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'CONTENT' - config = _UploadConfig() - request = _Request() - url_builder = _Dummy(query_params={}) - upload = self._makeOne(_Stream(CONTENT)) - upload.strategy = RESUMABLE_UPLOAD - - upload.configure_request(config, request, url_builder) - - self.assertEqual(url_builder.query_params, {'uploadType': 'resumable'}) - self.assertEqual(url_builder.relative_path, config.resumable_path) - - self.assertEqual(request.headers, - {'X-Upload-Content-Type': self.MIME_TYPE}) - - def test_configure_request_w_resumable_w_total_size(self): - from 
gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'CONTENT' - LEN = len(CONTENT) - config = _UploadConfig() - request = _Request() - url_builder = _Dummy(query_params={}) - upload = self._makeOne(_Stream(CONTENT)) - upload.total_size = LEN - upload.strategy = RESUMABLE_UPLOAD - - upload.configure_request(config, request, url_builder) - - self.assertEqual(url_builder.query_params, {'uploadType': 'resumable'}) - self.assertEqual(url_builder.relative_path, config.resumable_path) - - self.assertEqual(request.headers, - {'X-Upload-Content-Type': self.MIME_TYPE, - 'X-Upload-Content-Length': '%d' % (LEN,)}) - - def test_refresh_upload_state_w_simple_strategy(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - upload = self._makeOne(_Stream()) - upload.strategy = SIMPLE_UPLOAD - upload.refresh_upload_state() # no-op - - def test_refresh_upload_state_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - upload = self._makeOne(_Stream()) - upload.strategy = RESUMABLE_UPLOAD - with self.assertRaises(TransferInvalidError): - upload.refresh_upload_state() - - def test_refresh_upload_state_w_OK(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN,) - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=LEN) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, _Request.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.OK, info, CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - upload.refresh_upload_state() - - self.assertTrue(upload.complete) - self.assertEqual(upload.progress, LEN) - self.assertEqual(stream.tell(), LEN) - self.assertTrue(upload._final_response is response) - - def test_refresh_upload_state_w_CREATED(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - RESP_RANGE = 'bytes 0-%d/%d' % (LEN - 1, LEN,) - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=LEN) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, _Request.URL) - info = {'content-range': RESP_RANGE} - response = _makeResponse(http_client.CREATED, info, CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - upload.refresh_upload_state() - - self.assertTrue(upload.complete) - self.assertEqual(upload.progress, LEN) - self.assertEqual(stream.tell(), LEN) - self.assertTrue(upload._final_response is response) - - def test_refresh_upload_state_w_RESUME_INCOMPLETE_w_range(self): - from gcloud.streaming import transfer as MUT - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - from gcloud._testing import _Monkey - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - LAST = 5 - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=LEN) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, _Request.URL) - info = {'range': '0-%d' % (LAST - 1,)} - response = 
_makeResponse(RESUME_INCOMPLETE, info, CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - upload.refresh_upload_state() - - self.assertFalse(upload.complete) - self.assertEqual(upload.progress, LAST) - self.assertEqual(stream.tell(), LAST) - self.assertFalse(upload._final_response is response) - - def test_refresh_upload_state_w_RESUME_INCOMPLETE_wo_range(self): - from gcloud.streaming import transfer as MUT - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - from gcloud._testing import _Monkey - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=LEN) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, _Request.URL) - response = _makeResponse(RESUME_INCOMPLETE, content=CONTENT) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - upload.refresh_upload_state() - - self.assertFalse(upload.complete) - self.assertEqual(upload.progress, 0) - self.assertEqual(stream.tell(), 0) - self.assertFalse(upload._final_response is response) - - def test_refresh_upload_state_w_error(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import HttpError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - LEN = len(CONTENT) - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=LEN) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, _Request.URL) - response = _makeResponse(http_client.FORBIDDEN) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - with self.assertRaises(HttpError): - upload.refresh_upload_state() - - def test__get_range_header_miss(self): - upload = self._makeOne(_Stream()) - response = _makeResponse(None) - self.assertTrue(upload._get_range_header(response) is None) - - def test__get_range_header_w_Range(self): - upload = self._makeOne(_Stream()) - response = _makeResponse(None, {'Range': '123'}) - self.assertEqual(upload._get_range_header(response), '123') - - def test__get_range_header_w_range(self): - upload = self._makeOne(_Stream()) - response = _makeResponse(None, {'range': '123'}) - self.assertEqual(upload._get_range_header(response), '123') - - def test_initialize_upload_no_strategy(self): - request = _Request() - upload = self._makeOne(_Stream()) - with self.assertRaises(ValueError): - upload.initialize_upload(request, http=object()) - - def test_initialize_upload_simple_w_http(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - request = _Request() - upload = self._makeOne(_Stream()) - upload.strategy = SIMPLE_UPLOAD - upload.initialize_upload(request, http=object()) # no-op - - def test_initialize_upload_resumable_already_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - request = _Request() - upload = self._makeOne(_Stream()) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(None, self.URL) - with self.assertRaises(TransferInvalidError): - upload.initialize_upload(request, http=object()) - - def test_initialize_upload_w_http_resumable_not_initialized_w_error(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from 
gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import HttpError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - request = _Request() - upload = self._makeOne(_Stream()) - upload.strategy = RESUMABLE_UPLOAD - response = _makeResponse(http_client.FORBIDDEN) - requester = _MakeRequest(response) - - with _Monkey(MUT, make_api_request=requester): - with self.assertRaises(HttpError): - upload.initialize_upload(request, http=object()) - - def test_initialize_upload_w_http_wo_auto_transfer_w_OK(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - request = _Request() - upload = self._makeOne(_Stream(), auto_transfer=False) - upload.strategy = RESUMABLE_UPLOAD - info = {'location': self.UPLOAD_URL} - response = _makeResponse(http_client.OK, info) - requester = _MakeRequest(response) - - with _Monkey(MUT, make_api_request=requester): - upload.initialize_upload(request, http=object()) - - self.assertEqual(upload._server_chunk_granularity, None) - self.assertEqual(upload.url, self.UPLOAD_URL) - self.assertEqual(requester._responses, []) - self.assertEqual(len(requester._requested), 1) - self.assertTrue(requester._requested[0][0] is request) - - def test_initialize_upload_w_granularity_w_auto_transfer_w_OK(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - http = object() - request = _Request() - upload = self._makeOne(_Stream(CONTENT), chunksize=1000) - upload.strategy = RESUMABLE_UPLOAD - info = {'X-Goog-Upload-Chunk-Granularity': '100', - 'location': self.UPLOAD_URL} - response = _makeResponse(http_client.OK, info) - chunk_response = _makeResponse(http_client.OK) - requester = _MakeRequest(response, chunk_response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - upload.initialize_upload(request, http) - - self.assertEqual(upload._server_chunk_granularity, 100) - self.assertEqual(upload.url, self.UPLOAD_URL) - self.assertEqual(requester._responses, []) - self.assertEqual(len(requester._requested), 2) - self.assertTrue(requester._requested[0][0] is request) - chunk_request = requester._requested[1][0] - self.assertTrue(isinstance(chunk_request, _Request)) - self.assertEqual(chunk_request.url, self.UPLOAD_URL) - self.assertEqual(chunk_request.http_method, 'PUT') - self.assertEqual(chunk_request.body, CONTENT) - - def test__last_byte(self): - upload = self._makeOne(_Stream()) - self.assertEqual(upload._last_byte('123-456'), 456) - - def test__validate_chunksize_wo__server_chunk_granularity(self): - upload = self._makeOne(_Stream()) - upload._validate_chunksize(123) # no-op - - def test__validate_chunksize_w__server_chunk_granularity_miss(self): - upload = self._makeOne(_Stream()) - upload._server_chunk_granularity = 100 - with self.assertRaises(ValueError): - upload._validate_chunksize(123) - - def test__validate_chunksize_w__server_chunk_granularity_hit(self): - upload = self._makeOne(_Stream()) - upload._server_chunk_granularity = 100 - upload._validate_chunksize(400) - - def test_stream_file_w_simple_strategy(self): - from gcloud.streaming.transfer import SIMPLE_UPLOAD - upload = self._makeOne(_Stream()) - upload.strategy = SIMPLE_UPLOAD - with self.assertRaises(ValueError): - upload.stream_file() - - def 
test_stream_file_w_use_chunks_invalid_chunk_size(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - upload = self._makeOne(_Stream(), chunksize=1024) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 100 - with self.assertRaises(ValueError): - upload.stream_file(use_chunks=True) - - def test_stream_file_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - upload = self._makeOne(_Stream(), chunksize=1024) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 128 - with self.assertRaises(TransferInvalidError): - upload.stream_file() - - def test_stream_file_already_complete_w_unseekable_stream(self): - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - http = object() - stream = object() - response = object() - upload = self._makeOne(stream, chunksize=1024) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 128 - upload._initialize(http, _Request.URL) - upload._final_response = response - upload._complete = True - self.assertTrue(upload.stream_file() is response) - - def test_stream_file_already_complete_w_seekable_stream_unsynced(self): - from gcloud.streaming.exceptions import CommunicationError - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - http = object() - stream = _Stream(CONTENT) - response = object() - upload = self._makeOne(stream, chunksize=1024) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 128 - upload._initialize(http, _Request.URL) - upload._final_response = response - upload._complete = True - with self.assertRaises(CommunicationError): - upload.stream_file() - - def test_stream_file_already_complete_w_seekable_stream_synced(self): - import os - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - http = object() - stream = _Stream(CONTENT) - stream.seek(0, os.SEEK_END) - response = object() - upload = self._makeOne(stream, chunksize=1024) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 128 - upload._initialize(http, _Request.URL) - upload._final_response = response - upload._complete = True - self.assertTrue(upload.stream_file(use_chunks=False) is response) - - def test_stream_file_incomplete(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream, chunksize=6) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 6 - upload._initialize(http, self.UPLOAD_URL) - - info_1 = {'content-length': '0', 'range': 'bytes=0-5'} - response_1 = _makeResponse(RESUME_INCOMPLETE, info_1) - info_2 = {'content-length': '0', 'range': 'bytes=6-9'} - response_2 = _makeResponse(http_client.OK, info_2) - requester = _MakeRequest(response_1, response_2) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - response = upload.stream_file() - - self.assertTrue(response is response_2) - self.assertEqual(len(requester._responses), 0) - self.assertEqual(len(requester._requested), 2) - - request_1 = requester._requested[0][0] - self.assertEqual(request_1.url, self.UPLOAD_URL) - self.assertEqual(request_1.http_method, 'PUT') - self.assertEqual(request_1.headers, - {'Content-Range': 
'bytes 0-5/*', - 'Content-Type': self.MIME_TYPE}) - self.assertEqual(request_1.body, CONTENT[:6]) - - request_2 = requester._requested[1][0] - self.assertEqual(request_2.url, self.UPLOAD_URL) - self.assertEqual(request_2.http_method, 'PUT') - self.assertEqual(request_2.headers, - {'Content-Range': 'bytes 6-9/10', - 'Content-Type': self.MIME_TYPE}) - self.assertEqual(request_2.body, CONTENT[6:]) - - def test_stream_file_incomplete_w_transfer_error(self): - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import CommunicationError - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream, chunksize=6) - upload.strategy = RESUMABLE_UPLOAD - upload._server_chunk_granularity = 6 - upload._initialize(http, self.UPLOAD_URL) - - info = { - 'content-length': '0', - 'range': 'bytes=0-4', # simulate error, s.b. '0-5' - } - response = _makeResponse(RESUME_INCOMPLETE, info) - requester = _MakeRequest(response) - - with _Monkey(MUT, - Request=_Request, - make_api_request=requester): - with self.assertRaises(CommunicationError): - upload.stream_file() - - self.assertEqual(len(requester._responses), 0) - self.assertEqual(len(requester._requested), 1) - - request = requester._requested[0][0] - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - self.assertEqual(request.headers, - {'Content-Range': 'bytes 0-5/*', - 'Content-Type': self.MIME_TYPE}) - self.assertEqual(request.body, CONTENT[:6]) - - def test__send_media_request_wo_error(self): - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - CONTENT = b'ABCDEFGHIJ' - bytes_http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream) - upload.bytes_http = bytes_http - - headers = {'Content-Range': 'bytes 0-9/10', - 'Content-Type': self.MIME_TYPE} - request = _Request(self.UPLOAD_URL, 'PUT', CONTENT, headers) - info = {'content-length': '0', 'range': 'bytes=0-4'} - response = _makeResponse(RESUME_INCOMPLETE, info) - requester = _MakeRequest(response) - - with _Monkey(MUT, make_api_request=requester): - upload._send_media_request(request, 9) - - self.assertEqual(len(requester._responses), 0) - self.assertEqual(len(requester._requested), 1) - used_request, used_http, _ = requester._requested[0] - self.assertTrue(used_request is request) - self.assertTrue(used_http is bytes_http) - self.assertEqual(stream.tell(), 4) - - def test__send_media_request_w_error(self): - from six.moves import http_client - from gcloud._testing import _Monkey - from gcloud.streaming import transfer as MUT - from gcloud.streaming.exceptions import HttpError - from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE - from gcloud.streaming.transfer import RESUMABLE_UPLOAD - CONTENT = b'ABCDEFGHIJ' - bytes_http = object() - http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream) - upload.strategy = RESUMABLE_UPLOAD - upload._initialize(http, self.UPLOAD_URL) - upload.bytes_http = bytes_http - - headers = {'Content-Range': 'bytes 0-9/10', - 'Content-Type': self.MIME_TYPE} - request = _Request(self.UPLOAD_URL, 'PUT', CONTENT, headers) - info_1 = {'content-length': '0', 'range': 'bytes=0-4'} - response_1 = _makeResponse(http_client.FORBIDDEN, info_1) - info_2 = {'Content-Length': '0', 
'Range': 'bytes=0-4'} - response_2 = _makeResponse(RESUME_INCOMPLETE, info_2) - requester = _MakeRequest(response_1, response_2) - - with _Monkey(MUT, Request=_Request, make_api_request=requester): - with self.assertRaises(HttpError): - upload._send_media_request(request, 9) - - self.assertEqual(len(requester._responses), 0) - self.assertEqual(len(requester._requested), 2) - first_request, first_http, _ = requester._requested[0] - self.assertTrue(first_request is request) - self.assertTrue(first_http is bytes_http) - second_request, second_http, _ = requester._requested[1] - self.assertEqual(second_request.url, self.UPLOAD_URL) - self.assertEqual(second_request.http_method, 'PUT') # ACK! - self.assertEqual(second_request.headers, - {'Content-Range': 'bytes */*'}) - self.assertTrue(second_http is http) - - def test__send_media_body_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - upload = self._makeOne(_Stream()) - with self.assertRaises(TransferInvalidError): - upload._send_media_body(0) - - def test__send_media_body_wo_total_size(self): - from gcloud.streaming.exceptions import TransferInvalidError - http = object() - upload = self._makeOne(_Stream()) - upload._initialize(http, _Request.URL) - with self.assertRaises(TransferInvalidError): - upload._send_media_body(0) - - def test__send_media_body_start_lt_total_size(self): - from gcloud.streaming.stream_slice import StreamSlice - SIZE = 1234 - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=SIZE) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - - found = upload._send_media_body(0) - - self.assertTrue(found is response) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - body_stream = request.body - self.assertTrue(isinstance(body_stream, StreamSlice)) - self.assertTrue(body_stream._stream is stream) - self.assertEqual(len(body_stream), SIZE) - self.assertEqual(request.headers, - {'content-length': '%d' % (SIZE,), # speling! - 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes 0-%d/%d' % (SIZE - 1, SIZE)}) - self.assertEqual(end, SIZE) - - def test__send_media_body_start_eq_total_size(self): - from gcloud.streaming.stream_slice import StreamSlice - SIZE = 1234 - http = object() - stream = _Stream() - upload = self._makeOne(stream, total_size=SIZE) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - - found = upload._send_media_body(SIZE) - - self.assertTrue(found is response) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - body_stream = request.body - self.assertTrue(isinstance(body_stream, StreamSlice)) - self.assertTrue(body_stream._stream is stream) - self.assertEqual(len(body_stream), 0) - self.assertEqual(request.headers, - {'content-length': '0', # speling! 
- 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes */%d' % (SIZE,)}) - self.assertEqual(end, SIZE) - - def test__send_chunk_not_initialized(self): - from gcloud.streaming.exceptions import TransferInvalidError - upload = self._makeOne(_Stream()) - with self.assertRaises(TransferInvalidError): - upload._send_chunk(0) - - def test__send_chunk_wo_total_size_stream_exhausted(self): - CONTENT = b'ABCDEFGHIJ' - SIZE = len(CONTENT) - http = object() - upload = self._makeOne(_Stream(CONTENT), chunksize=1000) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - self.assertEqual(upload.total_size, None) - - found = upload._send_chunk(0) - - self.assertTrue(found is response) - self.assertEqual(upload.total_size, SIZE) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - self.assertEqual(request.body, CONTENT) - self.assertEqual(request.headers, - {'content-length': '%d' % SIZE, # speling! - 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes 0-%d/%d' % (SIZE - 1, SIZE)}) - self.assertEqual(end, SIZE) - - def test__send_chunk_wo_total_size_stream_not_exhausted(self): - CONTENT = b'ABCDEFGHIJ' - SIZE = len(CONTENT) - CHUNK_SIZE = SIZE - 5 - http = object() - upload = self._makeOne(_Stream(CONTENT), chunksize=CHUNK_SIZE) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - self.assertEqual(upload.total_size, None) - - found = upload._send_chunk(0) - - self.assertTrue(found is response) - self.assertEqual(upload.total_size, None) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - self.assertEqual(request.body, CONTENT[:CHUNK_SIZE]) - expected_headers = { - 'content-length': '%d' % CHUNK_SIZE, # speling! - 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes 0-%d/*' % (CHUNK_SIZE - 1,), - } - self.assertEqual(request.headers, expected_headers) - self.assertEqual(end, CHUNK_SIZE) - - def test__send_chunk_w_total_size_stream_not_exhausted(self): - from gcloud.streaming.stream_slice import StreamSlice - CONTENT = b'ABCDEFGHIJ' - SIZE = len(CONTENT) - CHUNK_SIZE = SIZE - 5 - http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream, total_size=SIZE, chunksize=CHUNK_SIZE) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - - found = upload._send_chunk(0) - - self.assertTrue(found is response) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - body_stream = request.body - self.assertTrue(isinstance(body_stream, StreamSlice)) - self.assertTrue(body_stream._stream is stream) - self.assertEqual(len(body_stream), CHUNK_SIZE) - expected_headers = { - 'content-length': '%d' % CHUNK_SIZE, # speling! 
- 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes 0-%d/%d' % (CHUNK_SIZE - 1, SIZE), - } - self.assertEqual(request.headers, expected_headers) - self.assertEqual(end, CHUNK_SIZE) - - def test__send_chunk_w_total_size_stream_exhausted(self): - from gcloud.streaming.stream_slice import StreamSlice - CONTENT = b'ABCDEFGHIJ' - SIZE = len(CONTENT) - CHUNK_SIZE = 1000 - http = object() - stream = _Stream(CONTENT) - upload = self._makeOne(stream, total_size=SIZE, chunksize=CHUNK_SIZE) - upload._initialize(http, self.UPLOAD_URL) - response = object() - streamer = _MediaStreamer(response) - upload._send_media_request = streamer - - found = upload._send_chunk(SIZE) - - self.assertTrue(found is response) - request, end = streamer._called_with - self.assertEqual(request.url, self.UPLOAD_URL) - self.assertEqual(request.http_method, 'PUT') - body_stream = request.body - self.assertTrue(isinstance(body_stream, StreamSlice)) - self.assertTrue(body_stream._stream is stream) - self.assertEqual(len(body_stream), 0) - self.assertEqual(request.headers, - {'content-length': '0', # speling! - 'Content-Type': self.MIME_TYPE, - 'Content-Range': 'bytes */%d' % (SIZE,)}) - self.assertEqual(end, SIZE) - - -class _Dummy(object): - def __init__(self, **kw): - self.__dict__.update(kw) - - -class _UploadConfig(object): - accept = ('*/*',) - max_size = None - resumable_path = '/resumable/endpoint' - simple_multipart = True - simple_path = '/upload/endpoint' - - -class _Stream(object): - _closed = False - - def __init__(self, to_read=b''): - import io - self._written = [] - self._to_read = io.BytesIO(to_read) - - def write(self, to_write): - self._written.append(to_write) - - def seek(self, offset, whence=0): - self._to_read.seek(offset, whence) - - def read(self, size=None): - if size is not None: - return self._to_read.read(size) - return self._to_read.read() - - def tell(self): - return self._to_read.tell() - - def close(self): - self._closed = True - - -class _Request(object): - __slots__ = ('url', 'http_method', 'body', 'headers', 'loggable_body') - URL = 'http://example.com/api' - - def __init__(self, url=URL, http_method='GET', body='', headers=None): - self.url = url - self.http_method = http_method - self.body = self.loggable_body = body - if headers is None: - headers = {} - self.headers = headers - - -class _MakeRequest(object): - - def __init__(self, *responses): - self._responses = list(responses) - self._requested = [] - - def __call__(self, http, request, **kw): - self._requested.append((request, http, kw)) - return self._responses.pop(0) - - -def _makeResponse(status_code, info=None, content='', - request_url=_Request.URL): - if info is None: - info = {} - return _Dummy(status_code=status_code, - info=info, - content=content, - length=len(content), - request_url=request_url) - - -class _MediaStreamer(object): - - _called_with = None - - def __init__(self, response): - self._response = response - - def __call__(self, request, end): - assert self._called_with is None - self._called_with = (request, end) - return self._response - - -def _tempdir_maker(): - import contextlib - import shutil - import tempfile - - @contextlib.contextmanager - def _tempdir_mgr(): - temp_dir = tempfile.mkdtemp() - yield temp_dir - shutil.rmtree(temp_dir) - - return _tempdir_mgr - -_tempdir = _tempdir_maker() -del _tempdir_maker diff --git a/gcloud/streaming/test_util.py b/gcloud/streaming/test_util.py deleted file mode 100644 index ea80146ad4cc..000000000000 --- a/gcloud/streaming/test_util.py +++ /dev/null @@ -1,48 +0,0 
@@ -import unittest2 - - -class Test_calculate_wait_for_retry(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.util import calculate_wait_for_retry - return calculate_wait_for_retry(*args, **kw) - - def test_w_negative_jitter_lt_max_wait(self): - import random - from gcloud._testing import _Monkey - with _Monkey(random, uniform=lambda lower, upper: lower): - self.assertEqual(self._callFUT(1, 60), 1.5) - - def test_w_positive_jitter_gt_max_wait(self): - import random - from gcloud._testing import _Monkey - with _Monkey(random, uniform=lambda lower, upper: upper): - self.assertEqual(self._callFUT(4, 10), 10) - - -class Test_acceptable_mime_type(unittest2.TestCase): - - def _callFUT(self, *args, **kw): - from gcloud.streaming.util import acceptable_mime_type - return acceptable_mime_type(*args, **kw) - - def test_pattern_wo_slash(self): - with self.assertRaises(ValueError) as err: - self._callFUT(['text/*'], 'BOGUS') - self.assertEqual( - err.exception.args, - ('Invalid MIME type: "BOGUS"',)) - - def test_accept_pattern_w_semicolon(self): - with self.assertRaises(ValueError) as err: - self._callFUT(['text/*;charset=utf-8'], 'text/plain') - self.assertEqual( - err.exception.args, - ('MIME patterns with parameter unsupported: ' - '"text/*;charset=utf-8"',)) - - def test_miss(self): - self.assertFalse(self._callFUT(['image/*'], 'text/plain')) - - def test_hit(self): - self.assertTrue(self._callFUT(['text/*'], 'text/plain')) diff --git a/gcloud/streaming/transfer.py b/gcloud/streaming/transfer.py deleted file mode 100644 index 48b8bad53488..000000000000 --- a/gcloud/streaming/transfer.py +++ /dev/null @@ -1,1150 +0,0 @@ -"""Upload and download support for apitools.""" - -import email.generator as email_generator -import email.mime.multipart as mime_multipart -import email.mime.nonmultipart as mime_nonmultipart -import mimetypes -import os - -import six -from six.moves import http_client - -from gcloud.streaming.buffered_stream import BufferedStream -from gcloud.streaming.exceptions import CommunicationError -from gcloud.streaming.exceptions import HttpError -from gcloud.streaming.exceptions import TransferInvalidError -from gcloud.streaming.exceptions import TransferRetryError -from gcloud.streaming.http_wrapper import get_http -from gcloud.streaming.http_wrapper import make_api_request -from gcloud.streaming.http_wrapper import Request -from gcloud.streaming.http_wrapper import RESUME_INCOMPLETE -from gcloud.streaming.stream_slice import StreamSlice -from gcloud.streaming.util import acceptable_mime_type - - -RESUMABLE_UPLOAD_THRESHOLD = 5 << 20 -SIMPLE_UPLOAD = 'simple' -RESUMABLE_UPLOAD = 'resumable' - - -_DEFAULT_CHUNKSIZE = 1 << 20 - - -class _Transfer(object): - """Generic bits common to Uploads and Downloads. - - :type stream: file-like object - :param stream: stream to/from which data is downloaded/uploaded. - - :type close_stream: boolean - :param close_stream: should this instance close the stream when deleted - - :type chunksize: integer - :param chunksize: the size of chunks used to download/upload a file. - - :type auto_transfer: boolean - :param auto_transfer: should this instance automatically begin transfering - data when initialized - - :type http: :class:`httplib2.Http` (or workalike) - :param http: Http instance used to perform requests. 
- - :type num_retries: integer - :param num_retries: how many retries should the transfer attempt - """ - - _num_retries = None - - def __init__(self, stream, close_stream=False, - chunksize=_DEFAULT_CHUNKSIZE, auto_transfer=True, - http=None, num_retries=5): - self._bytes_http = None - self._close_stream = close_stream - self._http = http - self._stream = stream - self._url = None - - # Let the @property do validation. - self.num_retries = num_retries - - self.auto_transfer = auto_transfer - self.chunksize = chunksize - - def __repr__(self): - return str(self) - - @property - def close_stream(self): - """Should this instance close the stream when deleted - - :rtype: boolean - """ - return self._close_stream - - @property - def http(self): - """Http instance used to perform requests. - - :rtype: :class:`httplib2.Http` (or workalike) - """ - return self._http - - @property - def bytes_http(self): - """Http instance used to perform binary requests. - - Defaults to :attr:`http`. - - :rtype: :class:`httplib2.Http` (or workalike) - """ - return self._bytes_http or self.http - - @bytes_http.setter - def bytes_http(self, value): - """Update Http instance used to perform binary requests. - - :type value: :class:`httplib2.Http` (or workalike) - :param value: new instance - """ - self._bytes_http = value - - @property - def num_retries(self): - """How many retries should the transfer attempt - - :rtype: integer - """ - return self._num_retries - - @num_retries.setter - def num_retries(self, value): - """Update how many retries should the transfer attempt - - :type value: integer - """ - if not isinstance(value, six.integer_types): - raise ValueError("num_retries: pass an integer") - - if value < 0: - raise ValueError( - 'Cannot have negative value for num_retries') - self._num_retries = value - - @property - def stream(self): - """Stream to/from which data is downloaded/uploaded. - - :rtype: file-like object - """ - return self._stream - - @property - def url(self): - """URL to / from which data is downloaded/uploaded. - - :rtype: string - """ - return self._url - - def _initialize(self, http, url): - """Initialize this download by setting :attr:`http` and :attr`url`. - - Allow the user to be able to pre-initialize :attr:`http` by setting - the value in the constructor; in that case, we ignore the provided - http. - - :type http: :class:`httplib2.Http` (or a worklike) or None. - :param http: the Http instance to use to make requests. - - :type url: string - :param url: The url for this transfer. - """ - self._ensure_uninitialized() - if self.http is None: - self._http = http or get_http() - self._url = url - - @property - def initialized(self): - """Has the instance been initialized - - :rtype: boolean - """ - return self.url is not None and self.http is not None - - def _ensure_initialized(self): - """Helper: assert that the instance is initialized. - - :raises: :exc:`gcloud.streaming.exceptions.TransferInvalidError` - if the instance is not initialized. - """ - if not self.initialized: - raise TransferInvalidError( - 'Cannot use uninitialized %s', type(self).__name__) - - def _ensure_uninitialized(self): - """Helper: assert that the instance is not initialized. - - :raises: :exc:`gcloud.streaming.exceptions.TransferInvalidError` - if the instance is already initialized. 
- """ - if self.initialized: - raise TransferInvalidError( - 'Cannot re-initialize %s', type(self).__name__) - - def __del__(self): - if self._close_stream: - self._stream.close() - - -class Download(_Transfer): - """Represent a single download. - - :type stream: file-like object - :param stream: stream to/from which data is downloaded/uploaded. - - :type kwds: dict - :param kwds: keyword arguments: all except ``total_size`` are passed - through to :meth:`_Transfer.__init__()`. - """ - _ACCEPTABLE_STATUSES = set(( - http_client.OK, - http_client.NO_CONTENT, - http_client.PARTIAL_CONTENT, - http_client.REQUESTED_RANGE_NOT_SATISFIABLE, - )) - - def __init__(self, stream, **kwds): - total_size = kwds.pop('total_size', None) - super(Download, self).__init__(stream, **kwds) - self._initial_response = None - self._progress = 0 - self._total_size = total_size - self._encoding = None - - @classmethod - def from_file(cls, filename, overwrite=False, auto_transfer=True, **kwds): - """Create a new download object from a filename. - - :type filename: string - :param filename: path/filename for the target file - - :type overwrite: boolean - :param overwrite: should an existing file be overwritten - - :type auto_transfer: boolean - :param auto_transfer: should the transfer be started immediately - - :type kwds: dict - :param kwds: keyword arguments: passed - through to :meth:`_Transfer.__init__()`. - """ - path = os.path.expanduser(filename) - if os.path.exists(path) and not overwrite: - raise ValueError( - 'File %s exists and overwrite not specified' % path) - return cls(open(path, 'wb'), close_stream=True, - auto_transfer=auto_transfer, **kwds) - - @classmethod - def from_stream(cls, stream, auto_transfer=True, total_size=None, **kwds): - """Create a new Download object from a stream. - - :type stream: writable file-like object - :param stream: the target file - - :type total_size: integer or None - :param total_size: total size of the file to be downloaded - - :type auto_transfer: boolean - :param auto_transfer: should the transfer be started immediately - - :type kwds: dict - :param kwds: keyword arguments: passed - through to :meth:`_Transfer.__init__()`. - """ - return cls(stream, auto_transfer=auto_transfer, total_size=total_size, - **kwds) - - @property - def progress(self): - """Number of bytes have been downloaded. - - :rtype: integer >= 0 - """ - return self._progress - - @property - def total_size(self): - """Total number of bytes to be downloaded. - - :rtype: integer or None - """ - return self._total_size - - @property - def encoding(self): - """'Content-Encoding' used to transfer the file - - :rtype: string or None - """ - return self._encoding - - def __repr__(self): - if not self.initialized: - return 'Download (uninitialized)' - else: - return 'Download with %d/%s bytes transferred from url %s' % ( - self.progress, self.total_size, self.url) - - def configure_request(self, http_request, url_builder): - """Update http_request/url_builder with download-appropriate values. - - :type http_request: :class:`gcloud.streaming.http_wrapper.Request` - :param http_request: the request to be updated - - :type url_builder: instance with settable 'query_params' attribute. - :param url_builder: transfer policy object to be updated - """ - url_builder.query_params['alt'] = 'media' - http_request.headers['Range'] = 'bytes=0-%d' % (self.chunksize - 1,) - - def _set_total(self, info): - """Update 'total_size' based on data from a response. 
- - :type info: mapping - :param info: response headers - """ - if 'content-range' in info: - _, _, total = info['content-range'].rpartition('/') - if total != '*': - self._total_size = int(total) - # Note "total_size is None" means we don't know it; if no size - # info was returned on our initial range request, that means we - # have a 0-byte file. (That last statement has been verified - # empirically, but is not clearly documented anywhere.) - if self.total_size is None: - self._total_size = 0 - - def initialize_download(self, http_request, http): - """Initialize this download. - - If the instance has :attr:`auto_transfer` enabled, begins the - download immediately. - - :type http_request: :class:`gcloud.streaming.http_wrapper.Request` - :param http_request: the request to use to initialize this download. - - :type http: :class:`httplib2.Http` (or workalike) - :param http: Http instance for this request. - """ - self._ensure_uninitialized() - url = http_request.url - if self.auto_transfer: - end_byte = self._compute_end_byte(0) - self._set_range_header(http_request, 0, end_byte) - response = make_api_request( - self.bytes_http or http, http_request) - if response.status_code not in self._ACCEPTABLE_STATUSES: - raise HttpError.from_response(response) - self._initial_response = response - self._set_total(response.info) - url = response.info.get('content-location', response.request_url) - self._initialize(http, url) - # Unless the user has requested otherwise, we want to just - # go ahead and pump the bytes now. - if self.auto_transfer: - self.stream_file(use_chunks=True) - - def _normalize_start_end(self, start, end=None): - """Validate / fix up byte range. - - :type start: integer - :param start: start byte of the range: if negative, used as an - offset from the end. - - :type end: integer - :param end: end byte of the range. - - :rtype: tuple, (start, end) - :returns: the normalized start, end pair. - :raises: :exc:`gcloud.streaming.exceptions.TransferInvalidError` - for invalid combinations of start, end. - """ - if end is not None: - if start < 0: - raise TransferInvalidError( - 'Cannot have end index with negative start index') - elif start >= self.total_size: - raise TransferInvalidError( - 'Cannot have start index greater than total size') - end = min(end, self.total_size - 1) - if end < start: - raise TransferInvalidError( - 'Range requested with end[%s] < start[%s]' % (end, start)) - return start, end - else: - if start < 0: - start = max(0, start + self.total_size) - return start, self.total_size - 1 - - @staticmethod - def _set_range_header(request, start, end=None): - """Update the 'Range' header in a request to match a byte range. - - :type request: :class:`gcloud.streaming.http_wrapper.Request` - :param request: the request to update - - :type start: integer - :param start: start byte of the range: if negative, used as an - offset from the end. - - :type end: integer - :param end: end byte of the range. - """ - if start < 0: - request.headers['range'] = 'bytes=%d' % start - elif end is None: - request.headers['range'] = 'bytes=%d-' % start - else: - request.headers['range'] = 'bytes=%d-%d' % (start, end) - - def _compute_end_byte(self, start, end=None, use_chunks=True): - """Compute the last byte to fetch for this request. - - Based on the HTTP spec for Range and Content-Range. - - .. note:: - This is potentially confusing in several ways: - - the value for the last byte is 0-based, eg "fetch 10 bytes - from the beginning" would return 9 here. 
- - if we have no information about size, and don't want to - use the chunksize, we'll return None. - - :type start: integer - :param start: start byte of the range. - - :type end: integer or None - :param end: suggested last byte of the range. - - :type use_chunks: boolean - :param use_chunks: If False, ignore :attr:`chunksize`. - - :returns: Last byte to use in a 'Range' header, or None. - """ - end_byte = end - - if start < 0 and not self.total_size: - return end_byte - - if use_chunks: - alternate = start + self.chunksize - 1 - if end_byte is not None: - end_byte = min(end_byte, alternate) - else: - end_byte = alternate - - if self.total_size: - alternate = self.total_size - 1 - if end_byte is not None: - end_byte = min(end_byte, alternate) - else: - end_byte = alternate - - return end_byte - - def _get_chunk(self, start, end): - """Retrieve a chunk of the file. - - :type start: integer - :param start: start byte of the range. - - :type end: integer or None - :param end: end byte of the range. - - :rtype: :class:`gcloud.streaming.http_wrapper.Response` - :returns: response from the chunk request. - """ - self._ensure_initialized() - request = Request(url=self.url) - self._set_range_header(request, start, end=end) - return make_api_request( - self.bytes_http, request, retries=self.num_retries) - - def _process_response(self, response): - """Update attribtes and writing stream, based on response. - - :type response: :class:`gcloud.streaming.http_wrapper.Response` - :param response: response from a download request. - - :rtype: :class:`gcloud.streaming.http_wrapper.Response` - :returns: the response - :raises: :exc:`gcloud.streaming.exceptions.HttpError` for - missing / unauthorized responses; - :exc:`gcloud.streaming.exceptions.TransferRetryError` - for other error responses. - """ - if response.status_code not in self._ACCEPTABLE_STATUSES: - # We distinguish errors that mean we made a mistake in setting - # up the transfer versus something we should attempt again. - if response.status_code in (http_client.FORBIDDEN, - http_client.NOT_FOUND): - raise HttpError.from_response(response) - else: - raise TransferRetryError(response.content) - if response.status_code in (http_client.OK, - http_client.PARTIAL_CONTENT): - self.stream.write(response.content) - self._progress += response.length - if response.info and 'content-encoding' in response.info: - self._encoding = response.info['content-encoding'] - elif response.status_code == http_client.NO_CONTENT: - # It's important to write something to the stream for the case - # of a 0-byte download to a file, as otherwise python won't - # create the file. - self.stream.write('') - return response - - def get_range(self, start, end=None, use_chunks=True): - """Retrieve a given byte range from this download, inclusive. - - Writes retrieved bytes into :attr:`stream`. - - Range must be of one of these three forms: - * 0 <= start, end = None: Fetch from start to the end of the file. - * 0 <= start <= end: Fetch the bytes from start to end. - * start < 0, end = None: Fetch the last -start bytes of the file. - - (These variations correspond to those described in the HTTP 1.1 - protocol for range headers in RFC 2616, sec. 14.35.1.) - - :type start: integer - :param start: Where to start fetching bytes. (See above.) - - :type end: integer or ``None`` - :param end: Where to stop fetching bytes. (See above.) - - :type use_chunks: boolean - :param use_chunks: If False, ignore :attr:`chunksize` - and fetch this range in a single request. 
- If True, streams via chunks. - - :raises: :exc:`gcloud.streaming.exceptions.TransferRetryError` - if a request returns an empty response. - """ - self._ensure_initialized() - progress_end_normalized = False - if self.total_size is not None: - progress, end_byte = self._normalize_start_end(start, end) - progress_end_normalized = True - else: - progress = start - end_byte = end - while (not progress_end_normalized or end_byte is None or - progress <= end_byte): - end_byte = self._compute_end_byte(progress, end=end_byte, - use_chunks=use_chunks) - response = self._get_chunk(progress, end_byte) - if not progress_end_normalized: - self._set_total(response.info) - progress, end_byte = self._normalize_start_end(start, end) - progress_end_normalized = True - response = self._process_response(response) - progress += response.length - if response.length == 0: - raise TransferRetryError( - 'Zero bytes unexpectedly returned in download response') - - def stream_file(self, use_chunks=True): - """Stream the entire download. - - Writes retrieved bytes into :attr:`stream`. - - :type use_chunks: boolean - :param use_chunks: If False, ignore :attr:`chunksize` - and stream this download in a single request. - If True, streams via chunks. - """ - self._ensure_initialized() - while True: - if self._initial_response is not None: - response = self._initial_response - self._initial_response = None - else: - end_byte = self._compute_end_byte(self.progress, - use_chunks=use_chunks) - response = self._get_chunk(self.progress, end_byte) - if self.total_size is None: - self._set_total(response.info) - response = self._process_response(response) - if (response.status_code == http_client.OK or - self.progress >= self.total_size): - break - - -class Upload(_Transfer): - """Represent a single Upload. - - :type stream: file-like object - :param stream: stream to/from which data is downloaded/uploaded. - - :type mime_type: string: - :param mime_type: MIME type of the upload. - - :type total_size: integer or None - :param total_size: Total upload size for the stream. - - :type http: :class:`httplib2.Http` (or workalike) - :param http: Http instance used to perform requests. - - :type close_stream: boolean - :param close_stream: should this instance close the stream when deleted - - :type auto_transfer: boolean - :param auto_transfer: should this instance automatically begin transfering - data when initialized - - :type kwds: dict - :param kwds: keyword arguments: all except ``total_size`` are passed - through to :meth:`_Transfer.__init__()`. - """ - _REQUIRED_SERIALIZATION_KEYS = set(( - 'auto_transfer', 'mime_type', 'total_size', 'url')) - - def __init__(self, stream, mime_type, total_size=None, http=None, - close_stream=False, auto_transfer=True, - **kwds): - super(Upload, self).__init__( - stream, close_stream=close_stream, auto_transfer=auto_transfer, - http=http, **kwds) - self._final_response = None - self._server_chunk_granularity = None - self._complete = False - self._mime_type = mime_type - self._progress = 0 - self._strategy = None - self._total_size = total_size - - @classmethod - def from_file(cls, filename, mime_type=None, auto_transfer=True, **kwds): - """Create a new Upload object from a filename. 
-
- :type filename: string
- :param filename: path/filename to the file being uploaded
-
- :type mime_type: string
- :param mime_type: MIME type of the file being uploaded
-
- :type auto_transfer: boolean or None
- :param auto_transfer: should the transfer be started immediately
-
- :type kwds: dict
- :param kwds: keyword arguments: passed
- through to :meth:`_Transfer.__init__()`.
- """
- path = os.path.expanduser(filename)
- if not mime_type:
- mime_type, _ = mimetypes.guess_type(path)
- if mime_type is None:
- raise ValueError(
- 'Could not guess mime type for %s' % path)
- size = os.stat(path).st_size
- return cls(open(path, 'rb'), mime_type, total_size=size,
- close_stream=True, auto_transfer=auto_transfer, **kwds)
-
- @classmethod
- def from_stream(cls, stream, mime_type,
- total_size=None, auto_transfer=True, **kwds):
- """Create a new Upload object from a stream.
-
- :type stream: readable file-like object
- :param stream: the stream to be uploaded
-
- :type mime_type: string
- :param mime_type: MIME type of the file being uploaded
-
- :type total_size: integer or None
- :param total_size: Size of the file being uploaded
-
- :type auto_transfer: boolean or None
- :param auto_transfer: should the transfer be started immediately
-
- :type kwds: dict
- :param kwds: keyword arguments: passed
- through to :meth:`_Transfer.__init__()`.
- """
- if mime_type is None:
- raise ValueError(
- 'No mime_type specified for stream')
- return cls(stream, mime_type, total_size=total_size,
- close_stream=False, auto_transfer=auto_transfer, **kwds)
-
- @property
- def complete(self):
- """Has the entire stream been uploaded.
-
- :rtype: boolean
- """
- return self._complete
-
- @property
- def mime_type(self):
- """MIME type of the file being uploaded.
-
- :rtype: string
- """
- return self._mime_type
-
- @property
- def progress(self):
- """Bytes uploaded so far.
-
- :rtype: integer
- """
- return self._progress
-
- @property
- def strategy(self):
- """Upload strategy to use.
-
- :rtype: string or None
- """
- return self._strategy
-
- @strategy.setter
- def strategy(self, value):
- """Update the upload strategy to use.
-
- :type value: string (one of :data:`SIMPLE_UPLOAD` or
- :data:`RESUMABLE_UPLOAD`)
-
- :raises: :exc:`ValueError` if value is not one of the two allowed
- strings.
- """
- if value not in (SIMPLE_UPLOAD, RESUMABLE_UPLOAD):
- raise ValueError((
- 'Invalid value "%s" for upload strategy, must be one of '
- '"simple" or "resumable".') % value)
- self._strategy = value
-
- @property
- def total_size(self):
- """Total size of the stream to be uploaded.
-
- :rtype: integer or None
- """
- return self._total_size
-
- @total_size.setter
- def total_size(self, value):
- """Update total size of the stream to be uploaded.
-
- :type value: integer or None
- :param value: the size
- """
- self._ensure_uninitialized()
- self._total_size = value
-
- def __repr__(self):
- if not self.initialized:
- return 'Upload (uninitialized)'
- else:
- return 'Upload with %d/%s bytes transferred for url %s' % (
- self.progress, self.total_size or '???', self.url)
-
- def _set_default_strategy(self, upload_config, http_request):
- """Determine and set the default upload strategy for this upload.
-
- We generally prefer simple or multipart, unless we're forced to
- use resumable. This happens when any of (1) the upload is too
- large, (2) the simple endpoint doesn't support multipart requests
- and we have metadata, or (3) there is no simple upload endpoint.
- - :type upload_config: instance w/ ``max_size`` and ``accept`` - attributes - :param upload_config: Configuration for the upload endpoint. - - :type http_request: :class:`gcloud.streaming.http_wrapper.Request` - :param http_request: The associated http request. - """ - if upload_config.resumable_path is None: - self.strategy = SIMPLE_UPLOAD - if self.strategy is not None: - return - strategy = SIMPLE_UPLOAD - if (self.total_size is not None and - self.total_size > RESUMABLE_UPLOAD_THRESHOLD): - strategy = RESUMABLE_UPLOAD - if http_request.body and not upload_config.simple_multipart: - strategy = RESUMABLE_UPLOAD - if not upload_config.simple_path: - strategy = RESUMABLE_UPLOAD - self.strategy = strategy - - def configure_request(self, upload_config, http_request, url_builder): - """Configure the request and url for this upload. - - :type upload_config: instance w/ ``max_size`` and ``accept`` - attributes - :param upload_config: transfer policy object to be queried - - :type http_request: :class:`gcloud.streaming.http_wrapper.Request` - :param http_request: the request to be updated - - :type url_builder: instance with settable 'relative_path' and - 'query_params' attributes. - :param url_builder: transfer policy object to be updated - - :raises: :exc:`ValueError` if the requested upload is too big, - or does not have an acceptable MIME type. - """ - # Validate total_size vs. max_size - if (self.total_size and upload_config.max_size and - self.total_size > upload_config.max_size): - raise ValueError( - 'Upload too big: %s larger than max size %s' % ( - self.total_size, upload_config.max_size)) - # Validate mime type - if not acceptable_mime_type(upload_config.accept, self.mime_type): - raise ValueError( - 'MIME type %s does not match any accepted MIME ranges %s' % ( - self.mime_type, upload_config.accept)) - - self._set_default_strategy(upload_config, http_request) - if self.strategy == SIMPLE_UPLOAD: - url_builder.relative_path = upload_config.simple_path - if http_request.body: - url_builder.query_params['uploadType'] = 'multipart' - self._configure_multipart_request(http_request) - else: - url_builder.query_params['uploadType'] = 'media' - self._configure_media_request(http_request) - else: - url_builder.relative_path = upload_config.resumable_path - url_builder.query_params['uploadType'] = 'resumable' - self._configure_resumable_request(http_request) - - def _configure_media_request(self, http_request): - """Helper for 'configure_request': set up simple request.""" - http_request.headers['content-type'] = self.mime_type - http_request.body = self.stream.read() - http_request.loggable_body = '' - - def _configure_multipart_request(self, http_request): - """Helper for 'configure_request': set up multipart request.""" - # This is a multipart/related upload. - msg_root = mime_multipart.MIMEMultipart('related') - # msg_root should not write out its own headers - setattr(msg_root, '_write_headers', lambda self: None) - - # attach the body as one part - msg = mime_nonmultipart.MIMENonMultipart( - *http_request.headers['content-type'].split('/')) - msg.set_payload(http_request.body) - msg_root.attach(msg) - - # attach the media as the second part - msg = mime_nonmultipart.MIMENonMultipart(*self.mime_type.split('/')) - msg['Content-Transfer-Encoding'] = 'binary' - msg.set_payload(self.stream.read()) - msg_root.attach(msg) - - # NOTE: We encode the body, but can't use - # `email.message.Message.as_string` because it prepends - # `> ` to `From ` lines. 
- # NOTE: We must use six.StringIO() instead of io.StringIO() since the - # `email` library uses cStringIO in Py2 and io.StringIO in Py3. - stream = six.StringIO() - generator = email_generator.Generator(stream, mangle_from_=False) - generator.flatten(msg_root, unixfrom=False) - http_request.body = stream.getvalue() - - multipart_boundary = msg_root.get_boundary() - http_request.headers['content-type'] = ( - 'multipart/related; boundary="%s"' % multipart_boundary) - - body_components = http_request.body.split(multipart_boundary) - headers, _, _ = body_components[-2].partition('\n\n') - body_components[-2] = '\n\n'.join([headers, '\n\n--']) - http_request.loggable_body = multipart_boundary.join(body_components) - - def _configure_resumable_request(self, http_request): - """Helper for 'configure_request': set up resumable request.""" - http_request.headers['X-Upload-Content-Type'] = self.mime_type - if self.total_size is not None: - http_request.headers[ - 'X-Upload-Content-Length'] = str(self.total_size) - - def refresh_upload_state(self): - """Refresh the state of a resumable upload via query to the back-end. - """ - if self.strategy != RESUMABLE_UPLOAD: - return - self._ensure_initialized() - # NOTE: Per RFC 2616[1]/7231[2], a 'PUT' request is inappropriate - # here: it is intended to be used to replace the entire - # resource, not to query for a status. - # - # If the back-end doesn't provide a way to query for this state - # via a 'GET' request, somebody should be spanked. - # - # The violation is documented[3]. - # - # [1] http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.6 - # [2] http://tools.ietf.org/html/rfc7231#section-4.3.4 - # [3] - # https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#resume-upload - refresh_request = Request( - url=self.url, http_method='PUT', - headers={'Content-Range': 'bytes */*'}) - refresh_response = make_api_request( - self.http, refresh_request, redirections=0, - retries=self.num_retries) - range_header = self._get_range_header(refresh_response) - if refresh_response.status_code in (http_client.OK, - http_client.CREATED): - self._complete = True - self._progress = self.total_size - self.stream.seek(self.progress) - # If we're finished, the refresh response will contain the metadata - # originally requested. Cache it so it can be returned in - # StreamInChunks. - self._final_response = refresh_response - elif refresh_response.status_code == RESUME_INCOMPLETE: - if range_header is None: - self._progress = 0 - else: - self._progress = self._last_byte(range_header) + 1 - self.stream.seek(self.progress) - else: - raise HttpError.from_response(refresh_response) - - @staticmethod - def _get_range_header(response): - """Return a 'Range' header from a response. - - :type response: :class:`gcloud.streaming.http_wrapper.Response` - :param response: response to be queried - - :rtype: string - """ - # NOTE: Per RFC 2616[1]/7233[2][3], 'Range' is a request header, - # not a response header. If the back-end is actually setting - # 'Range' on responses, somebody should be spanked: it should - # be sending 'Content-Range' (including the # '/' - # trailer). - # - # The violation is documented[4]. 
- #
- # [1] http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
- # [2] http://tools.ietf.org/html/rfc7233#section-3.1
- # [3] http://tools.ietf.org/html/rfc7233#section-4.2
- # [4]
- # https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#chunking
- return response.info.get('Range', response.info.get('range'))
-
- def initialize_upload(self, http_request, http):
- """Initialize this upload from the given http_request.
-
- :type http_request: :class:`gcloud.streaming.http_wrapper.Request`
- :param http_request: the request to be used
-
- :type http: :class:`httplib2.Http` (or workalike)
- :param http: Http instance for this request.
-
- :raises: :exc:`ValueError` if the instance has not been configured
- with a strategy.
- """
- if self.strategy is None:
- raise ValueError(
- 'No upload strategy set; did you call configure_request?')
- if self.strategy != RESUMABLE_UPLOAD:
- return
- self._ensure_uninitialized()
- http_response = make_api_request(http, http_request,
- retries=self.num_retries)
- if http_response.status_code != http_client.OK:
- raise HttpError.from_response(http_response)
-
- granularity = http_response.info.get('X-Goog-Upload-Chunk-Granularity')
- if granularity is not None:
- granularity = int(granularity)
- self._server_chunk_granularity = granularity
- url = http_response.info['location']
- self._initialize(http, url)
-
- # Unless the user has requested otherwise, we want to just
- # go ahead and pump the bytes now.
- if self.auto_transfer:
- return self.stream_file(use_chunks=True)
- else:
- return http_response
-
- @staticmethod
- def _last_byte(range_header):
- """Parse the last byte from a 'Range' header.
-
- :type range_header: string
- :param range_header: 'Range' header value per RFC 2616/7233
-
- :rtype: integer
- :returns: the last byte recorded by the server.
- """
- _, _, end = range_header.partition('-')
- return int(end)
-
- def _validate_chunksize(self, chunksize=None):
- """Validate chunksize against server-specified granularity.
-
- Helper for :meth:`stream_file`.
-
- :type chunksize: integer or None
- :param chunksize: the chunk size to be tested.
-
- :raises: :exc:`ValueError` if ``chunksize`` is not a multiple
- of the server-specified granularity.
- """
- if self._server_chunk_granularity is None:
- return
- chunksize = chunksize or self.chunksize
- if chunksize % self._server_chunk_granularity:
- raise ValueError(
- 'Server requires chunksize to be a multiple of %d' %
- self._server_chunk_granularity)
-
- def stream_file(self, use_chunks=True):
- """Upload the stream.
-
- :type use_chunks: boolean
- :param use_chunks: If False, send the stream in a single request.
- Otherwise, send it in chunks.
- """
- if self.strategy != RESUMABLE_UPLOAD:
- raise ValueError(
- 'Cannot stream non-resumable upload')
- # final_response is set if we resumed an already-completed upload.
- response = self._final_response
- send_func = self._send_chunk if use_chunks else self._send_media_body
- if use_chunks:
- self._validate_chunksize(self.chunksize)
- self._ensure_initialized()
- while not self.complete:
- response = send_func(self.stream.tell())
- if response.status_code in (http_client.OK, http_client.CREATED):
- self._complete = True
- break
- self._progress = self._last_byte(response.info['range'])
- if self.progress + 1 != self.stream.tell():
- raise CommunicationError(
- 'Failed to transfer all bytes in chunk, upload paused at '
- 'byte %d' % self.progress)
- if self.complete and hasattr(self.stream, 'seek'):
- current_pos = self.stream.tell()
- self.stream.seek(0, os.SEEK_END)
- end_pos = self.stream.tell()
- self.stream.seek(current_pos)
- if current_pos != end_pos:
- raise TransferInvalidError(
- 'Upload complete with %s additional bytes left in stream' %
- (int(end_pos) - int(current_pos)))
- return response
-
- def _send_media_request(self, request, end):
- """Perform an API upload request.
-
- Helper for :meth:`_send_media_body` and :meth:`_send_chunk`:
-
- :type request: :class:`gcloud.streaming.http_wrapper.Request`
- :param request: the request to upload
-
- :type end: integer
- :param end: end byte of the range to be uploaded
-
- :rtype: :class:`gcloud.streaming.http_wrapper.Response`
- :returns: the response
- :raises: :exc:`gcloud.streaming.exceptions.HttpError` if the status
- code from the response indicates an error.
- """
- response = make_api_request(
- self.bytes_http, request, retries=self.num_retries)
- if response.status_code not in (http_client.OK, http_client.CREATED,
- RESUME_INCOMPLETE):
- # We want to reset our state to wherever the server left us
- # before this failed request, and then raise.
- self.refresh_upload_state()
- raise HttpError.from_response(response)
- if response.status_code == RESUME_INCOMPLETE:
- last_byte = self._last_byte(
- self._get_range_header(response))
- if last_byte + 1 != end:
- self.stream.seek(last_byte)
- return response
-
- def _send_media_body(self, start):
- """Send the entire stream in a single request.
-
- Helper for :meth:`stream_file`:
-
- :type start: integer
- :param start: start byte of the range.
- """
- self._ensure_initialized()
- if self.total_size is None:
- raise TransferInvalidError(
- 'Total size must be known for SendMediaBody')
- body_stream = StreamSlice(self.stream, self.total_size - start)
-
- request = Request(url=self.url, http_method='PUT', body=body_stream)
- request.headers['Content-Type'] = self.mime_type
- if start == self.total_size:
- # End of an upload with 0 bytes left to send; just finalize.
- range_string = 'bytes */%s' % self.total_size
- else:
- range_string = 'bytes %s-%s/%s' % (start, self.total_size - 1,
- self.total_size)
-
- request.headers['Content-Range'] = range_string
-
- return self._send_media_request(request, self.total_size)
-
- def _send_chunk(self, start):
- """Send a chunk of the stream.
-
- Helper for :meth:`stream_file`:
-
- :type start: integer
- :param start: start byte of the range.
- """
- self._ensure_initialized()
- no_log_body = self.total_size is None
- if self.total_size is None:
- # For the streaming resumable case, we need to detect when
- # we're at the end of the stream.
- body_stream = BufferedStream(
- self.stream, start, self.chunksize)
- end = body_stream.stream_end_position
- if body_stream.stream_exhausted:
- self._total_size = end
- # Here, change body_stream from a stream to a string object,
- # which means reading a chunk into memory. This works around
- # https://code.google.com/p/httplib2/issues/detail?id=176 which can
- # cause httplib2 to skip bytes on 401's for file objects.
- body_stream = body_stream.read(self.chunksize)
- else:
- end = min(start + self.chunksize, self.total_size)
- body_stream = StreamSlice(self.stream, end - start)
- request = Request(url=self.url, http_method='PUT', body=body_stream)
- request.headers['Content-Type'] = self.mime_type
- if no_log_body:
- # Disable logging of streaming body.
- request.loggable_body = ''
- if self.total_size is None:
- # Streaming resumable upload case, unknown total size.
- range_string = 'bytes %s-%s/*' % (start, end - 1)
- elif end == start:
- # End of an upload with 0 bytes left to send; just finalize.
- range_string = 'bytes */%s' % self.total_size
- else:
- # Normal resumable upload case with known sizes.
- range_string = 'bytes %s-%s/%s' % (start, end - 1, self.total_size)
-
- request.headers['Content-Range'] = range_string
-
- return self._send_media_request(request, end)
diff --git a/gcloud/streaming/util.py b/gcloud/streaming/util.py
deleted file mode 100644
index 18f8c9d5a349..000000000000
--- a/gcloud/streaming/util.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""Assorted utilities shared between parts of apitools."""
-
-import random
-
-
-def calculate_wait_for_retry(retry_attempt, max_wait=60):
- """Calculate the amount of time to wait before a retry attempt.
-
- Wait time grows exponentially with the number of attempts. A
- random amount of jitter is added to spread out retry attempts from
- different clients.
-
- :type retry_attempt: integer
- :param retry_attempt: Retry attempt counter.
-
- :type max_wait: integer
- :param max_wait: Upper bound for wait time [seconds].
-
- :rtype: integer or float
- :returns: Number of seconds to wait before retrying the request.
- """
-
- wait_time = 2 ** retry_attempt
- max_jitter = wait_time / 4.0
- wait_time += random.uniform(-max_jitter, max_jitter)
- return max(1, min(wait_time, max_wait))
-
-
-def acceptable_mime_type(accept_patterns, mime_type):
- """Check that ``mime_type`` matches one of ``accept_patterns``.
-
- Note that this function assumes that all patterns in accept_patterns
- will be simple types of the form "type/subtype", where one or both
- of these can be "*". We do not support parameters (i.e. "; q=") in
- patterns.
-
- :type accept_patterns: list of string
- :param accept_patterns: acceptable MIME types.
-
- :type mime_type: string
- :param mime_type: the MIME type being checked
-
- :rtype: boolean
- :returns: True if the supplied MIME type matches at least one of the
- patterns, else False.
- """ - if '/' not in mime_type: - raise ValueError( - 'Invalid MIME type: "%s"' % mime_type) - unsupported_patterns = [p for p in accept_patterns if ';' in p] - if unsupported_patterns: - raise ValueError( - 'MIME patterns with parameter unsupported: "%s"' % ', '.join( - unsupported_patterns)) - - def _match(pattern, mime_type): - """Return True iff mime_type is acceptable for pattern.""" - return all(accept in ('*', provided) for accept, provided - in zip(pattern.split('/'), mime_type.split('/'))) - - return any(_match(pattern, mime_type) for pattern in accept_patterns) diff --git a/gcloud/test__helpers.py b/gcloud/test__helpers.py index fe4a8d2f4b19..8477e73ad259 100644 --- a/gcloud/test__helpers.py +++ b/gcloud/test__helpers.py @@ -123,68 +123,6 @@ def test_invalid_iterable(self): self._callFUT('ARGNAME', invalid_tuple_or_list) -class Test__app_engine_id(unittest2.TestCase): - - def _callFUT(self): - from gcloud._helpers import _app_engine_id - return _app_engine_id() - - def test_no_value(self): - from gcloud._testing import _Monkey - from gcloud import _helpers - - with _Monkey(_helpers, app_identity=None): - dataset_id = self._callFUT() - self.assertEqual(dataset_id, None) - - def test_value_set(self): - from gcloud._testing import _Monkey - from gcloud import _helpers - - APP_ENGINE_ID = object() - APP_IDENTITY = _AppIdentity(APP_ENGINE_ID) - with _Monkey(_helpers, app_identity=APP_IDENTITY): - dataset_id = self._callFUT() - self.assertEqual(dataset_id, APP_ENGINE_ID) - - -class Test__compute_engine_id(unittest2.TestCase): - - def _callFUT(self): - from gcloud._helpers import _compute_engine_id - return _compute_engine_id() - - def _monkeyConnection(self, connection): - from gcloud._testing import _Monkey - from gcloud import _helpers - - def _factory(host, timeout): - connection.host = host - connection.timeout = timeout - return connection - - return _Monkey(_helpers, HTTPConnection=_factory) - - def test_bad_status(self): - connection = _HTTPConnection(404, None) - with self._monkeyConnection(connection): - dataset_id = self._callFUT() - self.assertEqual(dataset_id, None) - - def test_success(self): - COMPUTE_ENGINE_ID = object() - connection = _HTTPConnection(200, COMPUTE_ENGINE_ID) - with self._monkeyConnection(connection): - dataset_id = self._callFUT() - self.assertEqual(dataset_id, COMPUTE_ENGINE_ID) - - def test_socket_raises(self): - connection = _TimeoutHTTPConnection() - with self._monkeyConnection(connection): - dataset_id = self._callFUT() - self.assertEqual(dataset_id, None) - - class Test__get_production_project(unittest2.TestCase): def _callFUT(self): @@ -229,18 +167,8 @@ def prod_mock(): _callers.append('prod_mock') return prod - def gae_mock(): - _callers.append('gae_mock') - return gae - - def gce_mock(): - _callers.append('gce_mock') - return gce - patched_methods = { '_get_production_project': prod_mock, - '_app_engine_id': gae_mock, - '_compute_engine_id': gce_mock, } with _Monkey(_helpers, **patched_methods): @@ -251,7 +179,7 @@ def gce_mock(): def test_no_value(self): project, callers = self._determine_default_helper() self.assertEqual(project, None) - self.assertEqual(callers, ['prod_mock', 'gae_mock', 'gce_mock']) + self.assertEqual(callers, ['prod_mock']) def test_explicit(self): PROJECT = object() @@ -265,18 +193,6 @@ def test_prod(self): self.assertEqual(project, PROJECT) self.assertEqual(callers, ['prod_mock']) - def test_gae(self): - PROJECT = object() - project, callers = self._determine_default_helper(gae=PROJECT) - self.assertEqual(project, 
PROJECT) - self.assertEqual(callers, ['prod_mock', 'gae_mock']) - - def test_gce(self): - PROJECT = object() - project, callers = self._determine_default_helper(gce=PROJECT) - self.assertEqual(project, PROJECT) - self.assertEqual(callers, ['prod_mock', 'gae_mock', 'gce_mock']) - class Test__millis(unittest2.TestCase): @@ -572,48 +488,6 @@ def test_with_nonstring_type(self): self.assertRaises(TypeError, self._callFUT, value) -class Test__pb_timestamp_to_datetime(unittest2.TestCase): - - def _callFUT(self, timestamp): - from gcloud._helpers import _pb_timestamp_to_datetime - return _pb_timestamp_to_datetime(timestamp) - - def test_it(self): - import datetime - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud._helpers import UTC - - # Epoch is midnight on January 1, 1970 ... - dt_stamp = datetime.datetime(1970, month=1, day=1, hour=0, - minute=1, second=1, microsecond=1234, - tzinfo=UTC) - # ... so 1 minute and 1 second after is 61 seconds and 1234 - # microseconds is 1234000 nanoseconds. - timestamp = Timestamp(seconds=61, nanos=1234000) - self.assertEqual(self._callFUT(timestamp), dt_stamp) - - -class Test__datetime_to_pb_timestamp(unittest2.TestCase): - - def _callFUT(self, when): - from gcloud._helpers import _datetime_to_pb_timestamp - return _datetime_to_pb_timestamp(when) - - def test_it(self): - import datetime - from google.protobuf.timestamp_pb2 import Timestamp - from gcloud._helpers import UTC - - # Epoch is midnight on January 1, 1970 ... - dt_stamp = datetime.datetime(1970, month=1, day=1, hour=0, - minute=1, second=1, microsecond=1234, - tzinfo=UTC) - # ... so 1 minute and 1 second after is 61 seconds and 1234 - # microseconds is 1234000 nanoseconds. - timestamp = Timestamp(seconds=61, nanos=1234000) - self.assertEqual(self._callFUT(dt_stamp), timestamp) - - class Test__name_from_project_path(unittest2.TestCase): PROJECT = 'PROJECT' diff --git a/setup.py b/setup.py index 3c7022aebbb5..e1b7e5942a5f 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,6 @@ 'httplib2 >= 0.9.1', 'googleapis-common-protos', 'oauth2client >= 2.0.1', - 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1', 'pyOpenSSL', 'six', ]
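For reference, every resumable-upload request removed from ``gcloud/streaming/transfer.py`` above reduces to one of three 'Content-Range' forms, chosen in ``_send_chunk`` and ``_send_media_body``. The snippet below is a minimal standalone sketch of that selection logic; the helper name ``_range_string`` and the byte values in the assertions are illustrative only and are not part of any remaining gcloud API.

    def _range_string(start, end, total_size=None):
        """Build the 'Content-Range' value for one resumable-upload request.

        ``end`` is exclusive, mirroring the deleted ``_send_chunk`` helper;
        ``total_size`` is None while streaming a source of unknown length.
        """
        if total_size is None:
            # Streaming case: the overall length is still unknown to the server.
            return 'bytes %d-%d/*' % (start, end - 1)
        if end == start:
            # Nothing left to send; this request only finalizes the upload.
            return 'bytes */%d' % total_size
        # Normal case: a chunk of an upload whose total size is known.
        return 'bytes %d-%d/%d' % (start, end - 1, total_size)

    assert _range_string(0, 512) == 'bytes 0-511/*'
    assert _range_string(1024, 1024, total_size=1024) == 'bytes */1024'
    assert _range_string(0, 1024, total_size=4096) == 'bytes 0-1023/4096'

A 308 (resume incomplete) response to such a request reports the last byte the server has persisted, which is why the deleted ``_send_media_request`` re-seeks the stream when fewer bytes were accepted than were sent.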