From ce791cfb3a8574b062a9112694319119471e7647 Mon Sep 17 00:00:00 2001 From: larkee Date: Mon, 16 Aug 2021 12:14:13 +1000 Subject: [PATCH 01/26] chore: regen (via synth) : fix conflicts --- .../spanner_admin_database_v1/types/backup.py | 1 + .../types/spanner_database_admin.py | 9 +- google/cloud/spanner_v1/types/transaction.py | 152 +++++++++++------- google/cloud/spanner_v1/types/type.py | 1 + 4 files changed, 97 insertions(+), 66 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py index dd42c409b9..5cb9dc9f76 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup.py +++ b/google/cloud/spanner_admin_database_v1/types/backup.py @@ -190,6 +190,7 @@ class State(proto.Enum): number=12, message=timestamp_pb2.Timestamp, ) + database_dialect = proto.Field(proto.ENUM, number=10, enum=common.DatabaseDialect,) class CreateBackupRequest(proto.Message): diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index 52521db98d..a24d382c9c 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -143,8 +143,7 @@ class Database(proto.Message): DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - Output only. The dialect of the Cloud Spanner - Database. + The dialect of the Cloud Spanner Database. 
""" class State(proto.Enum): @@ -201,6 +200,8 @@ class State(proto.Enum): number=10, enum=common.DatabaseDialect, ) + default_leader = proto.Field(proto.STRING, number=9,) + database_dialect = proto.Field(proto.ENUM, number=10, enum=common.DatabaseDialect,) class ListDatabasesRequest(proto.Message): @@ -294,8 +295,7 @@ class CreateDatabaseRequest(proto.Message): Cloud Spanner will encrypt/decrypt all data at rest using Google default encryption. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - Optional. The dialect of the Cloud Spanner - Database. + The dialect of the Cloud Spanner Database. """ parent = proto.Field( @@ -320,6 +320,7 @@ class CreateDatabaseRequest(proto.Message): number=5, enum=common.DatabaseDialect, ) + database_dialect = proto.Field(proto.ENUM, number=5, enum=common.DatabaseDialect,) class CreateDatabaseMetadata(proto.Message): diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 7c0a766c58..0addc255e2 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -30,7 +30,8 @@ class TransactionOptions(proto.Message): - r"""Transactions: + r"""Transactions + ============ Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and @@ -39,7 +40,10 @@ class TransactionOptions(proto.Message): the next transaction. It is not necessary to create a new session for each transaction. - Transaction Modes: Cloud Spanner supports three transaction modes: + Transaction Modes + ================= + + Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to write data into Cloud Spanner. These transactions rely on @@ -70,9 +74,12 @@ class TransactionOptions(proto.Message): may, however, read/write data in different tables within that database. 
- Locking Read-Write Transactions: Locking transactions may be used to - atomically read-modify-write data anywhere in a database. This type - of transaction is externally consistent. + Locking Read-Write Transactions + ------------------------------- + + Locking transactions may be used to atomically read-modify-write + data anywhere in a database. This type of transaction is externally + consistent. Clients should attempt to minimize the amount of time a transaction is active. Faster transactions commit with higher probability and @@ -91,25 +98,30 @@ class TransactionOptions(proto.Message): [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the transaction. - Semantics: Cloud Spanner can commit the transaction if all read - locks it acquired are still valid at commit time, and it is able to - acquire write locks for all writes. Cloud Spanner can abort the - transaction for any reason. If a commit attempt returns ``ABORTED``, - Cloud Spanner guarantees that the transaction has not modified any - user data in Cloud Spanner. + Semantics + --------- + + Cloud Spanner can commit the transaction if all read locks it + acquired are still valid at commit time, and it is able to acquire + write locks for all writes. Cloud Spanner can abort the transaction + for any reason. If a commit attempt returns ``ABORTED``, Cloud + Spanner guarantees that the transaction has not modified any user + data in Cloud Spanner. Unless the transaction commits, Cloud Spanner makes no guarantees about how long the transaction's locks were held for. It is an error to use Cloud Spanner locks for any sort of mutual exclusion other than between Cloud Spanner transactions themselves. - Retrying Aborted Transactions: When a transaction aborts, the - application can choose to retry the whole transaction again. To - maximize the chances of successfully committing the retry, the - client should execute the retry in the same session as the original - attempt. 
The original session's lock priority increases with each - consecutive abort, meaning that each attempt has a slightly better - chance of success than the previous. + Retrying Aborted Transactions + ----------------------------- + + When a transaction aborts, the application can choose to retry the + whole transaction again. To maximize the chances of successfully + committing the retry, the client should execute the retry in the + same session as the original attempt. The original session's lock + priority increases with each consecutive abort, meaning that each + attempt has a slightly better chance of success than the previous. Under some circumstances (for example, many transactions attempting to modify the same row(s)), a transaction can abort many times in a @@ -118,21 +130,25 @@ class TransactionOptions(proto.Message): instead, it is better to limit the total amount of time spent retrying. - Idle Transactions: A transaction is considered idle if it has no - outstanding reads or SQL queries and has not started a read or SQL - query within the last 10 seconds. Idle transactions can be aborted - by Cloud Spanner so that they don't hold on to locks indefinitely. - If an idle transaction is aborted, the commit will fail with error - ``ABORTED``. + Idle Transactions + ----------------- + + A transaction is considered idle if it has no outstanding reads or + SQL queries and has not started a read or SQL query within the last + 10 seconds. Idle transactions can be aborted by Cloud Spanner so + that they don't hold on to locks indefinitely. In that case, the + commit will fail with error ``ABORTED``. If this behavior is undesirable, periodically executing a simple SQL query in the transaction (for example, ``SELECT 1``) prevents the transaction from becoming idle. - Snapshot Read-Only Transactions: Snapshot read-only transactions - provides a simpler method than locking read-write transactions for - doing several consistent reads. 
However, this type of transaction - does not support writes. + Snapshot Read-Only Transactions + ------------------------------- + + Snapshot read-only transactions provides a simpler method than + locking read-write transactions for doing several consistent reads. + However, this type of transaction does not support writes. Snapshot transactions do not take locks. Instead, they work by choosing a Cloud Spanner timestamp, then executing all reads at that @@ -166,11 +182,14 @@ class TransactionOptions(proto.Message): Each type of timestamp bound is discussed in detail below. - Strong: Strong reads are guaranteed to see the effects of all - transactions that have committed before the start of the read. - Furthermore, all rows yielded by a single read are consistent with - each other -- if any part of the read observes a transaction, all - parts of the read see the transaction. + Strong + ------ + + Strong reads are guaranteed to see the effects of all transactions + that have committed before the start of the read. Furthermore, all + rows yielded by a single read are consistent with each other -- if + any part of the read observes a transaction, all parts of the read + see the transaction. Strong reads are not repeatable: two consecutive strong read-only transactions might return inconsistent results if there are @@ -181,14 +200,17 @@ class TransactionOptions(proto.Message): See [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. - Exact Staleness: These timestamp bounds execute reads at a - user-specified timestamp. Reads at a timestamp are guaranteed to see - a consistent prefix of the global transaction history: they observe - modifications done by all transactions with a commit timestamp less - than or equal to the read timestamp, and observe none of the - modifications done by transactions with a larger commit timestamp. 
- They will block until all conflicting transactions that may be - assigned commit timestamps <= the read timestamp have finished. + Exact Staleness + --------------- + + These timestamp bounds execute reads at a user-specified timestamp. + Reads at a timestamp are guaranteed to see a consistent prefix of + the global transaction history: they observe modifications done by + all transactions with a commit timestamp <= the read timestamp, and + observe none of the modifications done by transactions with a larger + commit timestamp. They will block until all conflicting transactions + that may be assigned commit timestamps <= the read timestamp have + finished. The timestamp can either be expressed as an absolute Cloud Spanner commit timestamp or a staleness relative to the current time. @@ -203,11 +225,14 @@ class TransactionOptions(proto.Message): and [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. - Bounded Staleness: Bounded staleness modes allow Cloud Spanner to - pick the read timestamp, subject to a user-provided staleness bound. - Cloud Spanner chooses the newest timestamp within the staleness - bound that allows execution of the reads at the closest available - replica without blocking. + Bounded Staleness + ----------------- + + Bounded staleness modes allow Cloud Spanner to pick the read + timestamp, subject to a user-provided staleness bound. Cloud Spanner + chooses the newest timestamp within the staleness bound that allows + execution of the reads at the closest available replica without + blocking. All rows yielded are consistent with each other -- if any part of the read observes a transaction, all parts of the read see the @@ -233,23 +258,27 @@ class TransactionOptions(proto.Message): and [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. 
- Old Read Timestamps and Garbage Collection: Cloud Spanner - continuously garbage collects deleted and overwritten data in the - background to reclaim storage space. This process is known as - "version GC". By default, version GC reclaims versions after they - are one hour old. Because of this, Cloud Spanner cannot perform - reads at read timestamps more than one hour in the past. This - restriction also applies to in-progress reads and/or SQL queries - whose timestamp become too old while executing. Reads and SQL - queries with too-old read timestamps fail with the error + Old Read Timestamps and Garbage Collection + ------------------------------------------ + + Cloud Spanner continuously garbage collects deleted and overwritten + data in the background to reclaim storage space. This process is + known as "version GC". By default, version GC reclaims versions + after they are one hour old. Because of this, Cloud Spanner cannot + perform reads at read timestamps more than one hour in the past. + This restriction also applies to in-progress reads and/or SQL + queries whose timestamp become too old while executing. Reads and + SQL queries with too-old read timestamps fail with the error ``FAILED_PRECONDITION``. - Partitioned DML Transactions: Partitioned DML transactions are used - to execute DML statements with a different execution strategy that - provides different, and often better, scalability properties for - large, table-wide operations than DML in a ReadWrite transaction. - Smaller scoped statements, such as an OLTP workload, should prefer - using ReadWrite transactions. + Partitioned DML Transactions + ---------------------------- + + Partitioned DML transactions are used to execute DML statements with + a different execution strategy that provides different, and often + better, scalability properties for large, table-wide operations than + DML in a ReadWrite transaction. 
Smaller scoped statements, such as + an OLTP workload, should prefer using ReadWrite transactions. Partitioned DML partitions the keyspace and runs the DML statement on each partition in separate, internal transactions. These @@ -486,7 +515,6 @@ class ReadOnly(proto.Message): class Transaction(proto.Message): r"""A transaction. - Attributes: id (bytes): ``id`` may be used to identify the transaction in subsequent diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index 12b06fc737..02444fa577 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -59,6 +59,7 @@ class TypeAnnotationCode(proto.Enum): the way value is serialized. """ TYPE_ANNOTATION_CODE_UNSPECIFIED = 0 + INT32 = 1 PG_NUMERIC = 2 From e580775b23347f4895817e78a1e36f8d39752fc4 Mon Sep 17 00:00:00 2001 From: larkee Date: Mon, 16 Aug 2021 12:32:07 +1000 Subject: [PATCH 02/26] feat: add NUMERIC support: conflicts resolved --- google/cloud/spanner_v1/__init__.py | 2 ++ google/cloud/spanner_v1/param_types.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/google/cloud/spanner_v1/__init__.py b/google/cloud/spanner_v1/__init__.py index 4aa08d2c29..503dba70c4 100644 --- a/google/cloud/spanner_v1/__init__.py +++ b/google/cloud/spanner_v1/__init__.py @@ -57,6 +57,7 @@ from .types.transaction import TransactionSelector from .types.type import StructType from .types.type import Type +from .types.type import TypeAnnotationCode from .types.type import TypeCode from .data_types import JsonObject @@ -132,6 +133,7 @@ "TransactionOptions", "TransactionSelector", "Type", + "TypeAnnotationCode", "TypeCode", # Custom spanner related data types "JsonObject", diff --git a/google/cloud/spanner_v1/param_types.py b/google/cloud/spanner_v1/param_types.py index 9f7c9586a3..22c4782b8d 100644 --- a/google/cloud/spanner_v1/param_types.py +++ b/google/cloud/spanner_v1/param_types.py @@ -15,6 +15,7 @@ """Types exported from this package.""" from 
google.cloud.spanner_v1 import Type
+from google.cloud.spanner_v1 import TypeAnnotationCode
 from google.cloud.spanner_v1 import TypeCode
 from google.cloud.spanner_v1 import StructType
 
@@ -29,6 +30,7 @@
 TIMESTAMP = Type(code=TypeCode.TIMESTAMP)
 NUMERIC = Type(code=TypeCode.NUMERIC)
 JSON = Type(code=TypeCode.JSON)
+PG_NUMERIC = Type(code=TypeCode.NUMERIC, type_annotation=TypeAnnotationCode.PG_NUMERIC)
 
 
 def Array(element_type):

From f7f0c3a3e278679ba7f5dc7832d30387c9a791aa Mon Sep 17 00:00:00 2001
From: larkee
Date: Mon, 16 Aug 2021 12:32:27 +1000
Subject: [PATCH 03/26] feat: add dialect support: fix conflicts

---
 google/cloud/spanner_v1/backup.py   | 10 +++++++-
 google/cloud/spanner_v1/database.py | 40 ++++++++++++++++++++++++++---
 2 files changed, 46 insertions(+), 4 deletions(-)

diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py
index a7b7a972b6..f83a4a2d9c 100644
--- a/google/cloud/spanner_v1/backup.py
+++ b/google/cloud/spanner_v1/backup.py
@@ -94,6 +94,7 @@ def __init__(
         self._encryption_info = None
         self._max_expire_time = None
         self._referencing_backups = None
+        self._database_dialect = None
         if type(encryption_config) == dict:
             if source_backup:
                 self._encryption_config = CopyBackupEncryptionConfig(
@@ -193,7 +194,7 @@ def referencing_databases(self):
     @property
     def encryption_info(self):
         """Encryption info for this backup.
-        :rtype: :class:`~google.clod.spanner_admin_database_v1.types.EncryptionInfo`
+        :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionInfo`
         :returns: a class representing the encryption info
         """
         return self._encryption_info
@@ -216,6 +217,13 @@ def referencing_backups(self):
         """
         return self._referencing_backups
 
+    def database_dialect(self):
+        """The dialect of this backup's database. 
+        :rtype: :class:`~google.cloud.spanner_admin_database_v1.types.DatabaseDialect`
+        :returns: a class representing the dialect of this backup's database
+        """
+        return self._database_dialect
+
     @classmethod
     def from_pb(cls, backup_pb, instance):
         """Create an instance of this class from a protobuf message.
diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py
index 90916bc710..13a86a6186 100644
--- a/google/cloud/spanner_v1/database.py
+++ b/google/cloud/spanner_v1/database.py
@@ -52,6 +52,17 @@
 from google.cloud.spanner_v1.services.spanner.transports.grpc import (
     SpannerGrpcTransport,
 )
+from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest
+from google.cloud.spanner_admin_database_v1 import DatabaseDialect
+from google.cloud.spanner_admin_database_v1 import EncryptionConfig
+from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig
+from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest
+from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest
+from google.cloud.spanner_v1 import (
+    ExecuteSqlRequest,
+    TransactionSelector,
+    TransactionOptions,
+)
 from google.cloud.spanner_v1.table import Table
 
 
@@ -68,7 +79,7 @@
 _LIST_TABLES_QUERY = """SELECT TABLE_NAME
 FROM INFORMATION_SCHEMA.TABLES
-WHERE SPANNER_STATE = 'COMMITTED'
+{}
 """
 
 DEFAULT_RETRY_BACKOFF = Retry(initial=0.02, maximum=32, multiplier=1.3)
@@ -126,6 +137,7 @@ def __init__(
         pool=None,
         logger=None,
         encryption_config=None,
+        database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED
     ):
         self.database_id = database_id
         self._instance = instance
@@ -141,6 +153,7 @@
         self.log_commit_stats = False
         self._logger = logger
         self._encryption_config = encryption_config
+        self._database_dialect = database_dialect
 
         if pool is None:
             pool = BurstyPool()
@@ -294,6 +307,18 @@ def ddl_statements(self):
         """
         return self._ddl_statements
 
+    @property
+    def database_dialect(self):
+        """DDL Statements used to 
define database schema. + + See + cloud.google.com/spanner/docs/data-definition-language + + :rtype: :class:`google.cloud.spanner_admin_database_v1.types.DatabaseDialect + :returns: the dialect of the database + """ + return self._database_dialect + @property def logger(self): """Logger used by the database. @@ -364,7 +389,10 @@ def create(self): metadata = _metadata_with_prefix(self.name) db_name = self.database_id if "-" in db_name: - db_name = "`%s`" % (db_name,) + if self._database_dialect == DatabaseDialect.POSTGRESQL: + db_name = f"\"{db_name}\"" + else: + db_name = f"`{db_name}`" if type(self._encryption_config) == dict: self._encryption_config = EncryptionConfig(**self._encryption_config) @@ -373,6 +401,7 @@ def create(self): create_statement="CREATE DATABASE %s" % (db_name,), extra_statements=list(self._ddl_statements), encryption_config=self._encryption_config, + database_dialect=self._database_dialect, ) future = api.create_database(request=request, metadata=metadata) return future @@ -418,6 +447,7 @@ def reload(self): self._encryption_config = response.encryption_config self._encryption_info = response.encryption_info self._default_leader = response.default_leader + self._database_dialect = response.database_dialect def update_ddl(self, ddl_statements, operation_id=""): """Update DDL for this database. @@ -778,7 +808,11 @@ def list_tables(self): resources within the current database. 
""" with self.snapshot() as snapshot: - results = snapshot.execute_sql(_LIST_TABLES_QUERY) + if self._database_dialect == DatabaseDialect.POSTGRESQL: + where_clause = "WHERE TABLE_SCHEMA = 'PUBLIC'" + else: + where_clause = "WHERE SPANNER_STATE = 'COMMITTED'" + results = snapshot.execute_sql(_LIST_TABLES_QUERY.format(where_clause)) for row in results: yield self.table(row[0]) From 95dd609cf3a2a75cbffd06fdbbe7e5d61c277b7f Mon Sep 17 00:00:00 2001 From: larkee Date: Wed, 18 Aug 2021 12:21:00 +1000 Subject: [PATCH 04/26] fix: update table queries to support PG dialect --- google/cloud/spanner_v1/database.py | 2 +- google/cloud/spanner_v1/table.py | 20 ++++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 13a86a6186..7806d719c7 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -809,7 +809,7 @@ def list_tables(self): """ with self.snapshot() as snapshot: if self._database_dialect == DatabaseDialect.POSTGRESQL: - where_clause = "WHERE TABLE_SCHEMA = 'PUBLIC'" + where_clause = "WHERE TABLE_SCHEMA = 'public'" else: where_clause = "WHERE SPANNER_STATE = 'COMMITTED'" results = snapshot.execute_sql(_LIST_TABLES_QUERY.format(where_clause)) diff --git a/google/cloud/spanner_v1/table.py b/google/cloud/spanner_v1/table.py index 4a31446509..0f25c41756 100644 --- a/google/cloud/spanner_v1/table.py +++ b/google/cloud/spanner_v1/table.py @@ -16,6 +16,7 @@ from google.cloud.exceptions import NotFound +from google.cloud.spanner_admin_database_v1 import DatabaseDialect from google.cloud.spanner_v1.types import ( Type, TypeCode, @@ -26,7 +27,7 @@ SELECT EXISTS( SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES - WHERE TABLE_NAME = @table_id + {} ) """ _GET_SCHEMA_TEMPLATE = "SELECT * FROM {} LIMIT 0" @@ -76,11 +77,18 @@ def _exists(self, snapshot): :rtype: bool :returns: True if the table exists, else false. 
""" - results = snapshot.execute_sql( - _EXISTS_TEMPLATE, - params={"table_id": self.table_id}, - param_types={"table_id": Type(code=TypeCode.STRING)}, - ) + if self._database.database_dialect == DatabaseDialect.POSTGRESQL: + results = snapshot.execute_sql( + _EXISTS_TEMPLATE.format("WHERE TABLE_NAME = $1"), + params={"p1": self.table_id}, + param_types={"p1": Type(code=TypeCode.STRING)}, + ) + else: + results = snapshot.execute_sql( + _EXISTS_TEMPLATE.format("WHERE TABLE_NAME = @table_id"), + params={"table_id": self.table_id}, + param_types={"table_id": Type(code=TypeCode.STRING)}, + ) return next(iter(results))[0] @property From 8e4349b15e870262345f58a687dc13baf1e96b44 Mon Sep 17 00:00:00 2001 From: larkee Date: Fri, 3 Sep 2021 15:16:45 +1000 Subject: [PATCH 05/26] feat: add database dialect support for database factory --- google/cloud/spanner_v1/instance.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index f8869d1f7b..78ae8714a5 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -25,6 +25,7 @@ from google.cloud.spanner_admin_instance_v1 import Instance as InstancePB from google.cloud.spanner_admin_database_v1.types import backup from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.cloud.spanner_admin_database_v1 import DatabaseDialect from google.cloud.spanner_admin_database_v1 import ListBackupsRequest from google.cloud.spanner_admin_database_v1 import ListBackupOperationsRequest from google.cloud.spanner_admin_database_v1 import ListDatabasesRequest @@ -428,6 +429,7 @@ def database( pool=None, logger=None, encryption_config=None, + database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED ): """Factory to create a database within this instance. 
@@ -468,6 +470,7 @@ def database( pool=pool, logger=logger, encryption_config=encryption_config, + database_dialect=database_dialect ) def list_databases(self, page_size=None): From f4a256f32ccf6dd73d739286bbc51b53aba671b5 Mon Sep 17 00:00:00 2001 From: larkee Date: Fri, 10 Sep 2021 17:06:53 +1000 Subject: [PATCH 06/26] test: add dialect support to system tests: resolve conflict, correct POSTGRES_ALL_TYPES_COLUMNS --- tests/_fixtures.py | 47 ++++++ tests/system/_helpers.py | 6 +- tests/system/conftest.py | 21 ++- tests/system/test_backup_api.py | 8 +- tests/system/test_database_api.py | 14 +- tests/system/test_dbapi.py | 4 +- tests/system/test_session_api.py | 249 +++++++++++++++++++----------- tests/system/test_table_api.py | 10 +- 8 files changed, 251 insertions(+), 108 deletions(-) diff --git a/tests/_fixtures.py b/tests/_fixtures.py index e4cd929835..9f661d8ec2 100644 --- a/tests/_fixtures.py +++ b/tests/_fixtures.py @@ -120,7 +120,54 @@ PRIMARY KEY(id, commit_ts DESC); """ +PG_DDL = """\ +CREATE TABLE contacts ( + contact_id BIGINT, + first_name VARCHAR(1024), + last_name VARCHAR(1024), + email VARCHAR(1024), + PRIMARY KEY (contact_id) ); +CREATE TABLE all_types ( + pkey BIGINT NOT NULL, + int_value INT, + bool_value BOOL, + bytes_value BYTEA, + float_value DOUBLE PRECISION, + string_value VARCHAR(16), + timestamp_value TIMESTAMPTZ, + numeric_value NUMERIC, + PRIMARY KEY (pkey) ); +CREATE TABLE counters ( + name VARCHAR(1024), + value BIGINT, + PRIMARY KEY (name)); +CREATE TABLE string_plus_array_of_string ( + id BIGINT, + name VARCHAR(16), + PRIMARY KEY (id)); +CREATE INDEX name ON contacts(first_name, last_name); +""" +# CREATE TABLE contact_phones ( +# contact_id BIGINT, +# phone_type VARCHAR(1024), +# phone_number VARCHAR(1024), +# PRIMARY KEY (contact_id, phone_type), +# FOREIGN KEY (contact_id) REFERENCES +# contacts(contact_id) ON DELETE CASCADE ) +# INTERLEAVE IN PARENT contacts; +# CREATE TABLE users_history ( +# id BIGINT NOT NULL, +# commit_ts 
COMMIT_TIMESTAMP NOT NULL, +# name TEXT NOT NULL, +# email TEXT, +# deleted BOOL NOT NULL, +# PRIMARY KEY(id, commit_ts DESC) ); +# """ + DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()] EMULATOR_DDL_STATEMENTS = [ stmt.strip() for stmt in EMULATOR_DDL.split(";") if stmt.strip() ] +PG_DDL_STATEMENTS = [ + stmt.strip() for stmt in PG_DDL.split(";") if stmt.strip() +] diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 80eb9361cd..5fbc69c6d2 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -46,13 +46,17 @@ USE_EMULATOR_ENVVAR = "SPANNER_EMULATOR_HOST" USE_EMULATOR = os.getenv(USE_EMULATOR_ENVVAR) is not None +DATABASE_DIALECT_ENVVAR = "SPANNER_DATABASE_DIALECT" +DATABASE_DIALECT = os.getenv(DATABASE_DIALECT_ENVVAR) + EMULATOR_PROJECT_ENVVAR = "GCLOUD_PROJECT" EMULATOR_PROJECT_DEFAULT = "emulator-test-project" EMULATOR_PROJECT = os.getenv(EMULATOR_PROJECT_ENVVAR, EMULATOR_PROJECT_DEFAULT) DDL_STATEMENTS = ( - _fixtures.EMULATOR_DDL_STATEMENTS if USE_EMULATOR else _fixtures.DDL_STATEMENTS + _fixtures.PG_DDL_STATEMENTS if DATABASE_DIALECT == "POSTGRESQL" else + (_fixtures.EMULATOR_DDL_STATEMENTS if USE_EMULATOR else _fixtures.DDL_STATEMENTS) ) retry_true = retry.RetryResult(operator.truth) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index 0568b3bf3f..e748bd85ca 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -18,6 +18,7 @@ import pytest from google.cloud import spanner_v1 +from google.cloud.spanner_admin_database_v1 import DatabaseDialect from . 
import _helpers from google.cloud.spanner_admin_database_v1.types.backup import ( CreateBackupEncryptionConfig, @@ -48,6 +49,18 @@ def not_emulator(): pytest.skip(f"{_helpers.USE_EMULATOR_ENVVAR} set in environment.") +@pytest.fixture(scope="session") +def not_postgres(database_dialect): + if database_dialect == DatabaseDialect.POSTGRESQL: + pytest.skip(f"{_helpers.DATABASE_DIALECT_ENVVAR} set to POSTGRES in environment.") + + +@pytest.fixture(scope="session") +def database_dialect(): + return DatabaseDialect[_helpers.DATABASE_DIALECT] if _helpers.DATABASE_DIALECT else \ + DatabaseDialect.GOOGLE_STANDARD_SQL + + @pytest.fixture(scope="session") def spanner_client(): if _helpers.USE_EMULATOR: @@ -59,7 +72,9 @@ def spanner_client(): credentials=credentials, ) else: - return spanner_v1.Client() # use google.auth.default credentials + return spanner_v1.Client( + client_options={"api_endpoint": "staging-wrenchworks.sandbox.googleapis.com"} + ) # use google.auth.default credentials @pytest.fixture(scope="session") @@ -148,11 +163,11 @@ def shared_instance( @pytest.fixture(scope="session") -def shared_database(shared_instance, database_operation_timeout): +def shared_database(shared_instance, database_operation_timeout, database_dialect): database_name = _helpers.unique_id("test_database") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( - database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool + database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool, database_dialect=database_dialect ) operation = database.create() operation.result(database_operation_timeout) # raises on failure / timeout. 
diff --git a/tests/system/test_backup_api.py b/tests/system/test_backup_api.py index c09c06a5f2..dc57c2261a 100644 --- a/tests/system/test_backup_api.py +++ b/tests/system/test_backup_api.py @@ -50,7 +50,7 @@ def same_config_instance(spanner_client, shared_instance, instance_operation_tim @pytest.fixture(scope="session") -def diff_config(shared_instance, instance_configs): +def diff_config(shared_instance, instance_configs, not_postgres): current_config = shared_instance.configuration_name for config in reversed(instance_configs): if "-us-" in config.name and config.name != current_config: @@ -93,11 +93,11 @@ def database_version_time(shared_database): @pytest.fixture(scope="session") -def second_database(shared_instance, database_operation_timeout): +def second_database(shared_instance, database_operation_timeout, database_dialect): database_name = _helpers.unique_id("test_database2") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( - database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool + database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool, database_dialect=database_dialect ) operation = database.create() operation.result(database_operation_timeout) # raises on failure / timeout. 
@@ -120,6 +120,7 @@ def backups_to_delete(): def test_backup_workflow( shared_instance, shared_database, + database_dialect, database_version_time, backups_to_delete, databases_to_delete, @@ -197,6 +198,7 @@ def test_backup_workflow( database.reload() expected_encryption_config = EncryptionConfig() assert expected_encryption_config == database.encryption_config + assert database_dialect == database.database_dialect database.drop() backup.delete() diff --git a/tests/system/test_database_api.py b/tests/system/test_database_api.py index 09f6d0e038..aa024eb6a2 100644 --- a/tests/system/test_database_api.py +++ b/tests/system/test_database_api.py @@ -27,7 +27,7 @@ @pytest.fixture(scope="module") -def multiregion_instance(spanner_client, instance_operation_timeout): +def multiregion_instance(spanner_client, instance_operation_timeout, not_postgres): multi_region_instance_id = _helpers.unique_id("multi-region") multi_region_config = "nam3" config_name = "{}/instanceConfigs/{}".format( @@ -55,10 +55,10 @@ def test_list_databases(shared_instance, shared_database): assert shared_database.name in database_names -def test_create_database(shared_instance, databases_to_delete): +def test_create_database(shared_instance, databases_to_delete, database_dialect): pool = spanner_v1.BurstyPool(labels={"testcase": "create_database"}) temp_db_id = _helpers.unique_id("temp_db") - temp_db = shared_instance.database(temp_db_id, pool=pool) + temp_db = shared_instance.database(temp_db_id, pool=pool, database_dialect=database_dialect) operation = temp_db.create() databases_to_delete.append(temp_db) @@ -71,6 +71,7 @@ def test_create_database(shared_instance, databases_to_delete): def test_create_database_pitr_invalid_retention_period( not_emulator, # PITR-lite features are not supported by the emulator + not_postgres, shared_instance, ): pool = spanner_v1.BurstyPool(labels={"testcase": "create_database_pitr"}) @@ -89,6 +90,7 @@ def test_create_database_pitr_invalid_retention_period( def 
test_create_database_pitr_success( not_emulator, # PITR-lite features are not supported by the emulator + not_postgres, shared_instance, databases_to_delete, ): @@ -180,7 +182,7 @@ def test_table_not_found(shared_instance): temp_db.create() -def test_update_ddl_w_operation_id(shared_instance, databases_to_delete): +def test_update_ddl_w_operation_id(shared_instance, databases_to_delete, database_dialect): # We used to have: # @pytest.mark.skip( # reason="'Database.update_ddl' has a flaky timeout. See: " @@ -188,7 +190,7 @@ def test_update_ddl_w_operation_id(shared_instance, databases_to_delete): # ) pool = spanner_v1.BurstyPool(labels={"testcase": "update_database_ddl"}) temp_db_id = _helpers.unique_id("update_ddl", separator="_") - temp_db = shared_instance.database(temp_db_id, pool=pool) + temp_db = shared_instance.database(temp_db_id, pool=pool, database_dialect=database_dialect) create_op = temp_db.create() databases_to_delete.append(temp_db) create_op.result(DBAPI_OPERATION_TIMEOUT) # raises on failure / timeout. 
@@ -208,6 +210,7 @@ def test_update_ddl_w_operation_id(shared_instance, databases_to_delete): def test_update_ddl_w_pitr_invalid( not_emulator, + not_postgres, shared_instance, databases_to_delete, ): @@ -232,6 +235,7 @@ def test_update_ddl_w_pitr_invalid( def test_update_ddl_w_pitr_success( not_emulator, + not_postgres, shared_instance, databases_to_delete, ): diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py index 9557a46b37..c1cd025c07 100644 --- a/tests/system/test_dbapi.py +++ b/tests/system/test_dbapi.py @@ -21,6 +21,7 @@ from google.cloud import spanner_v1 from google.cloud._helpers import UTC from google.cloud.spanner_dbapi.connection import connect +from google.cloud.spanner_admin_database_v1 import DatabaseDialect from google.cloud.spanner_dbapi.connection import Connection from google.cloud.spanner_dbapi.exceptions import ProgrammingError from google.cloud.spanner_v1 import JsonObject @@ -39,9 +40,8 @@ PRIMARY KEY (contact_id)""", ) - @pytest.fixture(scope="session") -def raw_database(shared_instance, database_operation_timeout): +def raw_database(shared_instance, database_operation_timeout, not_postgres): databse_id = _helpers.unique_id("dbapi-txn") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index 09c65970f3..8a09be5347 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -26,6 +26,7 @@ from google.api_core import datetime_helpers from google.api_core import exceptions from google.cloud import spanner_v1 +from google.cloud.spanner_admin_database_v1 import DatabaseDialect from google.cloud._helpers import UTC from google.cloud.spanner_v1.data_types import JsonObject from tests import _helpers as ot_helpers @@ -81,7 +82,11 @@ "json_value", "json_array", ) + EMULATOR_ALL_TYPES_COLUMNS = LIVE_ALL_TYPES_COLUMNS[:-4] +# ToDo: Clean up generation of 
POSTGRES_ALL_TYPES_COLUMNS +POSTGRES_ALL_TYPES_COLUMNS = LIVE_ALL_TYPES_COLUMNS[:1] + LIVE_ALL_TYPES_COLUMNS[1:7:2] + LIVE_ALL_TYPES_COLUMNS[9:17:2] + AllTypesRowData = collections.namedtuple("AllTypesRowData", LIVE_ALL_TYPES_COLUMNS) AllTypesRowData.__new__.__defaults__ = tuple([None for colum in LIVE_ALL_TYPES_COLUMNS]) EmulatorAllTypesRowData = collections.namedtuple( @@ -90,6 +95,12 @@ EmulatorAllTypesRowData.__new__.__defaults__ = tuple( [None for colum in EMULATOR_ALL_TYPES_COLUMNS] ) +PostGresAllTypesRowData = collections.namedtuple( + "PostGresAllTypesRowData", POSTGRES_ALL_TYPES_COLUMNS +) +PostGresAllTypesRowData.__new__.__defaults__ = tuple( + [None for colum in POSTGRES_ALL_TYPES_COLUMNS] +) LIVE_ALL_TYPES_ROWDATA = ( # all nulls @@ -156,16 +167,33 @@ EmulatorAllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]), ) +POSTGRES_ALL_TYPES_ROWDATA = ( + # all nulls + PostGresAllTypesRowData(pkey=0), + # Non-null values + PostGresAllTypesRowData(pkey=101, int_value=123), + PostGresAllTypesRowData(pkey=102, bool_value=False), + PostGresAllTypesRowData(pkey=103, bytes_value=BYTES_1), + PostGresAllTypesRowData(pkey=105, float_value=1.4142136), + PostGresAllTypesRowData(pkey=106, string_value="VALUE"), + PostGresAllTypesRowData(pkey=107, timestamp_value=SOME_TIME), + PostGresAllTypesRowData(pkey=108, timestamp_value=NANO_TIME), + PostGresAllTypesRowData(pkey=109, numeric_value=NUMERIC_1), +) + if _helpers.USE_EMULATOR: ALL_TYPES_COLUMNS = EMULATOR_ALL_TYPES_COLUMNS ALL_TYPES_ROWDATA = EMULATOR_ALL_TYPES_ROWDATA +elif _helpers.DATABASE_DIALECT: + ALL_TYPES_COLUMNS = POSTGRES_ALL_TYPES_COLUMNS + ALL_TYPES_ROWDATA = POSTGRES_ALL_TYPES_ROWDATA else: ALL_TYPES_COLUMNS = LIVE_ALL_TYPES_COLUMNS ALL_TYPES_ROWDATA = LIVE_ALL_TYPES_ROWDATA @pytest.fixture(scope="session") -def sessions_database(shared_instance, database_operation_timeout): +def sessions_database(shared_instance, database_operation_timeout, database_dialect): database_name = 
_helpers.unique_id("test_sessions", separator="_") pool = spanner_v1.BurstyPool(labels={"testcase": "session_api"}) sessions_database = shared_instance.database( @@ -356,7 +384,7 @@ def test_batch_insert_then_read(sessions_database, ot_exporter): ) -def test_batch_insert_then_read_string_array_of_string(sessions_database): +def test_batch_insert_then_read_string_array_of_string(sessions_database, not_postgres): table = "string_plus_array_of_string" columns = ["id", "name", "tags"] rowdata = [ @@ -402,7 +430,7 @@ def test_batch_insert_or_update_then_query(sessions_database): sd._check_rows_data(rows) -def test_batch_insert_w_commit_timestamp(sessions_database): +def test_batch_insert_w_commit_timestamp(sessions_database, not_postgres): table = "users_history" columns = ["id", "commit_ts", "name", "email", "deleted"] user_id = 1234 @@ -588,11 +616,12 @@ def _generate_insert_statements(): column_list = ", ".join(_sample_data.COLUMNS) for row in _sample_data.ROW_DATA: - row_data = '{}, "{}", "{}", "{}"'.format(*row) + row_data = "{}, '{}', '{}', '{}'".format(*row) yield f"INSERT INTO {table} ({column_list}) VALUES ({row_data})" @_helpers.retry_mabye_conflict +@_helpers.retry_mabye_aborted_txn def test_transaction_execute_sql_w_dml_read_rollback( sessions_database, sessions_to_delete, @@ -690,7 +719,7 @@ def test_transaction_execute_update_then_insert_commit( # [END spanner_test_dml_with_mutation] -def test_transaction_batch_update_success(sessions_database, sessions_to_delete): +def test_transaction_batch_update_success(sessions_database, sessions_to_delete, database_dialect): # [START spanner_test_dml_with_mutation] # [START spanner_test_dml_update] sd = _sample_data @@ -703,16 +732,19 @@ def test_transaction_batch_update_success(sessions_database, sessions_to_delete) with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) + keys = ['p1', 'p2'] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] + placeholders = ['$1', '$2'] if 
database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + insert_statement = list(_generate_insert_statements())[0] update_statement = ( - "UPDATE contacts SET email = @email WHERE contact_id = @contact_id;", - {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": param_types.INT64, "email": param_types.STRING}, + f"UPDATE contacts SET email = {placeholders[1]} WHERE contact_id = {placeholders[0]};", + {keys[0]: 1, keys[1]: "phreddy@example.com"}, + {keys[0]: param_types.INT64, keys[1]: param_types.STRING}, ) delete_statement = ( - "DELETE contacts WHERE contact_id = @contact_id;", - {"contact_id": 1}, - {"contact_id": param_types.INT64}, + f"DELETE FROM contacts WHERE contact_id = {placeholders[0]};", + {keys[0]: 1}, + {keys[0]: param_types.INT64}, ) def unit_of_work(transaction): @@ -739,6 +771,7 @@ def unit_of_work(transaction): def test_transaction_batch_update_and_execute_dml( sessions_database, sessions_to_delete, + database_dialect ): sd = _sample_data param_types = spanner_v1.param_types @@ -750,16 +783,19 @@ def test_transaction_batch_update_and_execute_dml( with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) + keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] + placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + insert_statements = list(_generate_insert_statements()) update_statements = [ ( - "UPDATE contacts SET email = @email WHERE contact_id = @contact_id;", - {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": param_types.INT64, "email": param_types.STRING}, + f"UPDATE contacts SET email = {placeholders[1]} WHERE contact_id = {placeholders[0]};", + {keys[0]: 1, keys[1]: "phreddy@example.com"}, + {keys[0]: param_types.INT64, keys[1]: param_types.STRING}, ) ] - delete_statement = "DELETE contacts WHERE TRUE;" + delete_statement = "DELETE FROM contacts WHERE TRUE;" def 
unit_of_work(transaction): rows = list(transaction.read(sd.TABLE, sd.COLUMNS, sd.ALL)) @@ -784,7 +820,7 @@ def unit_of_work(transaction): sd._check_rows_data(rows, []) -def test_transaction_batch_update_w_syntax_error(sessions_database, sessions_to_delete): +def test_transaction_batch_update_w_syntax_error(sessions_database, sessions_to_delete, database_dialect): from google.rpc import code_pb2 sd = _sample_data @@ -797,16 +833,19 @@ def test_transaction_batch_update_w_syntax_error(sessions_database, sessions_to_ with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) + keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] + placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + insert_statement = list(_generate_insert_statements())[0] update_statement = ( - "UPDTAE contacts SET email = @email WHERE contact_id = @contact_id;", - {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": param_types.INT64, "email": param_types.STRING}, + f"UPDTAE contacts SET email = {placeholders[1]} WHERE contact_id = {placeholders[0]};", + {keys[0]: 1, keys[1]: "phreddy@example.com"}, + {keys[0]: param_types.INT64, keys[1]: param_types.STRING}, ) delete_statement = ( - "DELETE contacts WHERE contact_id = @contact_id;", - {"contact_id": 1}, - {"contact_id": param_types.INT64}, + f"DELETE FROM contacts WHERE contact_id = {placeholders[0]};", + {keys[0]: 1}, + {keys[0]: param_types.INT64}, ) def unit_of_work(transaction): @@ -841,6 +880,7 @@ def test_transaction_batch_update_w_parent_span( sessions_database, sessions_to_delete, ot_exporter, + database_dialect ): from opentelemetry import trace @@ -855,16 +895,19 @@ def test_transaction_batch_update_w_parent_span( with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) + keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] + placeholders = ["$1", "$2"] if 
database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + insert_statement = list(_generate_insert_statements())[0] update_statement = ( - "UPDATE contacts SET email = @email WHERE contact_id = @contact_id;", - {"contact_id": 1, "email": "phreddy@example.com"}, - {"contact_id": param_types.INT64, "email": param_types.STRING}, + f"UPDATE contacts SET email = {placeholders[1]} WHERE contact_id = {placeholders[0]};", + {keys[0]: 1, keys[1]: "phreddy@example.com"}, + {keys[0]: param_types.INT64, keys[1]: param_types.STRING}, ) delete_statement = ( - "DELETE contacts WHERE contact_id = @contact_id;", - {"contact_id": 1}, - {"contact_id": param_types.INT64}, + f"DELETE FROM contacts WHERE contact_id = {placeholders[0]};", + {keys[0]: 1}, + {keys[0]: param_types.INT64}, ) def unit_of_work(transaction): @@ -896,7 +939,7 @@ def unit_of_work(transaction): assert span.parent.span_id == span_list[-1].context.span_id -def test_execute_partitioned_dml(sessions_database): +def test_execute_partitioned_dml(sessions_database, database_dialect): # [START spanner_test_dml_partioned_dml_update] sd = _sample_data param_types = spanner_v1.param_types @@ -915,17 +958,18 @@ def _setup_table(txn): sd._check_rows_data(before_pdml) + keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["email", "target"] + placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] nonesuch = "nonesuch@example.com" target = "phred@example.com" update_statement = ( - f"UPDATE {sd.TABLE} SET {sd.TABLE}.email = @email " - f"WHERE {sd.TABLE}.email = @target" + f"UPDATE contacts SET email = {placeholders[0]} WHERE email = {placeholders[1]}" ) row_count = sessions_database.execute_partitioned_dml( update_statement, - params={"email": nonesuch, "target": target}, - param_types={"email": param_types.STRING, "target": param_types.STRING}, + params={keys[0]: nonesuch, keys[1]: target}, + param_types={keys[0]: 
param_types.STRING, keys[1]: param_types.STRING}, request_options=spanner_v1.RequestOptions( priority=spanner_v1.RequestOptions.Priority.PRIORITY_MEDIUM ), @@ -949,7 +993,7 @@ def _setup_table(txn): # [END spanner_test_dml_partioned_dml_update] -def _transaction_concurrency_helper(sessions_database, unit_of_work, pkey): +def _transaction_concurrency_helper(sessions_database, unit_of_work, pkey, database_dialect=None): initial_value = 123 num_threads = 3 # conforms to equivalent Java systest. @@ -965,9 +1009,11 @@ def _transaction_concurrency_helper(sessions_database, unit_of_work, pkey): for _ in range(num_threads): txn_sessions.append(sessions_database) + args = (unit_of_work, pkey, database_dialect) if database_dialect else (unit_of_work, pkey) + threads = [ threading.Thread( - target=txn_session.run_in_transaction, args=(unit_of_work, pkey) + target=txn_session.run_in_transaction, args=args ) for txn_session in txn_sessions ] @@ -999,12 +1045,14 @@ def test_transaction_read_w_concurrent_updates(sessions_database): _transaction_concurrency_helper(sessions_database, _read_w_concurrent_update, pkey) -def _query_w_concurrent_update(transaction, pkey): +def _query_w_concurrent_update(transaction, pkey, database_dialect): param_types = spanner_v1.param_types - sql = f"SELECT * FROM {COUNTERS_TABLE} WHERE name = @name" + key = "p1" if database_dialect == DatabaseDialect.POSTGRESQL else "name" + placeholder = "$1" if database_dialect == DatabaseDialect.POSTGRESQL else f"@{key}" + sql = f"SELECT * FROM {COUNTERS_TABLE} WHERE name = {placeholder}" rows = list( transaction.execute_sql( - sql, params={"name": pkey}, param_types={"name": param_types.STRING} + sql, params={key: pkey}, param_types={key: param_types.STRING} ) ) assert len(rows) == 1 @@ -1012,9 +1060,9 @@ def _query_w_concurrent_update(transaction, pkey): transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]]) -def test_transaction_query_w_concurrent_updates(sessions_database): +def 
test_transaction_query_w_concurrent_updates(sessions_database, database_dialect): pkey = "query_w_concurrent_updates" - _transaction_concurrency_helper(sessions_database, _query_w_concurrent_update, pkey) + _transaction_concurrency_helper(sessions_database, _query_w_concurrent_update, pkey, database_dialect) def test_transaction_read_w_abort(not_emulator, sessions_database): @@ -1214,7 +1262,7 @@ def test_multiuse_snapshot_read_isolation_exact_staleness(sessions_database): sd._check_row_data(after, all_data_rows) -def test_read_w_index(shared_instance, database_operation_timeout, databases_to_delete): +def test_read_w_index(shared_instance, database_operation_timeout, databases_to_delete, database_dialect): # Indexed reads cannot return non-indexed columns sd = _sample_data row_count = 2000 @@ -1227,6 +1275,7 @@ def test_read_w_index(shared_instance, database_operation_timeout, databases_to_ _helpers.unique_id("test_read", separator="_"), ddl_statements=_helpers.DDL_STATEMENTS + extra_ddl, pool=pool, + database_dialect=database_dialect ) operation = temp_db.create() databases_to_delete.append(temp_db) @@ -1684,7 +1733,7 @@ def test_multiuse_snapshot_execute_sql_isolation_strong(sessions_database): sd._check_row_data(after, all_data_rows) -def test_execute_sql_returning_array_of_struct(sessions_database): +def test_execute_sql_returning_array_of_struct(sessions_database, not_postgres): sql = ( "SELECT ARRAY(SELECT AS STRUCT C1, C2 " "FROM (SELECT 'a' AS C1, 1 AS C2 " @@ -1700,7 +1749,7 @@ def test_execute_sql_returning_array_of_struct(sessions_database): ) -def test_execute_sql_returning_empty_array_of_struct(sessions_database): +def test_execute_sql_returning_empty_array_of_struct(sessions_database, not_postgres): sql = ( "SELECT ARRAY(SELECT AS STRUCT C1, C2 " "FROM (SELECT 2 AS C1) X " @@ -1749,7 +1798,8 @@ def test_execute_sql_select_1(sessions_database): def _bind_test_helper( database, - type_name, + database_dialect, + param_type, single_value, array_value, 
expected_array_value=None, @@ -1757,12 +1807,15 @@ def _bind_test_helper( ): database.snapshot(multi_use=True) + key = "p1" if database_dialect == DatabaseDialect.POSTGRESQL else "v" + placeholder = "$1" if database_dialect == DatabaseDialect.POSTGRESQL else f"@{key}" + # Bind a non-null _check_sql_results( database, - sql="SELECT @v", - params={"v": single_value}, - param_types={"v": spanner_v1.Type(code=type_name)}, + sql=f"SELECT {placeholder}", + params={key: single_value}, + param_types={key: param_type}, expected=[(single_value,)], order=False, recurse_into_lists=recurse_into_lists, @@ -1771,16 +1824,16 @@ def _bind_test_helper( # Bind a null _check_sql_results( database, - sql="SELECT @v", - params={"v": None}, - param_types={"v": spanner_v1.Type(code=type_name)}, + sql=f"SELECT {placeholder}", + params={key: None}, + param_types={key: param_type}, expected=[(None,)], order=False, recurse_into_lists=recurse_into_lists, ) # Bind an array of - array_element_type = spanner_v1.Type(code=type_name) + array_element_type = param_type array_type = spanner_v1.Type( code=spanner_v1.TypeCode.ARRAY, array_element_type=array_element_type ) @@ -1790,9 +1843,9 @@ def _bind_test_helper( _check_sql_results( database, - sql="SELECT @v", - params={"v": array_value}, - param_types={"v": array_type}, + sql=f"SELECT {placeholder}", + params={key: array_value}, + param_types={key: array_type}, expected=[(expected_array_value,)], order=False, recurse_into_lists=recurse_into_lists, @@ -1801,9 +1854,9 @@ def _bind_test_helper( # Bind an empty array of _check_sql_results( database, - sql="SELECT @v", - params={"v": []}, - param_types={"v": array_type}, + sql=f"SELECT {placeholder}", + params={key: []}, + param_types={key: array_type}, expected=[([],)], order=False, recurse_into_lists=recurse_into_lists, @@ -1812,70 +1865,75 @@ def _bind_test_helper( # Bind a null array of _check_sql_results( database, - sql="SELECT @v", - params={"v": None}, - param_types={"v": array_type}, + 
sql=f"SELECT {placeholder}", + params={key: None}, + param_types={key: array_type}, expected=[(None,)], order=False, recurse_into_lists=recurse_into_lists, ) -def test_execute_sql_w_string_bindings(sessions_database): +def test_execute_sql_w_string_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, spanner_v1.TypeCode.STRING, "Phred", ["Phred", "Bharney"] + sessions_database, database_dialect, spanner_v1.param_types.STRING, "Phred", ["Phred", "Bharney"] ) -def test_execute_sql_w_bool_bindings(sessions_database): +def test_execute_sql_w_bool_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, spanner_v1.TypeCode.BOOL, True, [True, False, True] + sessions_database, database_dialect, spanner_v1.param_types.BOOL, True, [True, False, True] ) -def test_execute_sql_w_int64_bindings(sessions_database): - _bind_test_helper(sessions_database, spanner_v1.TypeCode.INT64, 42, [123, 456, 789]) +def test_execute_sql_w_int64_bindings(sessions_database, database_dialect): + _bind_test_helper(sessions_database, database_dialect, spanner_v1.param_types.INT64, 42, [123, 456, 789]) -def test_execute_sql_w_float64_bindings(sessions_database): +def test_execute_sql_w_float64_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, spanner_v1.TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89] + sessions_database, database_dialect, spanner_v1.param_types.FLOAT64, 42.3, [12.3, 456.0, 7.89] ) -def test_execute_sql_w_float_bindings_transfinite(sessions_database): +def test_execute_sql_w_float_bindings_transfinite(sessions_database, database_dialect): + key = "p1" if database_dialect == DatabaseDialect.POSTGRESQL else "neg_inf" + placeholder = "$1" if database_dialect == DatabaseDialect.POSTGRESQL else f"@{key}" # Find -inf _check_sql_results( sessions_database, - sql="SELECT @neg_inf", - params={"neg_inf": NEG_INF}, - param_types={"neg_inf": spanner_v1.param_types.FLOAT64}, + sql=f"SELECT 
{placeholder}", + params={key: NEG_INF}, + param_types={key: spanner_v1.param_types.FLOAT64}, expected=[(NEG_INF,)], order=False, ) + key = "p1" if database_dialect == DatabaseDialect.POSTGRESQL else "pos_inf" + placeholder = "$1" if database_dialect == DatabaseDialect.POSTGRESQL else f"@{key}" # Find +inf _check_sql_results( sessions_database, - sql="SELECT @pos_inf", - params={"pos_inf": POS_INF}, - param_types={"pos_inf": spanner_v1.param_types.FLOAT64}, + sql=f"SELECT {placeholder}", + params={key: POS_INF}, + param_types={key: spanner_v1.param_types.FLOAT64}, expected=[(POS_INF,)], order=False, ) -def test_execute_sql_w_bytes_bindings(sessions_database): +def test_execute_sql_w_bytes_bindings(sessions_database, database_dialect): _bind_test_helper( sessions_database, - spanner_v1.TypeCode.BYTES, + database_dialect, + spanner_v1.param_types.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"], ) -def test_execute_sql_w_timestamp_bindings(sessions_database): +def test_execute_sql_w_timestamp_bindings(sessions_database, database_dialect): timestamp_1 = datetime_helpers.DatetimeWithNanoseconds( 1989, 1, 17, 17, 59, 12, nanosecond=345612789 @@ -1892,7 +1950,8 @@ def test_execute_sql_w_timestamp_bindings(sessions_database): _bind_test_helper( sessions_database, - spanner_v1.TypeCode.TIMESTAMP, + database_dialect, + spanner_v1.param_types.TIMESTAMP, timestamp_1, timestamps, expected_timestamps, @@ -1900,18 +1959,28 @@ def test_execute_sql_w_timestamp_bindings(sessions_database): ) -def test_execute_sql_w_date_bindings(sessions_database): +def test_execute_sql_w_date_bindings(sessions_database, not_postgres, database_dialect): dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)] - _bind_test_helper(sessions_database, spanner_v1.TypeCode.DATE, SOME_DATE, dates) + _bind_test_helper(sessions_database, database_dialect, spanner_v1.param_types.DATE, SOME_DATE, dates) -def test_execute_sql_w_numeric_bindings(not_emulator, sessions_database): - _bind_test_helper( - 
sessions_database, - spanner_v1.TypeCode.NUMERIC, - NUMERIC_1, - [NUMERIC_1, NUMERIC_2], - ) +def test_execute_sql_w_numeric_bindings(not_emulator, not_postgres, sessions_database, database_dialect): + if database_dialect == DatabaseDialect.POSTGRESQL: + _bind_test_helper( + sessions_database, + database_dialect, + spanner_v1.param_types.PG_NUMERIC, + NUMERIC_1, + [NUMERIC_1, NUMERIC_2], + ) + else: + _bind_test_helper( + sessions_database, + database_dialect, + spanner_v1.param_types.NUMERIC, + NUMERIC_1, + [NUMERIC_1, NUMERIC_2], + ) def test_execute_sql_w_json_bindings(not_emulator, sessions_database): @@ -1923,7 +1992,7 @@ def test_execute_sql_w_json_bindings(not_emulator, sessions_database): ) -def test_execute_sql_w_query_param_struct(sessions_database): +def test_execute_sql_w_query_param_struct(sessions_database, not_postgres): name = "Phred" count = 123 size = 23.456 @@ -2128,7 +2197,7 @@ def test_execute_sql_w_query_param_struct(sessions_database): ) -def test_execute_sql_returning_transfinite_floats(sessions_database): +def test_execute_sql_returning_transfinite_floats(sessions_database, not_postgres): with sessions_database.snapshot(multi_use=True) as snapshot: # Query returning -inf, +inf, NaN as column values diff --git a/tests/system/test_table_api.py b/tests/system/test_table_api.py index 73de78d7df..abf1a95beb 100644 --- a/tests/system/test_table_api.py +++ b/tests/system/test_table_api.py @@ -16,6 +16,7 @@ from google.api_core import exceptions from google.cloud import spanner_v1 +from google.cloud.spanner_admin_database_v1 import DatabaseDialect def test_table_exists(shared_database): @@ -32,7 +33,7 @@ def test_db_list_tables(shared_database): tables = shared_database.list_tables() table_ids = set(table.table_id for table in tables) assert "contacts" in table_ids - assert "contact_phones" in table_ids + # assert "contact_phones" in table_ids assert "all_types" in table_ids @@ -49,20 +50,21 @@ def test_table_reload_miss(shared_database): 
table.reload() -def test_table_schema(shared_database): +def test_table_schema(shared_database, database_dialect): table = shared_database.table("all_types") schema = table.schema expected = [ ("pkey", spanner_v1.TypeCode.INT64), ("int_value", spanner_v1.TypeCode.INT64), - ("int_array", spanner_v1.TypeCode.ARRAY), ("bool_value", spanner_v1.TypeCode.BOOL), ("bytes_value", spanner_v1.TypeCode.BYTES), - ("date_value", spanner_v1.TypeCode.DATE), ("float_value", spanner_v1.TypeCode.FLOAT64), ("string_value", spanner_v1.TypeCode.STRING), ("timestamp_value", spanner_v1.TypeCode.TIMESTAMP), + ("date_value", spanner_v1.TypeCode.DATE), + ("int_array", spanner_v1.TypeCode.ARRAY), ] + expected = expected[:-2] if database_dialect == DatabaseDialect.POSTGRESQL else expected found = {field.name: field.type_.code for field in schema} for field_name, type_code in expected: From 9bc87d9e557276ee624d6058164c2d71c9ac4103 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Thu, 9 Jun 2022 16:35:41 +0530 Subject: [PATCH 07/26] feat: postgres dialect - review fixes --- .../spanner_admin_database_v1/types/backup.py | 1 - .../types/spanner_database_admin.py | 5 ++--- google/cloud/spanner_v1/backup.py | 4 ++-- google/cloud/spanner_v1/instance.py | 5 +++++ google/cloud/spanner_v1/types/type.py | 2 +- tests/_fixtures.py | 16 ---------------- tests/system/conftest.py | 4 +--- tests/system/test_dbapi.py | 2 +- 8 files changed, 12 insertions(+), 27 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py index 5cb9dc9f76..dd42c409b9 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup.py +++ b/google/cloud/spanner_admin_database_v1/types/backup.py @@ -190,7 +190,6 @@ class State(proto.Enum): number=12, message=timestamp_pb2.Timestamp, ) - database_dialect = proto.Field(proto.ENUM, number=10, enum=common.DatabaseDialect,) class CreateBackupRequest(proto.Message): diff --git 
a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index a24d382c9c..5c195f8635 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -143,7 +143,7 @@ class Database(proto.Message): DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - The dialect of the Cloud Spanner Database. + Output only. The dialect of the Cloud Spanner Database. """ class State(proto.Enum): @@ -295,7 +295,7 @@ class CreateDatabaseRequest(proto.Message): Cloud Spanner will encrypt/decrypt all data at rest using Google default encryption. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - The dialect of the Cloud Spanner Database. + Optional. The dialect of the Cloud Spanner Database. """ parent = proto.Field( @@ -320,7 +320,6 @@ class CreateDatabaseRequest(proto.Message): number=5, enum=common.DatabaseDialect, ) - database_dialect = proto.Field(proto.ENUM, number=5, enum=common.DatabaseDialect,) class CreateDatabaseMetadata(proto.Message): diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py index f83a4a2d9c..2f54cf2167 100644 --- a/google/cloud/spanner_v1/backup.py +++ b/google/cloud/spanner_v1/backup.py @@ -218,11 +218,11 @@ def referencing_backups(self): return self._referencing_backups def database_dialect(self): - """Encryption info for this backup. + """Database Dialect for this backup. 
:rtype: :class:`~google.cloud.spanner_admin_database_v1.types.DatabaseDialect` :returns: a class representing the dialect of this backup's database """ - return self._encryption_info + return self._database_dialect @classmethod def from_pb(cls, backup_pb, instance): diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index 78ae8714a5..8d9546139f 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -460,6 +460,11 @@ def database( messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig` or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig` + :type database_dialect: + :class:`~google.cloud.spanner_admin_database_v1.types.DatabaseDialect` + :param database_dialect: + (Optional) database dialect for the database + :rtype: :class:`~google.cloud.spanner_v1.database.Database` :returns: a database owned by this instance. """ diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index 02444fa577..cacec433d3 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -59,7 +59,7 @@ class TypeAnnotationCode(proto.Enum): the way value is serialized. 
""" TYPE_ANNOTATION_CODE_UNSPECIFIED = 0 - INT32 = 1 + # INT32 = 1 #unsupported PG_NUMERIC = 2 diff --git a/tests/_fixtures.py b/tests/_fixtures.py index 9f661d8ec2..25a9833383 100644 --- a/tests/_fixtures.py +++ b/tests/_fixtures.py @@ -147,22 +147,6 @@ PRIMARY KEY (id)); CREATE INDEX name ON contacts(first_name, last_name); """ -# CREATE TABLE contact_phones ( -# contact_id BIGINT, -# phone_type VARCHAR(1024), -# phone_number VARCHAR(1024), -# PRIMARY KEY (contact_id, phone_type), -# FOREIGN KEY (contact_id) REFERENCES -# contacts(contact_id) ON DELETE CASCADE ) -# INTERLEAVE IN PARENT contacts; -# CREATE TABLE users_history ( -# id BIGINT NOT NULL, -# commit_ts COMMIT_TIMESTAMP NOT NULL, -# name TEXT NOT NULL, -# email TEXT, -# deleted BOOL NOT NULL, -# PRIMARY KEY(id, commit_ts DESC) ); -# """ DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()] EMULATOR_DDL_STATEMENTS = [ diff --git a/tests/system/conftest.py b/tests/system/conftest.py index e748bd85ca..2d23ab78dd 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -72,9 +72,7 @@ def spanner_client(): credentials=credentials, ) else: - return spanner_v1.Client( - client_options={"api_endpoint": "staging-wrenchworks.sandbox.googleapis.com"} - ) # use google.auth.default credentials + return spanner_v1.Client() # use google.auth.default credentials @pytest.fixture(scope="session") diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py index c1cd025c07..c37abf1db8 100644 --- a/tests/system/test_dbapi.py +++ b/tests/system/test_dbapi.py @@ -21,7 +21,6 @@ from google.cloud import spanner_v1 from google.cloud._helpers import UTC from google.cloud.spanner_dbapi.connection import connect -from google.cloud.spanner_admin_database_v1 import DatabaseDialect from google.cloud.spanner_dbapi.connection import Connection from google.cloud.spanner_dbapi.exceptions import ProgrammingError from google.cloud.spanner_v1 import JsonObject @@ -40,6 +39,7 @@ PRIMARY KEY 
(contact_id)""", ) + @pytest.fixture(scope="session") def raw_database(shared_instance, database_operation_timeout, not_postgres): databse_id = _helpers.unique_id("dbapi-txn") From 69713deba53df53c6c262f84435a56746739694b Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Sat, 11 Jun 2022 08:42:26 +0530 Subject: [PATCH 08/26] feat: postgres dialect - review fixes --- tests/system/test_session_api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index 8a09be5347..91fdaec63e 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -1986,6 +1986,7 @@ def test_execute_sql_w_numeric_bindings(not_emulator, not_postgres, sessions_dat def test_execute_sql_w_json_bindings(not_emulator, sessions_database): _bind_test_helper( sessions_database, + database_dialect, spanner_v1.TypeCode.JSON, JSON_1, [JSON_1, JSON_2], From e112a1d335caf2ae3c934fd6f08d7f138849554b Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Sun, 12 Jun 2022 08:00:28 +0530 Subject: [PATCH 09/26] feat: postgres dialect - review fixes --- google/cloud/spanner_v1/database.py | 5 +++++ tests/system/test_session_api.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 7806d719c7..4c94d124f6 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -125,6 +125,11 @@ class Database(object): If a dict is provided, it must be of the same form as either of the protobuf messages :class:`~google.cloud.spanner_admin_database_v1.types.EncryptionConfig` or :class:`~google.cloud.spanner_admin_database_v1.types.RestoreDatabaseEncryptionConfig` + :type database_dialect: + :class:`~google.cloud.spanner_admin_database_v1.types.DatabaseDialect` + :param database_dialect: + (Optional) database dialect for the database + """ _spanner_api = None diff --git a/tests/system/test_session_api.py 
b/tests/system/test_session_api.py index 91fdaec63e..f22227b7d3 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -1983,7 +1983,7 @@ def test_execute_sql_w_numeric_bindings(not_emulator, not_postgres, sessions_dat ) -def test_execute_sql_w_json_bindings(not_emulator, sessions_database): +def test_execute_sql_w_json_bindings(not_emulator, sessions_database, database_dialect): _bind_test_helper( sessions_database, database_dialect, From da92986ee77cd47a255aeeede8c1650954e6b964 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Mon, 13 Jun 2022 12:52:09 +0530 Subject: [PATCH 10/26] feat: postgres dialect - review fixes --- google/cloud/spanner_v1/database.py | 2 +- tests/unit/test_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 4c94d124f6..1b316aa8a8 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -319,7 +319,7 @@ def database_dialect(self): See cloud.google.com/spanner/docs/data-definition-language - :rtype: :class:`google.cloud.spanner_admin_database_v1.types.DatabaseDialect + :rtype: :class:`google.cloud.spanner_admin_database_v1.types.DatabaseDialect` :returns: the dialect of the database """ return self._database_dialect diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index 0a49a9b225..016c048a89 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -59,7 +59,7 @@ def test_exists_executes_query(self): exists = table.exists() self.assertFalse(exists) snapshot.execute_sql.assert_called_with( - _EXISTS_TEMPLATE, + _EXISTS_TEMPLATE.format("WHERE TABLE_NAME = $1"), params={"table_id": self.TABLE_ID}, param_types={"table_id": Type(code=TypeCode.STRING)}, ) From be004139d26e28a4eb2c629b1ce1f575593af3a4 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Mon, 13 Jun 2022 12:55:04 +0530 Subject: [PATCH 11/26] feat: postgres dialect - review fixes --- 
.../types/spanner_database_admin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index 5c195f8635..3758575337 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -143,7 +143,8 @@ class Database(proto.Message): DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - Output only. The dialect of the Cloud Spanner Database. + Output only. The dialect of the Cloud Spanner + Database. """ class State(proto.Enum): @@ -200,8 +201,6 @@ class State(proto.Enum): number=10, enum=common.DatabaseDialect, ) - default_leader = proto.Field(proto.STRING, number=9,) - database_dialect = proto.Field(proto.ENUM, number=10, enum=common.DatabaseDialect,) class ListDatabasesRequest(proto.Message): @@ -295,7 +294,8 @@ class CreateDatabaseRequest(proto.Message): Cloud Spanner will encrypt/decrypt all data at rest using Google default encryption. database_dialect (google.cloud.spanner_admin_database_v1.types.DatabaseDialect): - Optional. The dialect of the Cloud Spanner Database. + Output only. The dialect of the Cloud Spanner + Database. 
""" parent = proto.Field( From dd2a0ab2adfe6c4a6806d0b64e040b38746b19ce Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Mon, 13 Jun 2022 14:30:25 +0530 Subject: [PATCH 12/26] feat: postgres dialect - review fixes --- tests/unit/test_table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index 016c048a89..7ab30ea139 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -59,7 +59,7 @@ def test_exists_executes_query(self): exists = table.exists() self.assertFalse(exists) snapshot.execute_sql.assert_called_with( - _EXISTS_TEMPLATE.format("WHERE TABLE_NAME = $1"), + _EXISTS_TEMPLATE.format("WHERE TABLE_NAME = @table_id"), params={"table_id": self.TABLE_ID}, param_types={"table_id": Type(code=TypeCode.STRING)}, ) From a0a8d8bb93dc1d5ce34a619a333beb5fc9fe3b96 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Mon, 13 Jun 2022 17:17:56 +0530 Subject: [PATCH 13/26] feat: postgres dialect - review fixes --- google/cloud/spanner_v1/types/transaction.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 0addc255e2..d7262b27c2 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -31,7 +31,7 @@ class TransactionOptions(proto.Message): r"""Transactions - ============ + ------------ Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and @@ -41,8 +41,7 @@ class TransactionOptions(proto.Message): for each transaction. Transaction Modes - ================= - + ----------------- Cloud Spanner supports three transaction modes: 1. Locking read-write. 
This type of transaction is the only way to From fc3db167bcd6450ada846d8bc8bfcbbfe11f4a39 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Mon, 13 Jun 2022 17:18:34 +0530 Subject: [PATCH 14/26] feat: postgres dialect - review fixes --- google/cloud/spanner_v1/types/transaction.py | 1 + 1 file changed, 1 insertion(+) diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index d7262b27c2..5005f8835f 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -42,6 +42,7 @@ class TransactionOptions(proto.Message): Transaction Modes ----------------- + Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to From 1bf0440f27ed3fda539a61829bd9b9659ab72ef6 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Tue, 14 Jun 2022 04:06:56 +0530 Subject: [PATCH 15/26] feat: postgres dialect - docstring fixes --- google/cloud/spanner_v1/types/transaction.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 5005f8835f..b73e49a7a9 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -31,7 +31,6 @@ class TransactionOptions(proto.Message): r"""Transactions - ------------ Each session can have at most one active transaction at a time (note that standalone reads and queries use a transaction internally and @@ -41,8 +40,7 @@ class TransactionOptions(proto.Message): for each transaction. Transaction Modes - ----------------- - + Cloud Spanner supports three transaction modes: 1. Locking read-write. This type of transaction is the only way to @@ -75,7 +73,6 @@ class TransactionOptions(proto.Message): database. 
Locking Read-Write Transactions - ------------------------------- Locking transactions may be used to atomically read-modify-write data anywhere in a database. This type of transaction is externally @@ -99,7 +96,6 @@ class TransactionOptions(proto.Message): transaction. Semantics - --------- Cloud Spanner can commit the transaction if all read locks it acquired are still valid at commit time, and it is able to acquire @@ -114,7 +110,6 @@ class TransactionOptions(proto.Message): than between Cloud Spanner transactions themselves. Retrying Aborted Transactions - ----------------------------- When a transaction aborts, the application can choose to retry the whole transaction again. To maximize the chances of successfully @@ -131,7 +126,6 @@ class TransactionOptions(proto.Message): retrying. Idle Transactions - ----------------- A transaction is considered idle if it has no outstanding reads or SQL queries and has not started a read or SQL query within the last @@ -144,7 +138,6 @@ class TransactionOptions(proto.Message): transaction from becoming idle. Snapshot Read-Only Transactions - ------------------------------- Snapshot read-only transactions provides a simpler method than locking read-write transactions for doing several consistent reads. @@ -183,7 +176,6 @@ class TransactionOptions(proto.Message): Each type of timestamp bound is discussed in detail below. Strong - ------ Strong reads are guaranteed to see the effects of all transactions that have committed before the start of the read. Furthermore, all @@ -201,7 +193,6 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. Exact Staleness - --------------- These timestamp bounds execute reads at a user-specified timestamp. 
Reads at a timestamp are guaranteed to see a consistent prefix of @@ -226,7 +217,6 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. Bounded Staleness - ----------------- Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to a user-provided staleness bound. Cloud Spanner @@ -259,7 +249,6 @@ class TransactionOptions(proto.Message): [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. Old Read Timestamps and Garbage Collection - ------------------------------------------ Cloud Spanner continuously garbage collects deleted and overwritten data in the background to reclaim storage space. This process is @@ -272,7 +261,6 @@ class TransactionOptions(proto.Message): ``FAILED_PRECONDITION``. Partitioned DML Transactions - ---------------------------- Partitioned DML transactions are used to execute DML statements with a different execution strategy that provides different, and often From 9a540fc5d26341bcf59474f3e116fd9e9cc492d6 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Tue, 14 Jun 2022 13:09:21 +0530 Subject: [PATCH 16/26] feat: fix linting --- google/cloud/spanner_v1/database.py | 4 +- google/cloud/spanner_v1/instance.py | 4 +- tests/_fixtures.py | 4 +- tests/system/_helpers.py | 7 +- tests/system/conftest.py | 16 ++- tests/system/test_backup_api.py | 5 +- tests/system/test_database_api.py | 12 ++- tests/system/test_session_api.py | 145 +++++++++++++++++++++------- tests/system/test_table_api.py | 4 +- 9 files changed, 149 insertions(+), 52 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 1b316aa8a8..04f2e6c025 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -142,7 +142,7 @@ def __init__( pool=None, logger=None, encryption_config=None, - 
database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED, ): self.database_id = database_id self._instance = instance @@ -395,7 +395,7 @@ def create(self): db_name = self.database_id if "-" in db_name: if self._database_dialect == DatabaseDialect.POSTGRESQL: - db_name = f"\"{db_name}\"" + db_name = f'"{db_name}"' else: db_name = f"`{db_name}`" if type(self._encryption_config) == dict: diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index 8d9546139f..9317948542 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -429,7 +429,7 @@ def database( pool=None, logger=None, encryption_config=None, - database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED + database_dialect=DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED, ): """Factory to create a database within this instance. @@ -475,7 +475,7 @@ def database( pool=pool, logger=logger, encryption_config=encryption_config, - database_dialect=database_dialect + database_dialect=database_dialect, ) def list_databases(self, page_size=None): diff --git a/tests/_fixtures.py b/tests/_fixtures.py index 25a9833383..cea3054156 100644 --- a/tests/_fixtures.py +++ b/tests/_fixtures.py @@ -152,6 +152,4 @@ EMULATOR_DDL_STATEMENTS = [ stmt.strip() for stmt in EMULATOR_DDL.split(";") if stmt.strip() ] -PG_DDL_STATEMENTS = [ - stmt.strip() for stmt in PG_DDL.split(";") if stmt.strip() -] +PG_DDL_STATEMENTS = [stmt.strip() for stmt in PG_DDL.split(";") if stmt.strip()] diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 5fbc69c6d2..51a6d773c4 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -55,8 +55,11 @@ DDL_STATEMENTS = ( - _fixtures.PG_DDL_STATEMENTS if DATABASE_DIALECT == "POSTGRESQL" else - (_fixtures.EMULATOR_DDL_STATEMENTS if USE_EMULATOR else _fixtures.DDL_STATEMENTS) + _fixtures.PG_DDL_STATEMENTS + if DATABASE_DIALECT == "POSTGRESQL" + else 
( + _fixtures.EMULATOR_DDL_STATEMENTS if USE_EMULATOR else _fixtures.DDL_STATEMENTS + ) ) retry_true = retry.RetryResult(operator.truth) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index 2d23ab78dd..b7004fa274 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -52,13 +52,18 @@ def not_emulator(): @pytest.fixture(scope="session") def not_postgres(database_dialect): if database_dialect == DatabaseDialect.POSTGRESQL: - pytest.skip(f"{_helpers.DATABASE_DIALECT_ENVVAR} set to POSTGRES in environment.") + pytest.skip( + f"{_helpers.DATABASE_DIALECT_ENVVAR} set to POSTGRES in environment." + ) @pytest.fixture(scope="session") def database_dialect(): - return DatabaseDialect[_helpers.DATABASE_DIALECT] if _helpers.DATABASE_DIALECT else \ - DatabaseDialect.GOOGLE_STANDARD_SQL + return ( + DatabaseDialect[_helpers.DATABASE_DIALECT] + if _helpers.DATABASE_DIALECT + else DatabaseDialect.GOOGLE_STANDARD_SQL + ) @pytest.fixture(scope="session") @@ -165,7 +170,10 @@ def shared_database(shared_instance, database_operation_timeout, database_dialec database_name = _helpers.unique_id("test_database") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( - database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool, database_dialect=database_dialect + database_name, + ddl_statements=_helpers.DDL_STATEMENTS, + pool=pool, + database_dialect=database_dialect, ) operation = database.create() operation.result(database_operation_timeout) # raises on failure / timeout. 
diff --git a/tests/system/test_backup_api.py b/tests/system/test_backup_api.py index dc57c2261a..bfcd635e8d 100644 --- a/tests/system/test_backup_api.py +++ b/tests/system/test_backup_api.py @@ -97,7 +97,10 @@ def second_database(shared_instance, database_operation_timeout, database_dialec database_name = _helpers.unique_id("test_database2") pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"}) database = shared_instance.database( - database_name, ddl_statements=_helpers.DDL_STATEMENTS, pool=pool, database_dialect=database_dialect + database_name, + ddl_statements=_helpers.DDL_STATEMENTS, + pool=pool, + database_dialect=database_dialect, ) operation = database.create() operation.result(database_operation_timeout) # raises on failure / timeout. diff --git a/tests/system/test_database_api.py b/tests/system/test_database_api.py index aa024eb6a2..1d21a77498 100644 --- a/tests/system/test_database_api.py +++ b/tests/system/test_database_api.py @@ -58,7 +58,9 @@ def test_list_databases(shared_instance, shared_database): def test_create_database(shared_instance, databases_to_delete, database_dialect): pool = spanner_v1.BurstyPool(labels={"testcase": "create_database"}) temp_db_id = _helpers.unique_id("temp_db") - temp_db = shared_instance.database(temp_db_id, pool=pool, database_dialect=database_dialect) + temp_db = shared_instance.database( + temp_db_id, pool=pool, database_dialect=database_dialect + ) operation = temp_db.create() databases_to_delete.append(temp_db) @@ -182,7 +184,9 @@ def test_table_not_found(shared_instance): temp_db.create() -def test_update_ddl_w_operation_id(shared_instance, databases_to_delete, database_dialect): +def test_update_ddl_w_operation_id( + shared_instance, databases_to_delete, database_dialect +): # We used to have: # @pytest.mark.skip( # reason="'Database.update_ddl' has a flaky timeout. 
See: " @@ -190,7 +194,9 @@ def test_update_ddl_w_operation_id(shared_instance, databases_to_delete, databas # ) pool = spanner_v1.BurstyPool(labels={"testcase": "update_database_ddl"}) temp_db_id = _helpers.unique_id("update_ddl", separator="_") - temp_db = shared_instance.database(temp_db_id, pool=pool, database_dialect=database_dialect) + temp_db = shared_instance.database( + temp_db_id, pool=pool, database_dialect=database_dialect + ) create_op = temp_db.create() databases_to_delete.append(temp_db) create_op.result(DBAPI_OPERATION_TIMEOUT) # raises on failure / timeout. diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index f22227b7d3..ccbfeab844 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -85,7 +85,11 @@ EMULATOR_ALL_TYPES_COLUMNS = LIVE_ALL_TYPES_COLUMNS[:-4] # ToDo: Clean up generation of POSTGRES_ALL_TYPES_COLUMNS -POSTGRES_ALL_TYPES_COLUMNS = LIVE_ALL_TYPES_COLUMNS[:1] + LIVE_ALL_TYPES_COLUMNS[1:7:2] + LIVE_ALL_TYPES_COLUMNS[9:17:2] +POSTGRES_ALL_TYPES_COLUMNS = ( + LIVE_ALL_TYPES_COLUMNS[:1] + + LIVE_ALL_TYPES_COLUMNS[1:7:2] + + LIVE_ALL_TYPES_COLUMNS[9:17:2] +) AllTypesRowData = collections.namedtuple("AllTypesRowData", LIVE_ALL_TYPES_COLUMNS) AllTypesRowData.__new__.__defaults__ = tuple([None for colum in LIVE_ALL_TYPES_COLUMNS]) @@ -719,7 +723,9 @@ def test_transaction_execute_update_then_insert_commit( # [END spanner_test_dml_with_mutation] -def test_transaction_batch_update_success(sessions_database, sessions_to_delete, database_dialect): +def test_transaction_batch_update_success( + sessions_database, sessions_to_delete, database_dialect +): # [START spanner_test_dml_with_mutation] # [START spanner_test_dml_update] sd = _sample_data @@ -732,8 +738,16 @@ def test_transaction_batch_update_success(sessions_database, sessions_to_delete, with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) - keys = ['p1', 'p2'] if database_dialect == DatabaseDialect.POSTGRESQL else 
["contact_id", "email"] - placeholders = ['$1', '$2'] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + keys = ( + ["p1", "p2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else ["contact_id", "email"] + ) + placeholders = ( + ["$1", "$2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else [f"@{key}" for key in keys] + ) insert_statement = list(_generate_insert_statements())[0] update_statement = ( @@ -769,9 +783,7 @@ def unit_of_work(transaction): def test_transaction_batch_update_and_execute_dml( - sessions_database, - sessions_to_delete, - database_dialect + sessions_database, sessions_to_delete, database_dialect ): sd = _sample_data param_types = spanner_v1.param_types @@ -783,8 +795,16 @@ def test_transaction_batch_update_and_execute_dml( with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) - keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] - placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + keys = ( + ["p1", "p2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else ["contact_id", "email"] + ) + placeholders = ( + ["$1", "$2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else [f"@{key}" for key in keys] + ) insert_statements = list(_generate_insert_statements()) update_statements = [ @@ -820,7 +840,9 @@ def unit_of_work(transaction): sd._check_rows_data(rows, []) -def test_transaction_batch_update_w_syntax_error(sessions_database, sessions_to_delete, database_dialect): +def test_transaction_batch_update_w_syntax_error( + sessions_database, sessions_to_delete, database_dialect +): from google.rpc import code_pb2 sd = _sample_data @@ -833,8 +855,16 @@ def test_transaction_batch_update_w_syntax_error(sessions_database, sessions_to_ with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) - keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else 
["contact_id", "email"] - placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + keys = ( + ["p1", "p2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else ["contact_id", "email"] + ) + placeholders = ( + ["$1", "$2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else [f"@{key}" for key in keys] + ) insert_statement = list(_generate_insert_statements())[0] update_statement = ( @@ -877,10 +907,7 @@ def test_transaction_batch_update_wo_statements(sessions_database, sessions_to_d reason="trace requires OpenTelemetry", ) def test_transaction_batch_update_w_parent_span( - sessions_database, - sessions_to_delete, - ot_exporter, - database_dialect + sessions_database, sessions_to_delete, ot_exporter, database_dialect ): from opentelemetry import trace @@ -895,8 +922,16 @@ def test_transaction_batch_update_w_parent_span( with session.batch() as batch: batch.delete(sd.TABLE, sd.ALL) - keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["contact_id", "email"] - placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + keys = ( + ["p1", "p2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else ["contact_id", "email"] + ) + placeholders = ( + ["$1", "$2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else [f"@{key}" for key in keys] + ) insert_statement = list(_generate_insert_statements())[0] update_statement = ( @@ -958,8 +993,16 @@ def _setup_table(txn): sd._check_rows_data(before_pdml) - keys = ["p1", "p2"] if database_dialect == DatabaseDialect.POSTGRESQL else ["email", "target"] - placeholders = ["$1", "$2"] if database_dialect == DatabaseDialect.POSTGRESQL else [f"@{key}" for key in keys] + keys = ( + ["p1", "p2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else ["email", "target"] + ) + placeholders = ( + ["$1", "$2"] + if database_dialect == DatabaseDialect.POSTGRESQL + else [f"@{key}" for key 
in keys] + ) nonesuch = "nonesuch@example.com" target = "phred@example.com" update_statement = ( @@ -993,7 +1036,9 @@ def _setup_table(txn): # [END spanner_test_dml_partioned_dml_update] -def _transaction_concurrency_helper(sessions_database, unit_of_work, pkey, database_dialect=None): +def _transaction_concurrency_helper( + sessions_database, unit_of_work, pkey, database_dialect=None +): initial_value = 123 num_threads = 3 # conforms to equivalent Java systest. @@ -1009,12 +1054,14 @@ def _transaction_concurrency_helper(sessions_database, unit_of_work, pkey, datab for _ in range(num_threads): txn_sessions.append(sessions_database) - args = (unit_of_work, pkey, database_dialect) if database_dialect else (unit_of_work, pkey) + args = ( + (unit_of_work, pkey, database_dialect) + if database_dialect + else (unit_of_work, pkey) + ) threads = [ - threading.Thread( - target=txn_session.run_in_transaction, args=args - ) + threading.Thread(target=txn_session.run_in_transaction, args=args) for txn_session in txn_sessions ] @@ -1062,7 +1109,9 @@ def _query_w_concurrent_update(transaction, pkey, database_dialect): def test_transaction_query_w_concurrent_updates(sessions_database, database_dialect): pkey = "query_w_concurrent_updates" - _transaction_concurrency_helper(sessions_database, _query_w_concurrent_update, pkey, database_dialect) + _transaction_concurrency_helper( + sessions_database, _query_w_concurrent_update, pkey, database_dialect + ) def test_transaction_read_w_abort(not_emulator, sessions_database): @@ -1262,7 +1311,9 @@ def test_multiuse_snapshot_read_isolation_exact_staleness(sessions_database): sd._check_row_data(after, all_data_rows) -def test_read_w_index(shared_instance, database_operation_timeout, databases_to_delete, database_dialect): +def test_read_w_index( + shared_instance, database_operation_timeout, databases_to_delete, database_dialect +): # Indexed reads cannot return non-indexed columns sd = _sample_data row_count = 2000 @@ -1275,7 +1326,7 @@ def 
test_read_w_index(shared_instance, database_operation_timeout, databases_to_ _helpers.unique_id("test_read", separator="_"), ddl_statements=_helpers.DDL_STATEMENTS + extra_ddl, pool=pool, - database_dialect=database_dialect + database_dialect=database_dialect, ) operation = temp_db.create() databases_to_delete.append(temp_db) @@ -1876,23 +1927,41 @@ def _bind_test_helper( def test_execute_sql_w_string_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, database_dialect, spanner_v1.param_types.STRING, "Phred", ["Phred", "Bharney"] + sessions_database, + database_dialect, + spanner_v1.param_types.STRING, + "Phred", + ["Phred", "Bharney"], ) def test_execute_sql_w_bool_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, database_dialect, spanner_v1.param_types.BOOL, True, [True, False, True] + sessions_database, + database_dialect, + spanner_v1.param_types.BOOL, + True, + [True, False, True], ) def test_execute_sql_w_int64_bindings(sessions_database, database_dialect): - _bind_test_helper(sessions_database, database_dialect, spanner_v1.param_types.INT64, 42, [123, 456, 789]) + _bind_test_helper( + sessions_database, + database_dialect, + spanner_v1.param_types.INT64, + 42, + [123, 456, 789], + ) def test_execute_sql_w_float64_bindings(sessions_database, database_dialect): _bind_test_helper( - sessions_database, database_dialect, spanner_v1.param_types.FLOAT64, 42.3, [12.3, 456.0, 7.89] + sessions_database, + database_dialect, + spanner_v1.param_types.FLOAT64, + 42.3, + [12.3, 456.0, 7.89], ) @@ -1961,10 +2030,18 @@ def test_execute_sql_w_timestamp_bindings(sessions_database, database_dialect): def test_execute_sql_w_date_bindings(sessions_database, not_postgres, database_dialect): dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)] - _bind_test_helper(sessions_database, database_dialect, spanner_v1.param_types.DATE, SOME_DATE, dates) + _bind_test_helper( + sessions_database, + 
database_dialect, + spanner_v1.param_types.DATE, + SOME_DATE, + dates, + ) -def test_execute_sql_w_numeric_bindings(not_emulator, not_postgres, sessions_database, database_dialect): +def test_execute_sql_w_numeric_bindings( + not_emulator, not_postgres, sessions_database, database_dialect +): if database_dialect == DatabaseDialect.POSTGRESQL: _bind_test_helper( sessions_database, diff --git a/tests/system/test_table_api.py b/tests/system/test_table_api.py index abf1a95beb..1385fb953c 100644 --- a/tests/system/test_table_api.py +++ b/tests/system/test_table_api.py @@ -64,7 +64,9 @@ def test_table_schema(shared_database, database_dialect): ("date_value", spanner_v1.TypeCode.DATE), ("int_array", spanner_v1.TypeCode.ARRAY), ] - expected = expected[:-2] if database_dialect == DatabaseDialect.POSTGRESQL else expected + expected = ( + expected[:-2] if database_dialect == DatabaseDialect.POSTGRESQL else expected + ) found = {field.name: field.type_.code for field in schema} for field_name, type_code in expected: From 666285bf32a4df969bb08d40a2b0e45db742b6fb Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Wed, 15 Jun 2022 15:18:35 +0530 Subject: [PATCH 17/26] feat: add opentelemetry version in noxfile to remove failures --- noxfile.py | 1 + 1 file changed, 1 insertion(+) diff --git a/noxfile.py b/noxfile.py index 265933acd7..eb3041f060 100644 --- a/noxfile.py +++ b/noxfile.py @@ -373,6 +373,7 @@ def prerelease_deps(session): # dependencies of google-auth "cryptography", "pyasn1", + "opentelemetry-instrumentation >= 0.20b0, < 0.23dev" ] for dep in prerel_deps: From 8525bf5c5839b4823ad5057a4573a185162ddc33 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Wed, 15 Jun 2022 15:56:28 +0530 Subject: [PATCH 18/26] feat: add opentelemetry version and constraints.txt --- noxfile.py | 2 ++ testing/constraints-3.8.txt | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/noxfile.py b/noxfile.py index eb3041f060..fb7d5b4141 100644 --- a/noxfile.py +++ b/noxfile.py @@ 
-373,6 +373,8 @@ def prerelease_deps(session): # dependencies of google-auth "cryptography", "pyasn1", + "opentelemetry-api >= 1.1.0", + "opentelemetry-sdk >= 1.1.0", "opentelemetry-instrumentation >= 0.20b0, < 0.23dev" ] diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index e69de29bb2..81c7b183a9 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -0,0 +1,18 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. +# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==1.31.5 +google-cloud-core==1.4.1 +grpc-google-iam-v1==0.12.4 +libcst==0.2.5 +proto-plus==1.15.0 +sqlparse==0.3.0 +opentelemetry-api==1.1.0 +opentelemetry-sdk==1.1.0 +opentelemetry-instrumentation==0.20b0 +packaging==14.3 +protobuf==3.19.0 From f264ecaf455dff7b939c9e7e8822d2181fd55cf8 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Wed, 15 Jun 2022 23:30:48 +0530 Subject: [PATCH 19/26] Revert "feat: add opentelemetry version and constraints.txt" This reverts commit 8525bf5c5839b4823ad5057a4573a185162ddc33. 
--- noxfile.py | 2 -- testing/constraints-3.8.txt | 18 ------------------ 2 files changed, 20 deletions(-) diff --git a/noxfile.py b/noxfile.py index fb7d5b4141..eb3041f060 100644 --- a/noxfile.py +++ b/noxfile.py @@ -373,8 +373,6 @@ def prerelease_deps(session): # dependencies of google-auth "cryptography", "pyasn1", - "opentelemetry-api >= 1.1.0", - "opentelemetry-sdk >= 1.1.0", "opentelemetry-instrumentation >= 0.20b0, < 0.23dev" ] diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index 81c7b183a9..e69de29bb2 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -1,18 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List *all* library dependencies and extras in this file. -# Pin the version to the lower bound. -# -# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", -# Then this file should have foo==1.14.0 -google-api-core==1.31.5 -google-cloud-core==1.4.1 -grpc-google-iam-v1==0.12.4 -libcst==0.2.5 -proto-plus==1.15.0 -sqlparse==0.3.0 -opentelemetry-api==1.1.0 -opentelemetry-sdk==1.1.0 -opentelemetry-instrumentation==0.20b0 -packaging==14.3 -protobuf==3.19.0 From 2ae7b98a42232dc555b17e2b00de5b28863c32f0 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Wed, 15 Jun 2022 23:31:19 +0530 Subject: [PATCH 20/26] Revert "feat: add opentelemetry version in noxfile to remove failures" This reverts commit 666285bf32a4df969bb08d40a2b0e45db742b6fb. 
--- noxfile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index eb3041f060..265933acd7 100644 --- a/noxfile.py +++ b/noxfile.py @@ -373,7 +373,6 @@ def prerelease_deps(session): # dependencies of google-auth "cryptography", "pyasn1", - "opentelemetry-instrumentation >= 0.20b0, < 0.23dev" ] for dep in prerel_deps: From 9d4ef525fa3e9c65ff1b094921b3036f07641189 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Wed, 15 Jun 2022 23:48:26 +0530 Subject: [PATCH 21/26] feat: removing duplicate imports --- google/cloud/spanner_v1/database.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 04f2e6c025..d3300d3a67 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -52,17 +52,6 @@ from google.cloud.spanner_v1.services.spanner.transports.grpc import ( SpannerGrpcTransport, ) -from google.cloud.spanner_admin_database_v1 import CreateDatabaseRequest -from google.cloud.spanner_admin_database_v1 import DatabaseDialect -from google.cloud.spanner_admin_database_v1 import EncryptionConfig -from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig -from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest -from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest -from google.cloud.spanner_v1 import ( - ExecuteSqlRequest, - TransactionSelector, - TransactionOptions, -) from google.cloud.spanner_v1.table import Table From 032a2b83f8c96e275227e32961ec78d58d128a8e Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Thu, 16 Jun 2022 00:02:03 +0530 Subject: [PATCH 22/26] feat: correcting imports --- google/cloud/spanner_v1/database.py | 1 + 1 file changed, 1 insertion(+) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index d3300d3a67..5357f55c90 100644 --- a/google/cloud/spanner_v1/database.py +++ 
b/google/cloud/spanner_v1/database.py @@ -53,6 +53,7 @@ SpannerGrpcTransport, ) from google.cloud.spanner_v1.table import Table +from google.cloud.spanner_v1.types import DatabaseDialect SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data" From d879842e704430cae4b23973e3dae55782cef0a3 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Thu, 16 Jun 2022 00:06:45 +0530 Subject: [PATCH 23/26] feat: correcting imports --- google/cloud/spanner_v1/database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 5357f55c90..7d2384beed 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -34,6 +34,7 @@ from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig from google.cloud.spanner_admin_database_v1 import RestoreDatabaseRequest from google.cloud.spanner_admin_database_v1 import UpdateDatabaseDdlRequest +from google.cloud.spanner_admin_database_v1.types import DatabaseDialect from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1 import TransactionOptions @@ -53,7 +54,6 @@ SpannerGrpcTransport, ) from google.cloud.spanner_v1.table import Table -from google.cloud.spanner_v1.types import DatabaseDialect SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data" From f9c274a3e9b0a5e09cceffc39a31909ececeec0d Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Fri, 17 Jun 2022 11:28:29 +0530 Subject: [PATCH 24/26] feat: skip backup tests --- tests/system/_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 51a6d773c4..3f07e74908 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -31,7 +31,7 @@ INSTANCE_ID = os.environ.get(INSTANCE_ID_ENVVAR, INSTANCE_ID_DEFAULT) SKIP_BACKUP_TESTS_ENVVAR = "SKIP_BACKUP_TESTS" 
-SKIP_BACKUP_TESTS = os.getenv(SKIP_BACKUP_TESTS_ENVVAR) is not None +SKIP_BACKUP_TESTS = True # os.getenv(SKIP_BACKUP_TESTS_ENVVAR) == True INSTANCE_OPERATION_TIMEOUT_IN_SECONDS = int( os.getenv("SPANNER_INSTANCE_OPERATION_TIMEOUT_IN_SECONDS", 560) From 41a03e02ad8c779b5d1ba3cfa0c016ba453a84b1 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Fri, 17 Jun 2022 13:10:09 +0530 Subject: [PATCH 25/26] feat: correct the import --- tests/system/test_session_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index ccbfeab844..f211577abd 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -2064,7 +2064,7 @@ def test_execute_sql_w_json_bindings(not_emulator, sessions_database, database_d _bind_test_helper( sessions_database, database_dialect, - spanner_v1.TypeCode.JSON, + spanner_v1.param_types.JSON, JSON_1, [JSON_1, JSON_2], ) From ba0bf4dbf459a68c0de03a7220d5ad50c67ac5c4 Mon Sep 17 00:00:00 2001 From: Anshul Goyal Date: Fri, 17 Jun 2022 14:23:48 +0530 Subject: [PATCH 26/26] feat: fix linting --- tests/system/_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system/_helpers.py b/tests/system/_helpers.py index 3f07e74908..0cb00b15ff 100644 --- a/tests/system/_helpers.py +++ b/tests/system/_helpers.py @@ -31,7 +31,7 @@ INSTANCE_ID = os.environ.get(INSTANCE_ID_ENVVAR, INSTANCE_ID_DEFAULT) SKIP_BACKUP_TESTS_ENVVAR = "SKIP_BACKUP_TESTS" -SKIP_BACKUP_TESTS = True # os.getenv(SKIP_BACKUP_TESTS_ENVVAR) == True +SKIP_BACKUP_TESTS = True # os.getenv(SKIP_BACKUP_TESTS_ENVVAR) == True INSTANCE_OPERATION_TIMEOUT_IN_SECONDS = int( os.getenv("SPANNER_INSTANCE_OPERATION_TIMEOUT_IN_SECONDS", 560)