diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index eabed9f3..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,19 +0,0 @@ -version: 2.1 - -orbs: - python: circleci/python@1.3.3 - -jobs: - build-and-test: - executor: python/default - steps: - - checkout - - run: sudo apt install python3.8 - - run: echo $GCLOUD_SERVICE_KEY > "$GOOGLE_APPLICATION_CREDENTIALS" - - run: python3 -m pip install nox - - run: python3 -m nox - -workflows: - main: - jobs: - - build-and-test diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..54b9c39b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,8 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + +# The @googleapis/api-spanner is the default owner for changes in this repo +* @googleapis/api-spanner diff --git a/.github/release-please.yml b/.github/release-please.yml new file mode 100644 index 00000000..4507ad05 --- /dev/null +++ b/.github/release-please.yml @@ -0,0 +1 @@ +releaseType: python diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml new file mode 100644 index 00000000..c8e9158a --- /dev/null +++ b/.github/sync-repo-settings.yaml @@ -0,0 +1,16 @@ +# https://github.com/googleapis/repo-automation-bots/tree/main/packages/sync-repo-settings +# Rules for main branch protection +branchProtectionRules: +# Identifies the protection rule pattern. Name of the branch to be protected. +# Defaults to `main` +- pattern: main + requiresCodeOwnerReviews: true + requiresStrictStatusChecks: true + requiredStatusCheckContexts: + - 'lint' + - 'unit' + - 'compliance_tests' + - 'migration_tests' + - 'cla/google' + - 'Kokoro' + - 'Kokoro Compliance Tests' diff --git a/.github/workflows/test_suite.yml b/.github/workflows/test_suite.yml index 7a6d4653..292c3d69 100644 --- a/.github/workflows/test_suite.yml +++ b/.github/workflows/test_suite.yml @@ -5,7 +5,40 @@ on: pull_request: name: SQLAlchemy Spanner dialect jobs: - tests: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install nox + run: python -m pip install nox + - name: Run Lint + run: nox -s lint_setup_py lint blacken + + unit: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install nox + run: python -m pip install nox + - name: Run Unit Tests + run: nox -s unit + env: + SPANNER_EMULATOR_HOST: localhost:9010 + GOOGLE_CLOUD_PROJECT: appdev-soda-spanner-staging + + compliance_tests: runs-on: ubuntu-latest services: @@ -23,9 +56,32 @@ jobs: python-version: 3.8 - name: Install nox run: python -m pip install nox - - name: Run SQLAlchemy tests - run: nox + - name: Run Compliance Tests + run: nox -s compliance_test env: SPANNER_EMULATOR_HOST: localhost:9010 GOOGLE_CLOUD_PROJECT: appdev-soda-spanner-staging + migration_tests: + runs-on: ubuntu-latest + + services: + emulator-0: + image: gcr.io/cloud-spanner-emulator/emulator:latest + ports: + - 9010:9010 + + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install nox + run: python -m pip install nox + - 
name: Run Migration Tests + run: nox -s migration_test + env: + SPANNER_EMULATOR_HOST: localhost:9010 + GOOGLE_CLOUD_PROJECT: appdev-soda-spanner-staging diff --git a/.kokoro/build.sh b/.kokoro/build.sh new file mode 100755 index 00000000..6f33e298 --- /dev/null +++ b/.kokoro/build.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +set -eo pipefail + +if [[ -z "${PROJECT_ROOT:-}" ]]; then + PROJECT_ROOT="github/python-spanner-sqlalchemy" +fi + +cd "${PROJECT_ROOT}" + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") +export GOOGLE_CLOUD_PROJECT=$(cat "${KOKORO_GFILE_DIR}/project-id.json") + +# Remove old nox +python3 -m pip uninstall --yes --quiet nox-automation + +# Install nox +python3 -m pip install --upgrade --quiet nox +python3 -m nox --version + +# If this is a continuous build, send the test log to the FlakyBot. +# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi + +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. +if [[ -n "${NOX_SESSION:-}" ]]; then + python3 -m nox -s ${NOX_SESSION:-} +else + python3 -m nox +fi diff --git a/.kokoro/continuous/common.cfg b/.kokoro/continuous/common.cfg new file mode 100644 index 00000000..3af6b618 --- /dev/null +++ b/.kokoro/continuous/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-spanner-sqlalchemy/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner-sqlalchemy/.kokoro/build.sh" +} diff --git a/.kokoro/continuous/continuous.cfg b/.kokoro/continuous/continuous.cfg new file mode 100644 index 00000000..18a4c353 --- /dev/null +++ b/.kokoro/continuous/continuous.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile new file mode 100644 index 00000000..cf3922d7 --- /dev/null +++ b/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,59 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. 
+RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + python3-distutils \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.8"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 00000000..2b2b7e95 --- /dev/null +++ b/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Łukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg new file mode 100644 index 00000000..76db11b1 --- /dev/null +++ b/.kokoro/docs/common.cfg @@ -0,0 +1,63 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-spanner-sqlalchemy/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner-sqlalchemy/.kokoro/publish-docs.sh" +} + +env_vars: { + key: "STAGING_BUCKET" + value: "docs-staging" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2" +} + +# It will upload the docker image after successful builds. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image.
+env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + +# Fetch the token needed for reporting release status to GitHub +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 00000000..5e07943d --- /dev/null +++ b/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,28 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner-sqlalchemy/.kokoro/build.sh" +} + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "docs docfx" +} diff --git a/.kokoro/docs/docs.cfg b/.kokoro/docs/docs.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/docs/docs.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 00000000..bf0be603 --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/.kokoro/presubmit/common.cfg b/.kokoro/presubmit/common.cfg new file mode 100644 index 00000000..3af6b618 --- /dev/null +++ b/.kokoro/presubmit/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-spanner-sqlalchemy/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner-sqlalchemy/.kokoro/build.sh" +} diff --git a/.kokoro/presubmit/compliance.cfg b/.kokoro/presubmit/compliance.cfg new file mode 100644 index 00000000..3383be45 --- /dev/null +++ b/.kokoro/presubmit/compliance.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "compliance_test" +} diff --git a/.kokoro/presubmit/presubmit.cfg b/.kokoro/presubmit/presubmit.cfg new file mode 100644 index 00000000..3aa90144 --- /dev/null +++ b/.kokoro/presubmit/presubmit.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Disable system tests. +env_vars: { + key: "RUN_COMPLIANCE_TESTS" + value: "false" +} diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh new file mode 100755 index 00000000..41e4460d --- /dev/null +++ b/.kokoro/publish-docs.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +set -eo pipefail + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +export PATH="${HOME}/.local/bin:${PATH}" + +# Install nox +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version + +# build docs +nox -s docs + +python3 -m pip install --user gcp-docuploader + +# create metadata +python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. 
+python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/release.sh b/.kokoro/release.sh new file mode 100755 index 00000000..c03a31f0 --- /dev/null +++ b/.kokoro/release.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +set -eo pipefail + +# Start the releasetool reporter +python3 -m pip install gcp-releasetool +python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script + +# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. +python3 -m pip install --upgrade twine wheel setuptools + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Move into the package, build the distribution and upload. +TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token") +cd github/python-spanner-sqlalchemy +python3 setup.py sdist bdist_wheel +twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg new file mode 100644 index 00000000..9a818a87 --- /dev/null +++ b/.kokoro/release/common.cfg @@ -0,0 +1,30 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-spanner-sqlalchemy/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-spanner-sqlalchemy/.kokoro/release.sh" +} + +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token" +} diff --git a/.kokoro/release/release.cfg b/.kokoro/release/release.cfg new file mode 100644 index 00000000..8f43917d --- /dev/null +++ b/.kokoro/release/release.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh new file mode 100755 index 00000000..427c287f --- /dev/null +++ b/.kokoro/trampoline.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +set -eo pipefail + +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT + +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000..957848c4 --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,479 @@ +#!/usr/bin/env bash +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bsd + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run Docker with the appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download a few files from GCS to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). +# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo-specific envvars in .trampolinerc in +# the project root.
+ + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. + TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. 
+ "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For FlakyBot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. + "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. 
"github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. 
+ "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/BENCHMARKS.md b/BENCHMARKS.md new file mode 100644 index 00000000..14c44358 --- /dev/null +++ b/BENCHMARKS.md @@ -0,0 +1,26 @@ +# Benchmarks + +The performance test suite is located in [test/benchmark.py](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/blob/main/test/benchmark.py) and is intended to compare the execution time difference between the SQLAlchemy dialect for Spanner and the pure Spanner client. + +The test suite requirements: +- `scipy` Python package installed +- the original dialect requirements + +Use the `PROJECT`, `INSTANCE` and `DATABASE` module constants to set the project to execute the tests on. + +The following measurements were made on a VM instance.
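The suite pairs each pure-client operation with its SQLAlchemy dialect equivalent and reports mean, error, and standard deviation per operation, as in the table that follows. Below is a minimal sketch of what such a paired timing harness can look like; the real harness lives in `test/benchmark.py`, and the two operation callables here are hypothetical placeholders, not the suite's actual API:

```python
import statistics
import time


def measure(operation, runs=50):
    """Time `operation` over `runs` iterations; return (mean, error, std_dev) in seconds."""
    timings = []
    for _ in range(runs):
        start = time.perf_counter()
        operation()
        timings.append(time.perf_counter() - start)
    mean = statistics.mean(timings)
    std_dev = statistics.stdev(timings)
    # Standard error of the mean, reported as "error" in the table below.
    error = std_dev / len(timings) ** 0.5
    return round(mean, 2), round(error, 2), round(std_dev, 2)


def spanner_read_one_row():
    """Placeholder: read one row with the pure google-cloud-spanner client."""


def alchemy_read_one_row():
    """Placeholder: read the same row through the SQLAlchemy dialect."""


for name, operation in [
    ("SPANNER read_one_row", spanner_read_one_row),
    ("ALCHEMY read_one_row", alchemy_read_one_row),
]:
    print(name, *measure(operation))
```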
+ +# 25-11-2021 + +|Test|mean, sec|error|std_dev| +|----|-------|-----|--------| +|SPANNER insert_one_row_with_fetch_after| 0.16|0.0|0.03| +|ALCHEMY insert_one_row_with_fetch_after| 0.11| 0.0|0.02| +|SPANNER read_one_row| 0.04| 0.0| 0.01| +|ALCHEMY read_one_row| 0.01| 0.0| 0.0| +|SPANNER insert_many_rows| 0.33| 0.01| 0.05| +|ALCHEMY insert_many_rows| 0.32| 0.01| 0.06| +|SPANNER select_many_rows| 0.04| 0.0| 0.01| +|ALCHEMY select_many_rows| 0.03| 0.0| 0.0| +|SPANNER insert_many_rows_with_mutations| 0.07| 0.0| 0.03| +|SQLALCHEMY insert_many_rows_with_mutations| 0.31| 0.01| 0.07| diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..d431e1ba --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog + +## [1.0.0](https://www.github.com/googleapis/python-spanner-sqlalchemy/compare/v0.1.0...v1.0.0) (2021-12-08) + + +### Features + +* add code samples ([#55](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/55)) ([406c34b](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/406c34bdb21e01a1317c074fab34d87bb3d61020)) +* set user-agent string to distinguish SQLAlchemy requests ([#116](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/116)) ([b5e1a21](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/b5e1a211a0475690feed36fd222a41c216d8fb82)) +* support computed columns ([#139](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/139)) ([046ca97](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/046ca975778f4793e2c37d70d2a602546f9d4699)), closes [#137](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/137) +* support JSON data type ([#135](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/135)) ([184a7d5](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/184a7d576a790bbbd049fe80d589af78831379b4)) +* support read_only connections ([#125](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/125)) ([352c47d](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/352c47de7bb4ea1c30b50a7fe5aee0c4d102e80e)) +* support stale reads ([#146](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/146)) ([d80cb27](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/d80cb2792437731c24905c7a6919468c37779c67)) + + +### Bug Fixes + +* ALTER COLUMN NOT NULL directive fails because of inappropriate syntax ([#124](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/124)) ([c433cda](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/c433cda99fd8544810c878328a272a3a9430630f)) +* array columns reflection ([#119](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/119)) ([af3b97b](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/af3b97bfa4b3ed4b223384c9ed3fa0643204d8c9)), closes [#118](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/118) +* calculate limit value correctly for offset only queries ([#160](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/160)) ([6844336](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/684433682ed29d9cde8c9898796024cefeb38493)) +* correct typo in spanner_interleave_on_delete_cascade keyword ([#99](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/99)) ([a0ebf75](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/a0ebf758eda351c0a20103f9e8c2243f002b2e6e)) +* raise Unimplemented error when creating temporary tables 
([#159](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/159)) ([646d6ac](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/646d6ac24ccd0643b67abff9da28118e0a6f6e55)) +* rollback failed exception log ([#106](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/106)) ([809e6ab](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/809e6abb29f82a7fbe6587d606e8d75283f2a2fe)) + + +### Documentation + +* add query hints example ([#153](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/153)) ([9c23804](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/9c23804746bc8c638b6c22f2cb6ea57778f7fd19)) +* reformatted README titles ([#141](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/141)) ([a3ccbac](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/a3ccbac476679fe8048ed2109e5489b873278c9c)) +* update benchmarks ([#155](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/155)) ([3500653](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/35006536e4de31dbcba022b73f0aadf39bc89e39)) + + +### Miscellaneous Chores + +* setup release 1.0.0 ([#165](https://www.github.com/googleapis/python-spanner-sqlalchemy/issues/165)) ([37a415d](https://www.github.com/googleapis/python-spanner-sqlalchemy/commit/37a415d071d39e99f233a1c15c1c4b89bd436570)) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..46b2a08e --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. 
+ +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/contributing.md b/CONTRIBUTING.md similarity index 100% rename from contributing.md rename to CONTRIBUTING.md diff --git a/LICENSE b/LICENSE index 08eea89f..d6456956 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,7 @@ - Apache License + + Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -192,10 +193,10 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/README.md b/README.md index f601c077..5927cb35 100644 --- a/README.md +++ b/README.md @@ -2,15 +2,12 @@ Spanner dialect for SQLAlchemy represents an interface API designed to make it possible to control Cloud Spanner databases with SQLAlchemy API. The dialect is built on top of [the Spanner DB API](https://github.com/googleapis/python-spanner/tree/master/google/cloud/spanner_dbapi), which is designed in accordance with [PEP-249](https://www.python.org/dev/peps/pep-0249/). -This project has **Preview** release status. Known limitations are listed [here](#features-and-limitations). All supported features have been tested and verified to work with the test configurations. There may be configurations and/or data model variations that have not yet been covered by the tests and that show unexpected behavior. Please report any problems that you might encounter by [creating a new issue](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/issues/new). - -**NOTE: This project may still make breaking changes without prior notice and should not yet be used for production purposes.** +Known limitations are listed [here](#features-and-limitations). All supported features have been tested and verified to work with the test configurations. There may be configurations and/or data model variations that have not yet been covered by the tests and that show unexpected behavior. Please report any problems that you might encounter by [creating a new issue](https://github.com/googleapis/python-spanner-sqlalchemy/issues/new). - [Cloud Spanner product documentation](https://cloud.google.com/spanner/docs) - [SQLAlchemy product documentation](https://www.sqlalchemy.org/) -Quick Start ------------ +## Quick Start In order to use this package, you first need to go through the following steps: @@ -19,12 +16,11 @@ In order to use this package, you first need to go through the following steps: 3. [Enable the Google Cloud Spanner API.](https://cloud.google.com/spanner) 4. 
[Setup Authentication.](https://googleapis.dev/python/google-api-core/latest/auth.html) -Installation ------------ +## Installation To install an in-development version of the package, clone its Git repository: ``` -git clone https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy.git +git clone https://github.com/googleapis/python-spanner-sqlalchemy.git ``` Next, install the package from its `setup.py` file: ``` @@ -32,9 +28,9 @@ python setup.py install ``` During setup the dialect will be registered with entry points. -A Minimal App ------------ -**Create a table** +## A Minimal App + +### Create a table ```python from sqlalchemy import ( Column, @@ -59,7 +55,8 @@ user = Table( metadata.create_all(engine) ``` -**Insert a row** + +### Insert a row ```python from sqlalchemy import ( MetaData, @@ -76,7 +73,7 @@ with engine.begin() as connection: connection.execute(user.insert(), {"user_id": 1, "user_name": "Full Name"}) ``` -**Read** +### Read ```python from sqlalchemy import MetaData, Table, create_engine, select @@ -90,16 +87,16 @@ with engine.begin() as connection: print(row) ``` -Migration ------------ +## Migration + SQLAlchemy uses the [Alembic](https://alembic.sqlalchemy.org/en/latest/#) tool to organize database migrations. **Warning!** A migration script can produce a lot of DDL statements. If each of the statements is executed separately, performance issues can occur. To avoid these, it's highly recommended to use the [Alembic batch context](https://alembic.sqlalchemy.org/en/latest/batch.html) feature to pack DDL statements into groups. -Features and limitations ------------ -**Interleaved tables** +## Features and limitations + +### Interleaved tables The Cloud Spanner dialect includes two dialect-specific arguments for the `Table` constructor, which help to define interleave relations: `spanner_interleave_in` - a parent table name `spanner_interleave_on_delete_cascade` - a flag specifying whether the `ON DELETE CASCADE` statement should be used for the interleave relation @@ -126,7 +123,7 @@ client = Table( client.create(engine) ``` -**Unique constraints** +### Unique constraints Cloud Spanner doesn't support direct creation of UNIQUE constraints. To enforce column value uniqueness, UNIQUE indexes should be used. Instead of direct UNIQUE constraint creation: @@ -147,7 +144,7 @@ Table( Index("uix_1", "col1", unique=True), ) ``` -**Autocommit mode** +### Autocommit mode The Spanner dialect supports both `SERIALIZABLE` and `AUTOCOMMIT` isolation levels. `SERIALIZABLE` is the default; transactions must be committed manually. In `AUTOCOMMIT` mode every query is committed automatically at execution time. Isolation level change example: @@ -158,13 +155,85 @@ eng = create_engine("spanner:///projects/project-id/instances/instance-id/databa autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT") ``` -**DDL and transactions** +### Query hints +The Spanner dialect supports [query hints](https://cloud.google.com/spanner/docs/query-syntax#table_hints), which allow setting additional query execution parameters.
Usage example: +```python +session = Session(engine) + +Base = declarative_base() + +class User(Base): + """Data model.""" + + __tablename__ = "users" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + +query = session.query(User) +query = query.with_hint( + selectable=User, text="@{FORCE_INDEX=index_name}" +) +query = query.filter(User.name.in_(["val1", "val2"])) +query.statement.compile(session.bind) +``` + +### ReadOnly transactions +By default, transactions produced by a Spanner connection are in ReadWrite mode. However, some applications require the ability to grant ReadOnly access to users/methods; for these cases the Spanner dialect supports the `read_only` execution option, which switches a connection into ReadOnly mode: +```python +with engine.connect().execution_options(read_only=True) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() +``` +Note that execution options are applied lazily, right before the `execute()` method call. + +The ReadOnly/ReadWrite mode of a connection can't be changed while a transaction is in progress; you must first commit or roll it back. + +### Stale reads +To use Spanner [Stale Reads](https://cloud.google.com/spanner/docs/reads#perform-stale-read) with SQLAlchemy, set the desired staleness value in the connection execution options. For example: +```python +# maximum staleness +with engine.connect().execution_options( + read_only=True, + staleness={"max_staleness": datetime.timedelta(seconds=5)} +) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() +``` + +```python +# exact staleness +with engine.connect().execution_options( + read_only=True, + staleness={"exact_staleness": datetime.timedelta(seconds=5)} +) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() +``` + +```python +# min read timestamp +with engine.connect().execution_options( + read_only=True, + staleness={"min_read_timestamp": datetime.datetime(2021, 11, 17, 12, 55, 30)} +) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() +``` + +```python +# read timestamp +with engine.connect().execution_options( + read_only=True, + staleness={"read_timestamp": datetime.datetime(2021, 11, 17, 12, 55, 30)} +) as connection: + connection.execute(select(["*"], from_obj=table)).fetchall() +``` +Note that the option is dropped when the connection is returned to the pool. + +### DDL and transactions DDL statements are executed outside the regular transaction mechanism, which means they will not be rolled back on a normal transaction rollback. -**Dropping a table** +### Dropping a table By default, Cloud Spanner doesn't drop tables that have secondary indexes and/or foreign key constraints. The Spanner dialect for SQLAlchemy, however, lifts this restriction: if a table you are trying to drop has indexes/foreign keys, they will be dropped automatically right before the table itself. -**Data types** +### Data types Data types table mapping SQLAlchemy types to Cloud Spanner types: | SQLAlchemy | Spanner | @@ -183,14 +252,13 @@ Data types table mapping SQLAlchemy types to Cloud Spanner types: | NUMERIC | NUMERIC | -**Other limitations** +### Other limitations - WITH RECURSIVE statement is not supported. - Named schemas are not supported. -- Temporary tables are not supported, real tables are used instead. +- Temporary tables are not supported. - Numeric type dimensions (scale and precision) are constant.
See the [docs](https://cloud.google.com/spanner/docs/data-types#numeric_types). -Best practices ------------ +## Best practices When a SQLAlchemy function is called, a new connection to a database is established and a Spanner session object is fetched. With connectionless execution these fetches are done for every `execute()` call, which can cause significant latency. To avoid initiating a Spanner session on every `execute()` call, it's recommended to write code in a connection-bound fashion. Once a `Connection()` object is explicitly initiated, it fetches a Spanner session object and reuses it for all the following calls made on this `Connection()` object (a connection-bound sketch follows this diff section). Non-optimal connectionless use: @@ -206,8 +274,8 @@ with engine.begin() as connection: ``` Connectionless execution is also deprecated as of SQLAlchemy 2.0 and will soon be removed (see the [SQLAlchemy docs](https://docs.sqlalchemy.org/en/14/core/connections.html#connectionless-execution-implicit-execution)). -Running tests ------------- +## Running tests + The Spanner dialect includes compliance, migration and unit test suites. To run the tests, the `nox` package commands can be used: ``` # Run the whole suite $ nox # Run a particular test session $ nox -s migration_test ``` -**Running tests on Spanner emulator** +### Running tests on Spanner emulator The dialect test suite can be run on the [Spanner emulator](https://cloud.google.com/spanner/docs/emulator). Several tests relating to `NULL` values of data types are skipped when executed on the emulator. -Contributing ------------- +## Contributing -Contributions to this library are welcome and encouraged. Please report issues, file feature requests, and send pull requests. See [CONTRIBUTING](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/blob/main/contributing.md) for more information on how to get +Contributions to this library are welcome and encouraged. Please report issues, file feature requests, and send pull requests. See [CONTRIBUTING](https://github.com/googleapis/python-spanner-sqlalchemy/blob/main/contributing.md) for more information on how to get started. **Note that this project is not officially supported by Google as part of the Cloud Spanner product.** Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. See the [Code -of Conduct](https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy/blob/main/code-of-conduct.md) for more information. +of Conduct](https://github.com/googleapis/python-spanner-sqlalchemy/blob/main/code-of-conduct.md) for more information. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..8b58ae9c --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). + +The Google Security Team will respond within 5 working days of your report on g.co/vulnz. + +We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
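As referenced in the Best practices section of the README diff above, here is a minimal sketch of the recommended connection-bound style, reusing the engine URL and `users` table from the README's Minimal App examples (an illustration under those assumptions, not part of the PR):

```python
from sqlalchemy import MetaData, Table, create_engine, select

engine = create_engine(
    "spanner:///projects/project-id/instances/instance-id/databases/database-id"
)
user = Table("users", MetaData(bind=engine), autoload=True)

# Connection-bound execution: the connection (and its Spanner session)
# is fetched once and reused for every statement in the block.
with engine.begin() as connection:
    connection.execute(user.insert(), {"user_id": 1, "user_name": "Full Name"})
    for row in connection.execute(select(["*"], from_obj=user)).fetchall():
        print(row)
```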
diff --git a/code-of-conduct.md b/code-of-conduct.md deleted file mode 100644 index f8b12cb5..00000000 --- a/code-of-conduct.md +++ /dev/null @@ -1,63 +0,0 @@ -# Google Open Source Community Guidelines - -At Google, we recognize and celebrate the creativity and collaboration of open -source contributors and the diversity of skills, experiences, cultures, and -opinions they bring to the projects and communities they participate in. - -Every one of Google's open source projects and communities are inclusive -environments, based on treating all individuals respectfully, regardless of -gender identity and expression, sexual orientation, disabilities, -neurodiversity, physical appearance, body size, ethnicity, nationality, race, -age, religion, or similar personal characteristic. - -We value diverse opinions, but we value respectful behavior more. - -Respectful behavior includes: - -* Being considerate, kind, constructive, and helpful. -* Not engaging in demeaning, discriminatory, harassing, hateful, sexualized, or - physically threatening behavior, speech, and imagery. -* Not engaging in unwanted physical contact. - -Some Google open source projects [may adopt][] an explicit project code of -conduct, which may have additional detailed expectations for participants. Most -of those projects will use our [modified Contributor Covenant][]. - -[may adopt]: https://opensource.google/docs/releasing/preparing/#conduct -[modified Contributor Covenant]: https://opensource.google/docs/releasing/template/CODE_OF_CONDUCT/ - -## Resolve peacefully - -We do not believe that all conflict is necessarily bad; healthy debate and -disagreement often yields positive results. However, it is never okay to be -disrespectful. - -If you see someone behaving disrespectfully, you are encouraged to address the -behavior directly with those involved. Many issues can be resolved quickly and -easily, and this gives people more control over the outcome of their dispute. -If you are unable to resolve the matter for any reason, or if the behavior is -threatening or harassing, report it. We are dedicated to providing an -environment where participants feel welcome and safe. - -## Reporting problems - -Some Google open source projects may adopt a project-specific code of conduct. -In those cases, a Google employee will be identified as the Project Steward, -who will receive and handle reports of code of conduct violations. In the event -that a project hasn’t identified a Project Steward, you can report problems by -emailing opensource@google.com. - -We will investigate every complaint, but you may not receive a direct response. -We will use our discretion in determining when and how to follow up on reported -incidents, which may range from not taking action to permanent expulsion from -the project and project-sponsored spaces. We will notify the accused of the -report and provide them an opportunity to discuss it before any action is -taken. The identity of the reporter will be omitted from the details of the -report supplied to the accused. In potentially harmful situations, such as -ongoing harassment or threats to anyone's safety, we may take action without -notice. 
- -*This document was adapted from the [IndieWeb Code of Conduct][] and can also -be found at .* - -[IndieWeb Code of Conduct]: https://indieweb.org/code-of-conduct diff --git a/create_test_config.py b/create_test_config.py new file mode 100644 index 00000000..34d5b863 --- /dev/null +++ b/create_test_config.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import configparser +import sys + + +def set_test_config(project, instance): + config = configparser.ConfigParser() + url = ( + f"spanner:///projects/{project}/instances/{instance}/" + "databases/compliance-test" + ) + config.add_section("db") + config["db"]["default"] = url + + with open("test.cfg", "w") as configfile: + config.write(configfile) + + +def main(argv): + project = argv[0] + instance = argv[1] + set_test_config(project, instance) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/create_test_database.py b/create_test_database.py index 498b5448..8abbed65 100644 --- a/create_test_database.py +++ b/create_test_database.py @@ -18,6 +18,8 @@ import os import time +from create_test_config import set_test_config +from google.api_core.exceptions import AlreadyExists, ResourceExhausted from google.cloud.spanner_v1 import Client from google.cloud.spanner_v1.instance import Instance @@ -37,8 +39,8 @@ CLIENT = Client(project=PROJECT) -def reap_old_instances(): - # Delete test instances that are older than four hours. +def delete_stale_test_instances(): + """Delete test instances that are older than four hours.""" cutoff = int(time.time()) - 4 * 60 * 60 instances_pbs = CLIENT.list_instances( "labels.python-spanner-sqlalchemy-systest:true" @@ -50,11 +52,19 @@ def reap_old_instances(): create_time = int(instance.labels["created"]) if create_time > cutoff: continue - # Backups are not used in sqlalchemy dialect test, therefore instance can just be deleted. - instance.delete() - - -def prep_instance(): + # Backups are not used in sqlalchemy dialect test, + # therefore instance can just be deleted. + try: + instance.delete() + except ResourceExhausted: + print( + "Unable to drop stale instance '{}'. 
May need manual delete.".format( + instance.instance_id + ) + ) + + +def create_test_instance(): configs = list(CLIENT.list_instance_configs()) if not USE_EMULATOR: # Filter out non "us" locations @@ -62,29 +72,32 @@ def prep_instance(): instance_config = configs[0].name create_time = str(int(time.time())) - unique_resource_id = '%s%d' % ('-', 1000 * time.time()) - instance_id = 'sqlalchemy-dialect-test' if USE_EMULATOR else "sqlalchemy-test" + unique_resource_id + unique_resource_id = "%s%d" % ("-", 1000 * time.time()) + instance_id = ( + "sqlalchemy-dialect-test" + if USE_EMULATOR + else "sqlalchemy-test" + unique_resource_id + ) labels = {"python-spanner-sqlalchemy-systest": "true", "created": create_time} instance = CLIENT.instance(instance_id, instance_config, labels=labels) - created_op = instance.create() - created_op.result(120) # block until completion - database = instance.database("compliance-test") - created_op = database.create() - created_op.result(120) + try: + created_op = instance.create() + created_op.result(1800) # block until completion + except AlreadyExists: + pass # instance was already created - config = configparser.ConfigParser() - url = "spanner:///projects/{project}/instances/{instance_id}/databases/compliance-test".format( - project=PROJECT, instance_id=instance_id - ) - config.add_section("db") - config["db"]["default"] = url + try: + database = instance.database("compliance-test") + created_op = database.create() + created_op.result(1800) + except AlreadyExists: + pass # instance was already created - with open("test.cfg", "w") as configfile: - config.write(configfile) + set_test_config(PROJECT, instance_id) -reap_old_instances() -prep_instance() +delete_stale_test_instances() +create_test_instance() diff --git a/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py b/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py index ed3b5b9d..cc0e8ac6 100644 --- a/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py +++ b/google/cloud/sqlalchemy_spanner/_opentelemetry_tracing.py @@ -52,9 +52,9 @@ def trace_call(name, extra_attributes=None): name, kind=trace.SpanKind.CLIENT, attributes=attributes ) as span: try: - span.set_status(Status(StatusCode.OK)) yield span except GoogleAPICallError as error: span.set_status(Status(StatusCode.ERROR)) span.record_exception(error) raise + span.set_status(Status(StatusCode.OK)) diff --git a/google/cloud/sqlalchemy_spanner/provision.py b/google/cloud/sqlalchemy_spanner/provision.py index 91ff8e92..f56aaccf 100644 --- a/google/cloud/sqlalchemy_spanner/provision.py +++ b/google/cloud/sqlalchemy_spanner/provision.py @@ -15,6 +15,6 @@ from sqlalchemy.testing.provision import temp_table_keyword_args -@temp_table_keyword_args.for_db("spanner") +@temp_table_keyword_args.for_db("spanner") # pragma: no cover def _spanner_temp_table_keyword_args(cfg, eng): - return {} + return {"prefixes": ["TEMPORARY"]} diff --git a/google/cloud/sqlalchemy_spanner/requirements.py b/google/cloud/sqlalchemy_spanner/requirements.py index 1b929847..d552dc34 100644 --- a/google/cloud/sqlalchemy_spanner/requirements.py +++ b/google/cloud/sqlalchemy_spanner/requirements.py @@ -16,7 +16,26 @@ from sqlalchemy.testing.requirements import SuiteRequirements -class Requirements(SuiteRequirements): +class Requirements(SuiteRequirements): # pragma: no cover + @property + def json_type(self): + return exclusions.open() + + @property + def computed_columns(self): + return exclusions.open() + + @property + def computed_columns_stored(self): + return 
exclusions.open() + + def sane_rowcount(self): + return exclusions.closed() + + @property + def sane_multi_rowcount(self): + return exclusions.closed() + @property def foreign_key_constraint_name_reflection(self): return exclusions.open() @@ -53,6 +72,11 @@ def isolation_level(self): def sequences(self): return exclusions.closed() + @property + def temporary_tables(self): + """Target database supports temporary tables.""" + return exclusions.closed() + def get_order_by_collation(self, _): """Get the default collation name. diff --git a/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py b/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py index e98b98bb..62eda640 100644 --- a/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py +++ b/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py @@ -12,23 +12,49 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pkg_resources import re -from sqlalchemy import types, ForeignKeyConstraint +from alembic.ddl.base import ( + ColumnNullable, + ColumnType, + alter_column, + alter_table, + format_type, +) +from sqlalchemy import ForeignKeyConstraint, types, util from sqlalchemy.engine.base import Engine -from sqlalchemy.engine.default import DefaultDialect -from sqlalchemy import util +from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext +from sqlalchemy.event import listens_for +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.pool import Pool from sqlalchemy.sql.compiler import ( selectable, DDLCompiler, GenericTypeCompiler, IdentifierPreparer, SQLCompiler, + OPERATORS, RESERVED_WORDS, ) +from sqlalchemy.sql.default_comparator import operator_lookup +from sqlalchemy.sql.operators import json_getitem_op + +from google.cloud.spanner_v1.data_types import JsonObject from google.cloud import spanner_dbapi from google.cloud.sqlalchemy_spanner._opentelemetry_tracing import trace_call + +@listens_for(Pool, "reset") +def reset_connection(dbapi_conn, connection_record): + """An event of returning a connection back to a pool.""" + dbapi_conn.connection.staleness = None + + +# register a method to get a single value of a JSON object +OPERATORS[json_getitem_op] = operator_lookup["json_getitem_op"] + + # Spanner-to-SQLAlchemy types map _type_map = { "BOOL": types.Boolean, @@ -41,8 +67,11 @@ "STRING": types.String, "TIME": types.TIME, "TIMESTAMP": types.TIMESTAMP, + "ARRAY": types.ARRAY, + "JSON": types.JSON, } + _type_map_inv = { types.Boolean: "BOOL", types.BINARY: "BYTES(MAX)", @@ -106,6 +135,23 @@ def wrapper(self, connection, *args, **kwargs): return wrapper +class SpannerExecutionContext(DefaultExecutionContext): + def pre_exec(self): + """ + Apply execution options to the DB API connection before + executing the next SQL operation. + """ + super(SpannerExecutionContext, self).pre_exec() + + read_only = self.execution_options.get("read_only", None) + if read_only is not None: + self._dbapi_connection.connection.read_only = read_only + + staleness = self.execution_options.get("staleness", None) + if staleness is not None: + self._dbapi_connection.connection.staleness = staleness + + class SpannerIdentifierPreparer(IdentifierPreparer): """Identifiers compiler. 
@@ -174,6 +220,53 @@ def visit_like_op_binary(self, binary, operator, **kw): binary.right._compiler_dispatch(self, **kw), ) + def _generate_generic_binary(self, binary, opstring, eager_grouping=False, **kw): + """The method is overriden to process JSON data type cases.""" + _in_binary = kw.get("_in_binary", False) + + kw["_in_binary"] = True + + if isinstance(opstring, str): + text = ( + binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + opstring + + binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + ) + if _in_binary and eager_grouping: + text = "(%s)" % text + else: + # got JSON data + right_value = getattr( + binary.right, "value", None + ) or binary.right._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + text = ( + binary.left._compiler_dispatch( + self, eager_grouping=eager_grouping, **kw + ) + + """, "$.""" + + str(right_value) + + '"' + ) + text = "JSON_VALUE(%s)" % text + + return text + + def visit_json_path_getitem_op_binary(self, binary, operator, **kw): + """Build a JSON_VALUE() function call.""" + expr = """JSON_VALUE(%s, "$.%s")""" + + return expr % ( + self.process(binary.left, **kw), + self.process(binary.right, **kw), + ) + def render_literal_value(self, value, type_): """Render the value of a bind parameter as a quoted literal. @@ -223,7 +316,7 @@ def limit_clause(self, select, **kw): text += "\n LIMIT " + self.process(select._limit_clause, **kw) if select._offset_clause is not None: if select._limit_clause is None: - text += "\n LIMIT 9223372036854775805" + text += f"\n LIMIT {9223372036854775807-select._offset}" text += " OFFSET " + self.process(select._offset_clause, **kw) return text @@ -231,6 +324,13 @@ def limit_clause(self, select, **kw): class SpannerDDLCompiler(DDLCompiler): """Spanner DDL statements compiler.""" + def visit_computed_column(self, generated, **kw): + """Computed column operator.""" + text = "AS (%s) STORED" % self.sql_compiler.process( + generated.sqltext, include_table=False, literal_binds=True + ) + return text + def visit_drop_table(self, drop_table): """ Cloud Spanner doesn't drop tables which have indexes @@ -298,12 +398,15 @@ def post_create_table(self, table): cols = [col.name for col in table.primary_key.columns] post_cmds = " PRIMARY KEY ({})".format(", ".join(cols)) + if "TEMPORARY" in table._prefixes: + raise NotImplementedError("Temporary tables are not supported.") + if table.kwargs.get("spanner_interleave_in"): post_cmds += ",\nINTERLEAVE IN PARENT {}".format( table.kwargs["spanner_interleave_in"] ) - if table.kwargs.get("spanner_inverleave_on_delete_cascade"): + if table.kwargs.get("spanner_interleave_on_delete_cascade"): post_cmds += " ON DELETE CASCADE" return post_cmds @@ -327,10 +430,14 @@ def visit_TEXT(self, type_, **kw): def visit_ARRAY(self, type_, **kw): return "ARRAY<{}>".format(self.process(type_.item_type, **kw)) - def visit_BINARY(self, type_, **kw): + def visit_BINARY(self, type_, **kw): # pragma: no cover + """ + The BINARY type is superseded by large_binary in + newer versions of SQLAlchemy (>1.4). 
+ """ return "BYTES({})".format(type_.length or "MAX") - def visit_large_binary(self, type_, **kw): + def visit_large_binary(self, type_, **kw): # pragma: no cover return "BYTES({})".format(type_.length or "MAX") def visit_DECIMAL(self, type_, **kw): @@ -354,6 +461,9 @@ def visit_NUMERIC(self, type_, **kw): def visit_BIGINT(self, type_, **kw): return "INT64" + def visit_JSON(self, type_, **kw): + return "JSON" + class SpannerDialect(DefaultDialect): """Cloud Spanner dialect. @@ -371,7 +481,7 @@ class SpannerDialect(DefaultDialect): execute_sequence_format = list supports_alter = True - supports_sane_rowcount = True + supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_default_values = False supports_sequences = True @@ -383,6 +493,9 @@ class SpannerDialect(DefaultDialect): preparer = SpannerIdentifierPreparer statement_compiler = SpannerSQLCompiler type_compiler = SpannerTypeCompiler + execution_ctx_cls = SpannerExecutionContext + _json_serializer = JsonObject + _json_deserializer = JsonObject @classmethod def dbapi(cls): @@ -433,9 +546,10 @@ def create_connect_args(self, url): ), url.database, ) + dist = pkg_resources.get_distribution("sqlalchemy-spanner") return ( [match.group("instance"), match.group("database"), match.group("project")], - {}, + {"user_agent": f"gl-{dist.project_name}/{dist.version}"}, ) @engine_to_connection @@ -454,7 +568,7 @@ def get_columns(self, connection, table_name, schema=None, **kw): list: The table every column dict-like description. """ sql = """ -SELECT column_name, spanner_type, is_nullable +SELECT column_name, spanner_type, is_nullable, generation_expression FROM information_schema.columns WHERE table_catalog = '' @@ -474,28 +588,49 @@ def get_columns(self, connection, table_name, schema=None, **kw): columns = snap.execute_sql(sql) for col in columns: - if col[1].startswith("STRING"): - end = col[1].index(")") - size = int_from_size(col[1][7:end]) - type_ = _type_map["STRING"](length=size) - # add test creating a table with bytes - elif col[1].startswith("BYTES"): - end = col[1].index(")") - size = int_from_size(col[1][6:end]) - type_ = _type_map["BYTES"](length=size) - else: - type_ = _type_map[col[1]] - - cols_desc.append( - { - "name": col[0], - "type": type_, - "nullable": col[2] == "YES", - "default": None, + col_desc = { + "name": col[0], + "type": self._designate_type(col[1]), + "nullable": col[2] == "YES", + "default": None, + } + + if col[3] is not None: + col_desc["computed"] = { + "persisted": True, + "sqltext": col[3], } - ) + cols_desc.append(col_desc) + return cols_desc + def _designate_type(self, str_repr): + """ + Designate an SQLAlchemy data type from a Spanner + string representation. + + Args: + str_repr (str): String representation of a type. + + Returns: + An SQLAlchemy data type. + """ + if str_repr.startswith("STRING"): + end = str_repr.index(")") + size = int_from_size(str_repr[7:end]) + return _type_map["STRING"](length=size) + # add test creating a table with bytes + elif str_repr.startswith("BYTES"): + end = str_repr.index(")") + size = int_from_size(str_repr[6:end]) + return _type_map["BYTES"](length=size) + elif str_repr.startswith("ARRAY"): + inner_type_str = str_repr[6:-1] + inner_type = self._designate_type(inner_type_str) + return _type_map["ARRAY"](inner_type) + else: + return _type_map[str_repr] + @engine_to_connection def get_indexes(self, connection, table_name, schema=None, **kw): """Get the table indexes. 
@@ -798,13 +933,12 @@ def do_rollback(self, dbapi_connection): To prevent rollback exception, don't rollback committed/rolled back transactions. """ - if ( - not isinstance(dbapi_connection, spanner_dbapi.Connection) - and dbapi_connection.connection._transaction - and ( - dbapi_connection.connection._transaction.rolled_back - or dbapi_connection.connection._transaction.committed - ) + if not isinstance(dbapi_connection, spanner_dbapi.Connection): + dbapi_connection = dbapi_connection.connection + + if dbapi_connection._transaction and ( + dbapi_connection._transaction.rolled_back + or dbapi_connection._transaction.committed ): pass else: @@ -847,3 +981,28 @@ def do_execute_no_params(self, cursor, statement, context=None): } with trace_call("SpannerSqlAlchemy.ExecuteNoParams", trace_attributes): cursor.execute(statement) + + +# Alembic ALTER operation override +@compiles(ColumnNullable, "spanner") +def visit_column_nullable( + element: "ColumnNullable", compiler: "SpannerDDLCompiler", **kw +) -> str: + return "%s %s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + format_type(compiler, element.existing_type), + "" if element.nullable else "NOT NULL", + ) + + +# Alembic ALTER operation override +@compiles(ColumnType, "spanner") +def visit_column_type( + element: "ColumnType", compiler: "SpannerDDLCompiler", **kw +) -> str: + return "%s %s %s" % ( + alter_table(compiler, element.table_name, element.schema), + alter_column(compiler, element.column_name), + "%s" % format_type(compiler, element.type_), + ) diff --git a/migration_test_cleanup.py b/migration_test_cleanup.py index 485f2a42..62266359 100644 --- a/migration_test_cleanup.py +++ b/migration_test_cleanup.py @@ -14,24 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import configparser -import os import re +import sys from google.cloud import spanner -config = configparser.ConfigParser() -if os.path.exists("test.cfg"): - config.read("test.cfg") -else: - config.read("setup.cfg") -db_url = config.get("db", "default") -project = re.findall(r'projects(.*?)instances', db_url) -instance_id = re.findall(r'instances(.*?)databases', db_url) +def main(argv): + db_url = argv[0] -client = spanner.Client(project="".join(project).replace('/', '')) -instance = client.instance(instance_id="".join(instance_id).replace('/', '')) -database = instance.database("compliance-test") + project = re.findall(r"projects(.*?)instances", db_url) + instance_id = re.findall(r"instances(.*?)databases", db_url) -database.update_ddl(["DROP TABLE account", "DROP TABLE alembic_version"]).result(120) + client = spanner.Client(project="".join(project).replace("/", "")) + instance = client.instance(instance_id="".join(instance_id).replace("/", "")) + database = instance.database("compliance-test") + + database.update_ddl(["DROP TABLE account", "DROP TABLE alembic_version"]).result(120) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/noxfile.py b/noxfile.py index 840998fd..a4797503 100644 --- a/noxfile.py +++ b/noxfile.py @@ -60,11 +60,17 @@ class = StreamHandler sa.Column('id', sa.Integer, primary_key=True), sa.Column('name', sa.String(50), nullable=False), sa.Column('description', sa.Unicode(200)), + ) + op.alter_column( + 'account', + 'name', + existing_type=sa.String(50), + nullable=True, )""" BLACK_VERSION = "black==19.10b0" -BLACK_PATHS = ["google", "test", "noxfile.py", "setup.py"] +BLACK_PATHS = ["google", "test", "noxfile.py", "setup.py", "samples"] DEFAULT_PYTHON_VERSION = "3.8" @@ -110,11 +116,37 @@ def lint_setup_py(session): @nox.session(python=DEFAULT_PYTHON_VERSION) def compliance_test(session): """Run SQLAlchemy dialect compliance test suite.""" + + # Check the value of `RUN_COMPLIANCE_TESTS` env var. It defaults to true. + if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false": + session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping") + # Sanity check: Only run tests if the environment variable is set. 
+ if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip( + "Credentials or emulator host must be set via environment variable" + ) + + session.install( + "pytest", "pytest-cov", "pytest-asyncio", + ) + session.install("pytest") session.install("mock") - session.install("-e", ".") + session.install("-e", ".[tracing]") session.run("python", "create_test_database.py") - session.run("pytest", "-v") + + session.run( + "py.test", + "--cov=google.cloud.sqlalchemy_spanner", + "--cov=tests", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + "test", + ) @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -127,6 +159,7 @@ def unit(session): session.install("opentelemetry-api==1.1.0") session.install("opentelemetry-sdk==1.1.0") session.install("opentelemetry-instrumentation==0.20b0") + session.run("python", "create_test_config.py", "my-project", "my-instance") session.run("py.test", "--quiet", os.path.join("test/unit"), *session.posargs) @@ -141,12 +174,22 @@ def migration_test(session): session.install("-e", ".") session.install("alembic") + session.run("python", "create_test_database.py") + + project = os.getenv( + "GOOGLE_CLOUD_PROJECT", os.getenv("PROJECT_ID", "emulator-test-project"), + ) + db_url = ( + f"spanner:///projects/{project}/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + config = configparser.ConfigParser() if os.path.exists("test.cfg"): config.read("test.cfg") else: config.read("setup.cfg") - db_url = config.get("db", "default") + db_url = config.get("db", "default", fallback=db_url) session.run("alembic", "init", "test_migration") @@ -175,6 +218,29 @@ def migration_test(session): # clearing the migration data os.remove("alembic.ini") shutil.rmtree("test_migration") - session.run("python", "migration_test_cleanup.py") + session.run("python", "migration_test_cleanup.py", db_url) if os.path.exists("test.cfg"): os.remove("test.cfg") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def snippets(session): + """Run the documentation example snippets.""" + # Sanity check: Only run snippets system tests if the environment variable + # is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): + session.skip("Credentials must be set via environment variable.") + + session.install("pytest") + session.install("sqlalchemy") + session.install( + "git+https://github.com/googleapis/python-spanner.git#egg=google-cloud-spanner" + ) + session.install("-e", ".") + session.run("python", "create_test_database.py") + session.run( + "py.test", + "--quiet", + os.path.join("samples", "snippets_test.py"), + *session.posargs, + ) diff --git a/renovate.json b/renovate.json new file mode 100644 index 00000000..f45d8f11 --- /dev/null +++ b/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base" + ] +} diff --git a/samples/__init__.py b/samples/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/samples/conftest.py b/samples/conftest.py new file mode 100644 index 00000000..5a4f622e --- /dev/null +++ b/samples/conftest.py @@ -0,0 +1,104 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import configparser +import datetime +import os +import uuid + +import pytest + +from sqlalchemy import ( + Column, + Integer, + MetaData, + String, + Table, + create_engine, + ForeignKey, +) + + +@pytest.fixture +def db_url(): + project = os.getenv( + "GOOGLE_CLOUD_PROJECT", os.getenv("PROJECT_ID", "emulator-test-project"), + ) + db_url = ( + f"spanner:///projects/{project}/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" + ) + + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + return config.get("db", "default", fallback=db_url) + + +@pytest.fixture +def table_id(): + now = datetime.datetime.now() + table_id = "example_table_{}_{}".format( + now.strftime("%Y%m%d%H%M%S"), uuid.uuid4().hex[:8] + ) + return table_id + + +@pytest.fixture +def table(db_url, table_id): + engine = create_engine(db_url) + metadata = MetaData(bind=engine) + + table = Table( + table_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + table.create() + yield table + table.drop() + + +@pytest.fixture +def table_w_foreign_key(db_url, table): + engine = create_engine(db_url) + metadata = MetaData(bind=engine) + + table_fk = Table( + "table_fk", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(16), nullable=False), + Column( + table.name + "_user_id", + Integer, + ForeignKey(table.c.user_id, name=table.name + "user_id"), + ), + ) + table_fk.create() + yield table_fk + table_fk.drop() + + +@pytest.fixture +def connection(db_url): + engine = create_engine(db_url) + return engine.connect() + + +def insert_data(conn, table, data): + conn.execute(table.insert(), data) diff --git a/samples/snippets.py b/samples/snippets.py new file mode 100644 index 00000000..b309ea0b --- /dev/null +++ b/samples/snippets.py @@ -0,0 +1,337 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bs + +""" +This application demonstrates how to do basic operations with Cloud +Spanner database. +For more information, see the README.md under /python-spanner-sqlalchemy. 
+""" + +from sqlalchemy import ( + Column, + create_engine, + Index, + Integer, + inspect, + MetaData, + String, + Table, +) + + +# [START spanner_sqlalchemy_autocommit_on] +def enable_autocommit_mode(connection, url): + """Enable AUTOCOMMIT mode.""" + level = connection.get_isolation_level() + print("Connection default mode is {}.".format(level)) + + connection.execution_options(isolation_level="AUTOCOMMIT") + level = connection.get_isolation_level() + print("Connection mode is now {}.".format(level)) + + +# [END spanner_sqlalchemy_autocommit_on] + + +# [START spanner_sqlalchemy_create_table] +def create_table(url, table_id): + """Create a table.""" + engine = create_engine(url) + metadata = MetaData(bind=engine) + + table = Table( + table_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + table.create() + + print("Table {} successfully created.".format(table.name)) + + +# [END spanner_sqlalchemy_create_table] + + +# [START spanner_sqlalchemy_drop_table] +def drop_table(table): + """Drop the table.""" + table.drop() + + print("Table {} successfully dropped.".format(table.name)) + + +# [END spanner_sqlalchemy_drop_table] + + +# [START spanner_sqlalchemy_get_table_names] +def get_table_names(url): + """Retrieve the list of the table names in the database. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + names = insp.get_table_names() + + print("Retrieved table names:") + for name in names: + print(name) + + +# [END spanner_sqlalchemy_get_table_names] + + +# [START spanner_sqlalchemy_create_unique_index] +def create_unique_index(table): + """Create unique index. + + The table must already exist and can be created using + `create_table.` + """ + index = Index("some_index", table.c.user_name, unique=True) + index.create() + print("Index created.") + + +# [END spanner_sqlalchemy_create_unique_index] + + +# [START spanner_sqlalchemy_delete_all_rows] +def delete_all_rows(connection, table): + """Delete all rows from the table. + + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + print("Row count:", len(rows)) + + connection.execute(table.delete()) + + rows = connection.execute(table.select()).fetchall() + print("Row count after deletion:", len(rows)) + + +# [END spanner_sqlalchemy_delete_all_rows] + + +# [START spanner_sqlalchemy_delete_row] +def delete_row_with_where_clause(connection, table): + """Delete a row. + + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + print("Row count:", len(rows)) + + connection.execute(table.delete().where(table.c.user_id == 1)) + + rows = connection.execute(table.select()).fetchall() + print("Row count after deletion:", len(rows)) + + +# [END spanner_sqlalchemy_delete_row] + + +# [START spanner_sqlalchemy_table_exists] +def table_exists(table): + """Check the table exists. + + The table must already exist and can be created using + `create_table.` + """ + result = table.exists() + if result is True: + print("Table exists.") + + +# [END spanner_sqlalchemy_table_exists] + + +# [START spanner_sqlalchemy_fetch_rows] +def fetch_rows(connection, table): + """Fetch all rows from the table. 
+ + The table must already exist and can be created using + `create_table.` + """ + rows = connection.execute(table.select()).fetchall() + + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows] + + +# [START spanner_sqlalchemy_fetch_row] +def fetch_row_with_where_clause(connection, table): + """Fetch row with a WHERE clause. + + The table must already exist and can be created using + `create_table.` + """ + row = list(connection.execute(table.select().where(table.c.user_id == 1))) + + print("Fetched row: ", row) + + +# [END spanner_sqlalchemy_fetch_row] + + +# [START spanner_sqlalchemy_fetch_rows_with_limit_offset] +def fetch_rows_with_limit_offset(connection, table): + """Fetch rows from the table with LIMIT and OFFSET clauses. + + The table must already exist and can be created using + `create_table.` + """ + rows = list(connection.execute(table.select().limit(2).offset(1))) + + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows_with_limit_offset] + + +# [START spanner_sqlalchemy_fetch_rows_with_order_by] +def fetch_rows_with_order_by(connection, table): + """Fetch all rows ordered. + + The table must already exist and can be created using + `create_table.` + """ + rows = list( + connection.execute(table.select().order_by(table.c.user_name)).fetchall() + ) + print("Ordered rows: ", rows) + + +# [END spanner_sqlalchemy_fetch_rows_with_order_by] + + +# [START spanner_sqlalchemy_filter_data_startswith] +def filter_data_startswith(connection, table): + """Filter data with STARTSWITH clause. + + The table must already exist and can be created using + `create_table.` + """ + rows = list( + connection.execute(table.select().where(table.c.user_name.startswith("abcd%"))) + ) + print("Fetched rows: ", rows) + + +# [END spanner_sqlalchemy_filter_data_startswith] + + +# [START spanner_sqlalchemy_get_table_columns] +def get_table_columns(url, table): + """Retrieve the list of columns of the table. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + columns = insp.get_columns(table.name) + + print("Fetched columns: ", columns) + + +# [END spanner_sqlalchemy_get_table_columns] + + +# [START spanner_sqlalchemy_get_foreign_key] +def get_table_foreign_key(url, table): + """Retrieve a Foreign Key. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + f_keys = insp.get_foreign_keys(table.name) + + if f_keys: + print("Fetched foreign keys: ", f_keys) + + +# [END spanner_sqlalchemy_get_foreign_key] + + +# [START spanner_sqlalchemy_get_indexes] +def get_table_indexes(url, table): + """Retrieve the table indexes. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + indexes = insp.get_indexes(table.name) + + if indexes: + print("Fetched indexes: ", indexes) + + +# [END spanner_sqlalchemy_get_indexes] + + +# [START spanner_sqlalchemy_get_primary_key] +def get_table_primary_key(url, table): + """Retrieve the table Primary Key. + + The table must already exist and can be created using + `create_table.` + """ + engine = create_engine(url) + insp = inspect(engine) + p_key = insp.get_pk_constraint(table.name) + + if p_key: + print("Fetched primary key: ", p_key) + + +# [END spanner_sqlalchemy_get_primary_key] + + +# [START spanner_sqlalchemy_insert_row] +def insert_row(connection, table): + """Insert row into the table. 
+ + The table must already exist and can be created using + `create_table.` + """ + connection.execute(table.insert(), {"user_id": 1, "user_name": "ABC"}) + + row = list(connection.execute(table.select())) + + print("Inserted row: ", row) + + +# [END spanner_sqlalchemy_insert_row] + + +# [START spanner_sqlalchemy_update_row] +def update_row(connection, table): + """Update a row in the table. + + The table must already exist and can be created using + `create_table.` + """ + connection.execute( + table.update().where(table.c.user_id == 2).values(user_name="GEH") + ) + row = list(connection.execute(table.select().where(table.c.user_id == 2))) + + print("Updated row: ", row) + + +# [END spanner_sqlalchemy_update_row] diff --git a/samples/snippets_test.py b/samples/snippets_test.py new file mode 100644 index 00000000..9866d638 --- /dev/null +++ b/samples/snippets_test.py @@ -0,0 +1,227 @@ +# Copyright 2021 Google LLC +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file or at +# https://developers.google.com/open-source/licenses/bs + +from samples import snippets +from samples.conftest import insert_data +from sqlalchemy import ( + Column, + create_engine, + Index, + Integer, + inspect, + MetaData, + String, + Table, +) + +DATA = [ + {"user_id": 1, "user_name": "abcdefg"}, + {"user_id": 2, "user_name": "ab/cdefg"}, + {"user_id": 3, "user_name": "ab%cdefg"}, + {"user_id": 4, "user_name": "ab_cdefg"}, + {"user_id": 5, "user_name": "abcde/fg"}, + {"user_id": 6, "user_name": "abcde%fg"}, +] + + +def table_obj(database_url, tab_id): + """Helper to produce a `Table` object for the given table id.""" + engine = create_engine(database_url) + metadata = MetaData(bind=engine) + + table = Table( + tab_id, + metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + return table + + +def test_enable_autocommit_mode(capsys, connection, db_url): + snippets.enable_autocommit_mode(connection, db_url) + + out, err = capsys.readouterr() + assert "Connection default mode is SERIALIZABLE" in out + assert "Connection mode is now AUTOCOMMIT" in out + + +def test_create_table(capsys, db_url, table_id): + snippets.create_table(db_url, table_id) + + out, err = capsys.readouterr() + assert "Table {} successfully created".format(table_id) in out + + table = table_obj(db_url, table_id) + assert table.exists() is True + table.drop() + + +def test_drop_table(capsys, db_url, table_id): + table = table_obj(db_url, table_id) + table.create() + + snippets.drop_table(table) + + out, err = capsys.readouterr() + assert "Table {} successfully dropped".format(table_id) in out + assert table.exists() is False + + +def test_get_table_names(capsys, db_url, table): + snippets.get_table_names(db_url) + + out, err = capsys.readouterr() + assert "Retrieved table names:" in out + assert table.name in out + + +def test_table_create_unique_index(capsys, db_url, table): + snippets.create_unique_index(table) + + engine = create_engine(db_url) + insp = inspect(engine) + indexes = insp.get_indexes(table.name) + + out, err = capsys.readouterr() + + assert "Index created" in out + assert indexes[0]["unique"] is True + + +def test_table_delete_all_rows(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.delete_all_rows(connection, table) + + out, err = capsys.readouterr() + assert "Row count: 6" in out + assert "Row count after deletion: 0" in out + + +def test_table_delete_row_with_where_clause(capsys, connection, 
table): + insert_data(connection, table, DATA) + snippets.delete_row_with_where_clause(connection, table) + + out, err = capsys.readouterr() + assert "Row count: 6" in out + assert "Row count after deletion: 5" in out + + +def test_exists_table(capsys, table): + snippets.table_exists(table) + + out, err = capsys.readouterr() + assert "Table exists" in out + + +def test_table_fetch_rows(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + + for row in DATA: # check that all rows were fetched + assert str(tuple(row.values())) in out + + +def test_table_fetch_row_with_where_clause(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_row_with_where_clause(connection, table) + + out, err = capsys.readouterr() + assert str(tuple(DATA[0].values())) in out + + +def test_table_fetch_rows_with_limit_offset(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows_with_limit_offset(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + assert str(tuple(DATA[1].values())) in out + assert str(tuple(DATA[2].values())) in out + + +def test_table_fetch_rows_with_order_by(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.fetch_rows_with_order_by(connection, table) + + out, err = capsys.readouterr() + assert "Ordered rows:" in out + + rows = [] + for row in sorted(DATA, key=lambda r: r["user_name"]): + rows.append(tuple(row.values())) + + assert str(rows) in out + + +def test_table_filter_data_startswith(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.filter_data_startswith(connection, table) + + out, err = capsys.readouterr() + assert "Fetched rows:" in out + + rows = [] + for ind in (0, 4, 5): + rows.append(tuple(DATA[ind].values())) + + assert str(rows) in out + + +def test_table_get_columns(capsys, db_url, table): + snippets.get_table_columns(db_url, table) + out, err = capsys.readouterr() + assert "Fetched columns:" in out + + for col in table.columns: + assert col.name in out + + +def test_table_get_foreign_key(capsys, db_url, table_w_foreign_key): + snippets.get_table_foreign_key(db_url, table_w_foreign_key) + out, err = capsys.readouterr() + + assert "Fetched foreign keys:" in out + + +def test_table_get_indexes(capsys, db_url, table): + index = Index("some_index", table.c.user_name, unique=True) + index.create() + + snippets.get_table_indexes(db_url, table) + out, err = capsys.readouterr() + + assert "Fetched indexes:" in out + + +def test_table_get_primary_key(capsys, db_url, table): + snippets.get_table_primary_key(db_url, table) + out, err = capsys.readouterr() + assert "Fetched primary key:" in out + + +def test_table_insert_row(capsys, connection, table): + snippets.insert_row(connection, table) + + out, err = capsys.readouterr() + assert "Inserted row:" in out + + rows = list(connection.execute(table.select())) + assert len(rows) == 1 + + +def test_table_update_row(capsys, connection, table): + insert_data(connection, table, DATA) + snippets.update_row(connection, table) + + out, err = capsys.readouterr() + assert "Updated row:" in out + + rows = list(connection.execute(table.select())) + rows[0][1] == "GEH" diff --git a/setup.cfg b/setup.cfg index 7050a471..fd3202c4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,9 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the 
License. -[egg_info] -tag_build = dev - [tool:pytest] addopts= --tb native -v -r fxX --maxfail=25 -p no:warnings python_files=test/*test_*.py @@ -24,6 +21,3 @@ python_files=test/*test_*.py [sqla_testing] requirement_cls=google.cloud.sqlalchemy_spanner.requirements:Requirements profile_file=test/profiles.txt - -[db] -default=spanner:///projects/appdev-soda-spanner-staging/instances/sqlalchemy-dialect-test/databases/compliance-test \ No newline at end of file diff --git a/setup.py b/setup.py index 3852af3d..c2cfe3ea 100644 --- a/setup.py +++ b/setup.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import setuptools @@ -19,7 +20,11 @@ name = "sqlalchemy-spanner" description = "SQLAlchemy dialect integrated into Cloud Spanner database" -dependencies = ["sqlalchemy>=1.1.13, <=1.3.23", "google-cloud-spanner>=3.3.0"] +dependencies = [ + "sqlalchemy>=1.1.13, <=1.3.23", + "google-cloud-spanner>=3.3.0", + "alembic", +] extras = { "tracing": [ "opentelemetry-api >= 1.1.0", @@ -28,6 +33,13 @@ ] } +BASE_DIR = os.path.dirname(__file__) +VERSION_FILENAME = os.path.join(BASE_DIR, "version.py") +PACKAGE_INFO = {} +with open(VERSION_FILENAME) as f: + exec(f.read(), PACKAGE_INFO) +version = PACKAGE_INFO["__version__"] + # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ @@ -57,7 +69,7 @@ namespace_packages=namespaces, packages=packages, url="https://github.com/cloudspannerecosystem/python-spanner-sqlalchemy", - version="0.1", + version=version, include_package_data=True, zip_safe=False, ) diff --git a/test/_helpers.py b/test/_helpers.py index c9d9ba74..dd18a149 100644 --- a/test/_helpers.py +++ b/test/_helpers.py @@ -5,7 +5,9 @@ # https://developers.google.com/open-source/licenses/bsd +import configparser import mock +import os from sqlalchemy.testing import fixtures try: @@ -29,6 +31,24 @@ _TEST_OT_PROVIDER_INITIALIZED = False +PROJECT = os.getenv( + "GOOGLE_CLOUD_PROJECT", os.getenv("PROJECT_ID", "emulator-test-project"), +) +DB_URL = ( + f"spanner:///projects/{PROJECT}/instances/" + "sqlalchemy-dialect-test/databases/compliance-test" +) + + +def get_db_url(): + config = configparser.ConfigParser() + if os.path.exists("test.cfg"): + config.read("test.cfg") + else: + config.read("setup.cfg") + return config.get("db", "default", fallback=DB_URL) + + def get_test_ot_exporter(): global _TEST_OT_EXPORTER diff --git a/test/benchmark.py b/test/benchmark.py index be027ade..0ff50ae5 100644 --- a/test/benchmark.py +++ b/test/benchmark.py @@ -16,6 +16,7 @@ A test suite to check Spanner dialect for SQLAlchemy performance in comparison with the original Spanner client. """ +import base64 import datetime import random from scipy.stats import sem @@ -23,6 +24,8 @@ import time from google.api_core.exceptions import Aborted +from google.api_core.exceptions import NotFound +from google.cloud import spanner from google.cloud import spanner_dbapi from google.cloud.spanner_v1 import Client, KeySet from sqlalchemy import ( @@ -34,6 +37,10 @@ Table, ) +PROJECT = "project-id" +INSTANCE = "instance-id" +DATABASE = "database-id" + def measure_execution_time(function): """Decorator to measure a wrapped method execution time.""" @@ -60,26 +67,42 @@ class BenchmarkTestBase: Organizes testing data preparation and cleanup. 
""" + _many_rows_ids = [] + _many_rows2_ids = [] + def __init__(self): + self._cleanup() self._create_table() - self._one_row = ( - 1, - "Pete", - "Allison", - datetime.datetime(1998, 10, 6).strftime("%Y-%m-%d"), - b"123", - ) + self._one_row = { + "id": 1, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": datetime.date(1998, 10, 6), + "picture": b"123", + } + self.keys = set([1]) + if not self._many_rows_ids: + for i in range(99): + self._many_rows_ids.append(self._generate_id()) + self._many_rows2_ids.append(self._generate_id()) def _cleanup(self): """Drop the test table.""" - conn = spanner_dbapi.connect("sqlalchemy-dialect-test", "compliance-test") - conn.database.update_ddl(["DROP TABLE Singers"]) + conn = spanner_dbapi.connect(INSTANCE, DATABASE) + try: + conn.database.update_ddl(["DROP TABLE Singers"]) + except NotFound: + pass conn.close() def _create_table(self): """Create a table for performace testing.""" - conn = spanner_dbapi.connect("sqlalchemy-dialect-test", "compliance-test") + conn = spanner_dbapi.connect(INSTANCE, DATABASE) + try: + conn.database.update_ddl(["DROP TABLE Singers"]) + except NotFound: + pass conn.database.update_ddl( [ """ @@ -92,10 +115,17 @@ def _create_table(self): ) PRIMARY KEY (id) """ ] - ).result(120) + ).result() conn.close() + def _generate_id(self): + num = 1 + while num in self.keys: + num = round(random.random() * 1000000) + self.keys.add(num) + return num + def run(self): """Execute every test case.""" measures = {} @@ -113,22 +143,30 @@ def run(self): class SpannerBenchmarkTest(BenchmarkTestBase): - """The original Spanner performace testing class.""" + """The original Spanner performance testing class.""" def __init__(self): super().__init__() self._client = Client() - self._instance = self._client.instance("sqlalchemy-dialect-test") - self._database = self._instance.database("compliance-test") + self._instance = self._client.instance(INSTANCE) + self._database = self._instance.database(DATABASE) self._many_rows = [] self._many_rows2 = [] - birth_date = datetime.datetime(1998, 10, 6).strftime("%Y-%m-%d") - for i in range(99): - num = round(random.random() * 1000000) - self._many_rows.append((num, "Pete", "Allison", birth_date, b"123")) - num2 = round(random.random() * 1000000) - self._many_rows2.append((num2, "Pete", "Allison", birth_date, b"123")) + birth_date = datetime.date(1998, 10, 6) + picture = base64.b64encode(u"123".encode()) + for num in self._many_rows_ids: + self._many_rows.append( + { + "id": num, + "first_name": "Pete", + "last_name": "Allison", + "birth_date": birth_date, + "picture": picture, + } + ) + for num in self._many_rows2_ids: + self._many_rows2.append((num, "Pete", "Allison", birth_date, picture)) # initiate a session with self._database.snapshot(): @@ -177,8 +215,9 @@ class SQLAlchemyBenchmarkTest(BenchmarkTestBase): def __init__(self): super().__init__() self._engine = create_engine( - "spanner:///projects/appdev-soda-spanner-staging/instances/" - "sqlalchemy-dialect-test/databases/compliance-test" + "spanner:///projects/{project}/instances/{instance}/databases/{db}".format( + project=PROJECT, instance=INSTANCE, db=DATABASE, + ) ) metadata = MetaData(bind=self._engine) self._table = Table("Singers", metadata, autoload=True) @@ -187,9 +226,8 @@ def __init__(self): self._many_rows = [] self._many_rows2 = [] - birth_date = datetime.datetime(1998, 10, 6).strftime("%Y-%m-%d") - for i in range(99): - num = round(random.random() * 1000000) + birth_date = datetime.date(1998, 10, 6) + for num in 
self._many_rows_ids: self._many_rows.append( { "id": num, @@ -199,10 +237,10 @@ def __init__(self): "picture": b"123", } ) - num2 = round(random.random() * 1000000) + for num in self._many_rows2_ids: self._many_rows2.append( { - "id": num2, + "id": num, "first_name": "Pete", "last_name": "Allison", "birth_date": birth_date, @@ -250,8 +288,16 @@ def insert_one_row(transaction, one_row): Inserts a single row into a database and then fetches it back. """ transaction.execute_update( - "INSERT Singers (id, first_name, last_name, birth_date, picture) " - " VALUES {}".format(str(one_row)) + "INSERT INTO `Singers` (id, first_name, last_name, birth_date, picture)" + " VALUES (@id, @first_name, @last_name, @birth_date, @picture)", + params=one_row, + param_types={ + "id": spanner.param_types.INT64, + "first_name": spanner.param_types.STRING, + "last_name": spanner.param_types.STRING, + "birth_date": spanner.param_types.DATE, + "picture": spanner.param_types.BYTES, + }, ) last_name = transaction.execute_sql( "SELECT last_name FROM Singers WHERE id=1" @@ -268,8 +314,18 @@ def insert_many_rows(transaction, many_rows): statements = [] for row in many_rows: statements.append( - "INSERT Singers (id, first_name, last_name, birth_date, picture) " - " VALUES {}".format(str(row)) + ( + "INSERT INTO `Singers` (id, first_name, last_name, birth_date, picture)" + " VALUES (@id, @first_name, @last_name, @birth_date, @picture)", + row, + { + "id": spanner.param_types.INT64, + "first_name": spanner.param_types.STRING, + "last_name": spanner.param_types.STRING, + "birth_date": spanner.param_types.DATE, + "picture": spanner.param_types.BYTES, + }, + ) ) _, count = transaction.batch_update(statements) if sum(count) != 99: diff --git a/test/test_suite.py b/test/test_suite.py index 85274ddc..5559cfad 100644 --- a/test/test_suite.py +++ b/test/test_suite.py @@ -14,11 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import datetime import decimal import operator import os +import pkg_resources import pytest -import pytz +import random +import unittest from unittest import mock import sqlalchemy @@ -28,12 +31,13 @@ from sqlalchemy import ForeignKey from sqlalchemy import MetaData from sqlalchemy.schema import DDL +from sqlalchemy.schema import Computed from sqlalchemy.testing import config from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import provide_metadata, emits_warning from sqlalchemy.testing import fixtures -from sqlalchemy.testing.provision import temp_table_keyword_args +from sqlalchemy.testing import is_true from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table from sqlalchemy import literal_column @@ -48,13 +52,16 @@ from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relation from sqlalchemy.orm import Session +from sqlalchemy.types import ARRAY from sqlalchemy.types import Integer from sqlalchemy.types import Numeric from sqlalchemy.types import Text from sqlalchemy.testing import requires +from sqlalchemy.testing.fixtures import ( + ComputedReflectionFixtureTest as _ComputedReflectionFixtureTest, +) from google.api_core.datetime_helpers import DatetimeWithNanoseconds - from google.cloud import spanner_dbapi from sqlalchemy.testing.suite.test_cte import * # noqa: F401, F403 @@ -87,18 +94,21 @@ QuotedNameArgumentTest as _QuotedNameArgumentTest, ComponentReflectionTest as _ComponentReflectionTest, CompositeKeyReflectionTest as _CompositeKeyReflectionTest, + ComputedReflectionTest as _ComputedReflectionTest, ) from sqlalchemy.testing.suite.test_results import RowFetchTest as _RowFetchTest from sqlalchemy.testing.suite.test_types import ( # noqa: F401, F403 + _DateFixture as _DateFixtureTest, + _LiteralRoundTripFixture, + _UnicodeFixture as _UnicodeFixtureTest, BooleanTest as _BooleanTest, DateTest as _DateTest, - _DateFixture as _DateFixtureTest, DateTimeHistoricTest, DateTimeCoercedToDateTimeTest as _DateTimeCoercedToDateTimeTest, DateTimeMicrosecondsTest as _DateTimeMicrosecondsTest, DateTimeTest as _DateTimeTest, IntegerTest as _IntegerTest, - _LiteralRoundTripFixture, + JSONTest as _JSONTest, NumericTest as _NumericTest, StringTest as _StringTest, TextTest as _TextTest, @@ -107,8 +117,8 @@ TimestampMicrosecondsTest, UnicodeVarcharTest as _UnicodeVarcharTest, UnicodeTextTest as _UnicodeTextTest, - _UnicodeFixture as _UnicodeFixtureTest, ) +from test._helpers import get_db_url config.test_schema = "" @@ -667,7 +677,6 @@ def define_temp_tables(cls, metadata): creating unique constraints. Overriding the test to replace constraints with indexes in testing data. 
""" - kw = temp_table_keyword_args(config, config.db) user_tmp = Table( "user_tmp", metadata, @@ -676,7 +685,6 @@ def define_temp_tables(cls, metadata): Column("foo", sqlalchemy.INT), sqlalchemy.Index("user_tmp_uq", "name", unique=True), sqlalchemy.Index("user_tmp_ix", "foo"), - **kw ) if ( testing.requires.view_reflection.enabled @@ -720,6 +728,7 @@ def test_reflect_bytes_column_max_len(self): self.metadata.create_all() Table("bytes_table", MetaData(bind=self.bind), autoload=True) + inspect(config.db).get_columns("bytes_table") @testing.provide_metadata def _test_get_unique_constraints(self, schema=None): @@ -900,6 +909,33 @@ def test_binary_reflection(self): assert isinstance(typ, LargeBinary) eq_(typ.length, 20) + @testing.requires.table_reflection + def test_array_reflection(self): + """Check array columns reflection.""" + orig_meta = self.metadata + + str_array = ARRAY(String(16)) + int_array = ARRAY(Integer) + Table( + "arrays_test", + orig_meta, + Column("id", Integer, primary_key=True), + Column("str_array", str_array), + Column("int_array", int_array), + ) + orig_meta.create_all() + + # autoload the table and check its columns reflection + tab = Table("arrays_test", orig_meta, autoload=True) + col_types = [col.type for col in tab.columns] + for type_ in ( + str_array, + int_array, + ): + assert type_ in col_types + + tab.drop() + class CompositeKeyReflectionTest(_CompositeKeyReflectionTest): @testing.requires.foreign_key_constraint_reflection @@ -945,7 +981,9 @@ def test_row_w_scalar_select(self): eq_( row["somelabel"], - DatetimeWithNanoseconds(2006, 5, 12, 12, 0, 0, tzinfo=pytz.UTC), + DatetimeWithNanoseconds( + 2006, 5, 12, 12, 0, 0, tzinfo=datetime.timezone.utc + ), ) @@ -1472,19 +1510,20 @@ class Address(Base): assert str(query.statement.compile(session.bind)) == EXPECTED_QUERY -class InterleavedTablesTest(fixtures.TestBase): +class SpannerSpecificTestBase(fixtures.TestBase): + """Base class for the Cloud Spanner related tests.""" + + def setUp(self): + self._engine = create_engine(get_db_url()) + self._metadata = MetaData(bind=self._engine) + + +class InterleavedTablesTest(SpannerSpecificTestBase): """ Check that CREATE TABLE statements for interleaved tables are correctly generated. 
""" - def setUp(self): - self._engine = create_engine( - "spanner:///projects/appdev-soda-spanner-staging/instances/" - "sqlalchemy-dialect-test/databases/compliance-test" - ) - self._metadata = MetaData(bind=self._engine) - def test_interleave(self): EXP_QUERY = ( "\nCREATE TABLE client (\n\tteam_id INT64 NOT NULL, " @@ -1520,8 +1559,321 @@ def test_interleave_on_delete_cascade(self): Column("client_id", Integer, primary_key=True), Column("client_name", String(16), nullable=False), spanner_interleave_in="team", - spanner_inverleave_on_delete_cascade=True, + spanner_interleave_on_delete_cascade=True, ) with mock.patch("google.cloud.spanner_dbapi.cursor.Cursor.execute") as execute: client.create(self._engine) execute.assert_called_once_with(EXP_QUERY, []) + + +class UserAgentTest(SpannerSpecificTestBase): + """Check that SQLAlchemy dialect uses correct user agent.""" + + def test_user_agent(self): + dist = pkg_resources.get_distribution("sqlalchemy-spanner") + + with self._engine.connect() as connection: + assert ( + connection.connection.instance._client._client_info.user_agent + == f"gl-{dist.project_name}/{dist.version}" + ) + + +class ExecutionOptionsTest(fixtures.TestBase, unittest.TestCase): + """ + Check that `execution_options()` method correctly + sets parameters on the underlying DB API connection. + """ + + @classmethod + def setUpClass(cls): + cls._engine = create_engine(get_db_url(), pool_size=1) + cls._metadata = MetaData(bind=cls._engine) + + cls._table = Table( + "execution_options", + cls._metadata, + Column("opt_id", Integer, primary_key=True), + Column("opt_name", String(16), nullable=False), + ) + + cls._metadata.create_all(cls._engine) + + def test_read_only(self): + with self._engine.connect().execution_options(read_only=True) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + assert connection.connection.read_only is True + + def test_staleness(self): + with self._engine.connect().execution_options( + read_only=True, staleness={"exact_staleness": datetime.timedelta(seconds=5)} + ) as connection: + connection.execute(select(["*"], from_obj=self._table)).fetchall() + assert connection.connection.staleness == { + "exact_staleness": datetime.timedelta(seconds=5) + } + + with self._engine.connect() as connection: + assert connection.connection.staleness is None + + +class LimitOffsetTest(fixtures.TestBase): + """ + Check that SQL with an offset and no limit is being generated correctly. + """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + self._metadata = MetaData(bind=self._engine) + + self._table = Table( + "users", + self._metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + ) + + self._metadata.create_all(self._engine) + + def test_offset_only(self): + for offset in [1, 7, 10, 100, 1000, 10000]: + + with self._engine.connect().execution_options(read_only=True) as connection: + list(connection.execute(self._table.select().offset(offset)).fetchall()) + + +class TemporaryTableTest(fixtures.TestBase): + """ + Check that temporary tables raise an error on creation. 
+ """ + + def setUp(self): + self._engine = create_engine(get_db_url(), pool_size=1) + self._metadata = MetaData(bind=self._engine) + + def test_temporary_prefix(self): + with pytest.raises(NotImplementedError): + Table( + "users", + self._metadata, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + prefixes=["TEMPORARY"], + ).create() + + +class ComputedReflectionFixtureTest(_ComputedReflectionFixtureTest): + @classmethod + def define_tables(cls, metadata): + """SPANNER OVERRIDE: + + Avoid using default values for computed columns. + """ + Table( + "computed_default_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_col", Integer, Computed("normal + 42")), + Column("with_default", Integer), + ) + + t = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal + 42")), + ) + + if testing.requires.schemas.enabled: + t2 = Table( + "computed_column_table", + metadata, + Column("id", Integer, primary_key=True), + Column("normal", Integer), + Column("computed_no_flag", Integer, Computed("normal / 42")), + schema=config.test_schema, + ) + + if testing.requires.computed_columns_virtual.enabled: + t.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal + 2", persisted=False), + ) + ) + if testing.requires.schemas.enabled: + t2.append_column( + Column( + "computed_virtual", + Integer, + Computed("normal / 2", persisted=False), + ) + ) + if testing.requires.computed_columns_stored.enabled: + t.append_column( + Column( + "computed_stored", Integer, Computed("normal - 42", persisted=True), + ) + ) + if testing.requires.schemas.enabled: + t2.append_column( + Column( + "computed_stored", + Integer, + Computed("normal * 42", persisted=True), + ) + ) + + +class ComputedReflectionTest(_ComputedReflectionTest, ComputedReflectionFixtureTest): + @pytest.mark.skip("Default values are not supported.") + def test_computed_col_default_not_set(self): + pass + + def test_get_column_returns_computed(self): + """ + SPANNER OVERRIDE: + + In Spanner all the generated columns are STORED, + meaning there are no persisted and not persisted + (in the terms of the SQLAlchemy) columns. The + method override omits the persistence reflection checks. 
+ """ + insp = inspect(config.db) + + cols = insp.get_columns("computed_default_table") + data = {c["name"]: c for c in cols} + for key in ("id", "normal", "with_default"): + is_true("computed" not in data[key]) + compData = data["computed_col"] + is_true("computed" in compData) + is_true("sqltext" in compData["computed"]) + eq_(self.normalize(compData["computed"]["sqltext"]), "normal+42") + + +@pytest.mark.skipif( + bool(os.environ.get("SPANNER_EMULATOR_HOST")), reason="Skipped on emulator" +) +class JSONTest(_JSONTest): + @pytest.mark.skip("Values without keys are not supported.") + def test_single_element_round_trip(self, element): + pass + + def _test_round_trip(self, data_element): + data_table = self.tables.data_table + + config.db.execute( + data_table.insert(), + {"id": random.randint(1, 100000000), "name": "row1", "data": data_element}, + ) + + row = config.db.execute(select([data_table.c.data])).first() + + eq_(row, (data_element,)) + + def test_unicode_round_trip(self): + # note we include Unicode supplementary characters as well + with config.db.connect() as conn: + conn.execute( + self.tables.data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "r1", + "data": { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + }, + ) + + eq_( + conn.scalar(select([self.tables.data_table.c.data])), + { + util.u("réve🐍 illé"): util.u("réve🐍 illé"), + "data": {"k1": util.u("drôl🐍e")}, + }, + ) + + @pytest.mark.skip("Parameterized types are not supported.") + def test_eval_none_flag_orm(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." + ) + def test_index_typed_comparison(self): + pass + + @pytest.mark.skip( + "Spanner JSON_VALUE() always returns STRING," + "thus, this test case can't be executed." + ) + def test_path_typed_comparison(self): + pass + + @pytest.mark.skip("Custom JSON de-/serializers are not supported.") + def test_round_trip_custom_json(self): + pass + + def _index_fixtures(fn): + fn = testing.combinations( + ("boolean", True), + ("boolean", False), + ("boolean", None), + ("string", "some string"), + ("string", None), + ("integer", 15), + ("integer", 1), + ("integer", 0), + ("integer", None), + ("float", 28.5), + ("float", None), + id_="sa", + )(fn) + return fn + + @_index_fixtures + def test_index_typed_access(self, datatype, value): + data_table = self.tables.data_table + data_element = {"key1": value} + with config.db.connect() as conn: + conn.execute( + data_table.insert(), + { + "id": random.randint(1, 100000000), + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + expr = data_table.c.data["key1"] + expr = getattr(expr, "as_%s" % datatype)() + + roundtrip = conn.scalar(select([expr])) + if roundtrip in ("true", "false", None): + roundtrip = str(roundtrip).capitalize() + + eq_(str(roundtrip), str(value)) + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_json_null_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." + ) + def test_round_trip_none_as_json_null(self): + pass + + @pytest.mark.skip( + "Spanner doesn't support type casts inside JSON_VALUE() function." 
+    )
+    def test_round_trip_none_as_sql_null(self):
+        pass
diff --git a/test_migration_env.py b/test_migration_env.py
index 883ebc1e..a87e356b 100644
--- a/test_migration_env.py
+++ b/test_migration_env.py
@@ -1,3 +1,17 @@
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from logging.config import fileConfig

 from sqlalchemy import engine_from_config
diff --git a/version.py b/version.py
new file mode 100644
index 00000000..498b5ee5
--- /dev/null
+++ b/version.py
@@ -0,0 +1,7 @@
+# Copyright 2021 Google LLC
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+__version__ = "1.0.0"
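A closing note on the JSON tests: test_index_typed_access drives Spanner's JSON_VALUE() through SQLAlchemy's typed accessors. A minimal sketch of that expression construction in isolation, with an illustrative table and column (only the as_<type>() accessors and the STRING behavior are taken from the tests themselves):

    from sqlalchemy import JSON, Column, Integer, MetaData, Table, select

    metadata = MetaData()
    data_table = Table(
        "data_table",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", JSON),
    )

    # data["key1"].as_integer() compiles to a JSON_VALUE() extraction.
    # Spanner returns that value as a STRING, which is why the tests
    # above compare stringified results.
    expr = data_table.c.data["key1"].as_integer()
    print(select([expr]))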