diff --git a/.gitignore b/.gitignore index 34e7a5bb..8d2441d7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,20 @@ -*.pyc - -docs/_build - -.*.swp -.coverage - +*~ +#*# +_build/ build/ +.coverage dist/ -riak.egg-info/ +docsrc/doctrees/ *.egg - -#*# -*~ +.eggs/ +envs/ +.idea/ +py-build/ +*.pyc +__pycache__/ +.python-version +README.rst +riak-*/ +riak.egg-info/ +.*.swp +.tox/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..510fba6e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,10 @@ +[submodule "riak_pb"] + path = riak_pb + url = git://github.com/basho/riak_pb.git +[submodule "tools"] + path = tools + url = git://github.com/basho/riak-client-tools.git +[submodule "docs"] + path = docs + url = https://github.com/basho/riak-python-client.git + branch = gh-pages diff --git a/.runner b/.runner new file mode 100755 index 00000000..91b20b5c --- /dev/null +++ b/.runner @@ -0,0 +1,162 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset + +have_tox='false' +if hash tox 2>/dev/null +then + echo '[INFO] tox command present, will use that to run tests' + have_tox='true' +fi + +have_py2='false' +if hash python2 2>/dev/null +then + have_py2='true' +fi + +have_py3='false' +if hash python3 2>/dev/null +then + have_py3='true' +fi + +have_riak_admin='false' +if hash riak-admin 2>/dev/null +then + have_riak_admin='true' + riak_admin='riak-admin' +else + set +o nounset + + if [[ -x $RIAK_ADMIN ]] + then + have_riak_admin='true' + riak_admin="$RIAK_ADMIN" + elif [[ -x $RIAK_DIR/bin/riak-admin ]] + then + have_riak_admin='true' + riak_admin="$RIAK_DIR/bin/riak-admin" + fi + + set -o nounset +fi + +function lint +{ + if ! 
hash flake8 2>/dev/null + then + pip install --upgrade flake8 + fi + flake8 --exclude=riak/pb riak *.py +} + +function run_tests +{ + local protocol="${1:-pbc}" + export RIAK_TEST_PROTOCOL="$protocol" + if [[ $have_tox == 'true' ]] + then + tox + else + if [[ $have_py2 == 'true' ]] + then + python2 setup.py test + fi + if [[ $have_py3 == 'true' ]] + then + python3 setup.py test + fi + fi +} + +function run_tests_each_protocol +{ + for protocol in pbc http + do + run_tests "$protocol" + done +} + +function export_host_environment_vars +{ + local riak_test_host="${RIAK_TEST_HOST:-localhost}" + local -i riak_test_pb_port="${RIAK_TEST_PB_PORT:-8087}" + local -i riak_test_http_port="${RIAK_TEST_HTTP_PORT:-8098}" + export RIAK_TEST_HOST="$riak_test_host" + export RIAK_TEST_PB_PORT="$riak_test_pb_port" + export RIAK_TEST_HTTP_PORT="$riak_test_http_port" +} + +function export_test_environment_vars +{ + export RUN_BTYPES=1 + export RUN_CLIENT=1 + export RUN_DATATYPES=1 + export RUN_INDEXES=1 + export RUN_KV=1 + export RUN_MAPREDUCE=1 + export RUN_RESOLVE=1 + export RUN_TIMESERIES=1 + export RUN_YZ=1 +} + +function unexport_test_environment_vars +{ + export RUN_BTYPES=0 + export RUN_CLIENT=0 + export RUN_DATATYPES=0 + export RUN_INDEXES=0 + export RUN_KV=0 + export RUN_MAPREDUCE=0 + export RUN_RESOLVE=0 + export RUN_TIMESERIES=0 + export RUN_YZ=0 +} + +function security_test +{ + if [[ $have_riak_admin == 'true' ]] + then + export_host_environment_vars + unexport_test_environment_vars + export RUN_SECURITY=1 + $riak_admin security enable + run_tests 'pbc' + else + echo '[ERROR] riak-admin must be in PATH, RIAK_ADMIN var set to path, or RIAK_DIR set.' 
1>&2 + exit 1 + fi +} + +function integration_test +{ + export_host_environment_vars + export_test_environment_vars + run_tests_each_protocol +} + +function timeseries_test +{ + unexport_test_environment_vars + export RUN_TIMESERIES=1 + run_tests_each_protocol +} + +arg="${1:-lint}" +case "$arg" in + 'lint') + lint;; + 'unit-test') + run_tests;; + 'integration-test') + integration_test;; + 'security-test') + security_test;; + 'timeseries-test') + timeseries_test;; + *) + echo "[ERROR] unknown argument: '$arg'" 1>&2 + exit 1;; +esac diff --git a/.travis.sh b/.travis.sh new file mode 100755 index 00000000..739c66cd --- /dev/null +++ b/.travis.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -o errexit + +flake8 --ignore E123,E126,E226,E722,E741 --exclude=riak/pb riak *.py + +sudo riak-admin security disable + +python setup.py test + +sudo riak-admin security enable + +if [[ $RIAK_TEST_PROTOCOL == 'pbc' ]] +then + export RUN_SECURITY=1 + python setup.py test --test-suite riak.tests.test_security +else + echo '[INFO]: security tests run on PB protocol only' +fi diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..7c46a5cd --- /dev/null +++ b/.travis.yml @@ -0,0 +1,39 @@ +sudo: required +dist: trusty +language: python +python: + - '2.7' + - '3.6' + - nightly +addons: + hosts: + - riak-test +install: + - pip install --upgrade pip setuptools flake8 +before_script: + - jdk_switcher use oraclejdk8 + - sudo ./tools/travis-ci/riak-install -d "$RIAK_DOWNLOAD_URL" + - sudo ./tools/setup-riak -s +env: + matrix: + - RIAK_TEST_PROTOCOL=pbc RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.0/2.0.7/ubuntu/trusty/riak_2.0.7-1_amd64.deb + - RIAK_TEST_PROTOCOL=http RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.0/2.0.7/ubuntu/trusty/riak_2.0.7-1_amd64.deb + - RIAK_TEST_PROTOCOL=pbc RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.2/2.2.0/ubuntu/trusty/riak_2.2.0-1_amd64.deb + - RIAK_TEST_PROTOCOL=http 
RIAK_DOWNLOAD_URL=http://s3.amazonaws.com/downloads.basho.com/riak/2.2/2.2.0/ubuntu/trusty/riak_2.2.0-1_amd64.deb + global: + - RIAK_TEST_PB_PORT=8087 + - RIAK_TEST_HTTP_PORT=8098 + - RUN_BTYPES=1 + - RUN_CLIENT=1 + - RUN_MAPREDUCE=1 + - RUN_KV=1 + - RUN_RESOLVE=1 + - RUN_YZ=1 + - RUN_DATATYPES=1 + - RUN_INDEXES=1 + - RUN_SECURITY=0 +script: + - ./.travis.sh +notifications: + slack: + secure: kU1XcvTAliCWKuYpMWEMbD4qkbmlnWGLAIKbBQjtIh5ZRzISgjdUFzGcC31eHoQFv12LQdp5KAFj0Y1FyEvLxi0W8VeWKpsBGc06ntuECaN9MNHRBzKKclrTMGTfpBWZ5IO17XSUu2lKaNz6GDGRkiZA+sxYAVPfZSXY3u86IuY= diff --git a/MANIFEST.in b/MANIFEST.in index e7c87e33..ddf59c00 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,8 @@ include docs/* include riak/erl_src/* -include THANKS +include README.md include README.rst include LICENSE -include RELEASE_NOTES.md +include RELNOTES.md include version.py -include commands.py \ No newline at end of file +include commands.py diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..166e4007 --- /dev/null +++ b/Makefile @@ -0,0 +1,130 @@ +unexport LANG +unexport LC_ADDRESS +unexport LC_COLLATE +unexport LC_CTYPE +unexport LC_IDENTIFICATION +unexport LC_MEASUREMENT +unexport LC_MESSAGES +unexport LC_MONETARY +unexport LC_NAME +unexport LC_NUMERIC +unexport LC_PAPER +unexport LC_TELEPHONE +unexport LC_TIME + +PANDOC_VERSION := $(shell pandoc --version) +PROTOC_VERSION := $(shell protoc --version) + +PROJDIR := $(realpath $(CURDIR)) +DOCSRC := $(PROJDIR)/docsrc +DOCTREES := $(DOCSRC)/doctrees +DOCSDIR := $(PROJDIR)/docs + +PYPI_REPOSITORY ?= pypi + +all: lint test + +.PHONY: lint +lint: + $(PROJDIR)/.runner lint + +.PHONY: docs +docs: + sphinx-build -b html -d $(DOCTREES) $(DOCSRC) $(DOCSDIR) + @echo "The HTML pages are in $(DOCSDIR)" + +.PHONY: pb_clean +pb_clean: + @echo "==> Python (clean)" + @rm -rf riak/pb/*_pb2.py riak/pb/*.pyc riak/pb/__pycache__ __pycache__ py-build + +.PHONY: pb_compile +pb_compile: pb_clean +ifeq ($(PROTOC_VERSION),) + $(error The 
protoc command is required to parse proto files) +endif +ifneq ($(PROTOC_VERSION),libprotoc 2.5.0) + $(error protoc must be version 2.5.0) +endif + @echo "==> Python (compile)" + @protoc -Iriak_pb/src --python_out=riak/pb riak_pb/src/*.proto + @python setup.py build_messages + +.PHONY: test_sdist +test_sdist: + @python setup.py sdist + +.PHONY: release_sdist +release_sdist: +ifeq ($(VERSION),) + $(error VERSION must be set to build a release and deploy this package) +endif +ifeq ($(PANDOC_VERSION),) + $(error The pandoc command is required to correctly convert README.md to rst format) +endif +ifeq ($(RELEASE_GPG_KEYNAME),) + $(error RELEASE_GPG_KEYNAME must be set to build a release and deploy this package) +endif +ifeq ("$(wildcard $(PROJDIR)/.python-version)","") + $(error expected $(PROJDIR)/.python-version to exist. Run $(PROJDIR)/build/pyenv-setup) +endif + @python -c 'import pypandoc' + @echo "==> Python tagging version $(VERSION)" + @$(PROJDIR)/build/publish $(VERSION) validate + @git tag --sign -a "$(VERSION)" -m "riak-python-client $(VERSION)" --local-user "$(RELEASE_GPG_KEYNAME)" + @git push --tags + @echo "==> pypi repository: $(PYPI_REPOSITORY)" + @echo "==> Python (sdist)" + @python setup.py sdist upload --repository $(PYPI_REPOSITORY) --show-response --sign --identity $(RELEASE_GPG_KEYNAME) + @$(PROJDIR)/build/publish $(VERSION) + +.PHONY: release +release: release_sdist +ifeq ($(RELEASE_GPG_KEYNAME),) + $(error RELEASE_GPG_KEYNAME must be set to build a release and deploy this package) +endif +ifeq ("$(wildcard $(PROJDIR)/.python-version)","") + $(error expected $(PROJDIR)/.python-version to exist. 
Run $(PROJDIR)/build/pyenv-setup) endif @echo "==> pypi repository: $(PYPI_REPOSITORY)" @echo "==> Python 2.7 (bdist_egg)" @python2.7 setup.py build --build-base=py-build/2.7 bdist_egg upload --repository $(PYPI_REPOSITORY) --show-response --sign --identity $(RELEASE_GPG_KEYNAME) @echo "==> Python 3.3 (bdist_egg)" @python3.3 setup.py build --build-base=py-build/3.3 bdist_egg upload --repository $(PYPI_REPOSITORY) --show-response --sign --identity $(RELEASE_GPG_KEYNAME) @echo "==> Python 3.4 (bdist_egg)" @python3.4 setup.py build --build-base=py-build/3.4 bdist_egg upload --repository $(PYPI_REPOSITORY) --show-response --sign --identity $(RELEASE_GPG_KEYNAME) @echo "==> Python 3.5 (bdist_egg)" @python3.5 setup.py build --build-base=py-build/3.5 bdist_egg upload --repository $(PYPI_REPOSITORY) --show-response --sign --identity $(RELEASE_GPG_KEYNAME) .PHONY: unit-test unit-test: @$(PROJDIR)/.runner unit-test .PHONY: integration-test integration-test: @$(PROJDIR)/.runner integration-test .PHONY: security-test security-test: @$(PROJDIR)/.runner security-test .PHONY: timeseries-test timeseries-test: @$(PROJDIR)/.runner timeseries-test .PHONY: test test: integration-test .PHONY: help help: @echo '' @echo ' Targets:' @echo ' ------------------------------------------------------------' @echo ' lint - Run linter (flake8) ' @echo ' test - Run all tests ' @echo ' unit-test - Run unit tests ' @echo ' integration-test - Run integration tests ' @echo ' security-test - Run integration tests (security enabled) ' @echo ' timeseries-test - Run timeseries integration tests ' @echo ' ------------------------------------------------------------' @echo '' diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..37c556ce --- /dev/null +++ b/NOTICE @@ -0,0 +1,2 @@ +Riak Python Client +Copyright 2010-present Basho Technologies, Inc. 
diff --git a/README.md b/README.md new file mode 100644 index 00000000..9a15864d --- /dev/null +++ b/README.md @@ -0,0 +1,149 @@ +# Python Client for Riak + +## Build Status + +[![Build Status](https://travis-ci.org/basho/riak-python-client.svg?branch=master)](https://travis-ci.org/basho/riak-python-client) + +## Documentation + +[Documentation for the Riak Python Client Library](http://basho.github.io/riak-python-client/index.html) is available [here](http://basho.github.io/riak-python-client/index.html). + +Documentation for Riak is available [here](http://docs.basho.com/riak/latest). + +## Repository Cloning + +*NOTE*: please clone this repository using the `--recursive` argument to `git clone` or follow the clone with `git submodule update --init`. This repository uses two submodules. + +# Installation + +The recommended versions of Python for use with this client are Python `2.7.8` (or greater, `2.7.11` as of `2016-06-21`), `3.3.x`, `3.4.x` and `3.5.x`. The latest version from each series should be preferred. Older versions of the Python `2.7.X` and `3.X` series should be used with caution as they are not covered by integration tests. + +## Riak TS (Timeseries) + +You must use version `2.7.11`, `3.4.4` or `3.5.1` (or greater within a version series). Otherwise you will be affected by [this Python bug](https://bugs.python.org/issue23517). + +## From Source + +```sh +python setup.py install +``` + +There are additional dependencies on Python packages `setuptools` and `protobuf`. + +## From PyPI + +Official packages are signed and published to [PyPI](https://pypi.python.org/pypi/riak). + +To install from [PyPI](https://pypi.python.org/pypi/riak) directly you can use `pip`. 
+ +```sh +pip install riak +``` + +# Testing + +## Unit Tests + +Unit tests will be executed via `tox` if it is in your `PATH`, otherwise by the `python2` and (if available), `python3` executables: + +```sh +make unit-test +``` + +## Integration Tests + +You have two options to run Riak locally - either build from source, or use a pre-installed Riak package. + +### Source + +To setup the default test configuration, build a Riak node from a clone of `github.com/basho/riak`: + +```sh +# check out latest release tag +git checkout riak-2.1.4 +make locked-deps +make rel +``` + +[Source build documentation](http://docs.basho.com/riak/kv/latest/setup/installing/source/). + +When building from source, the protocol buffers port will be `8087` and HTTP will be `8098`. + +### Package + +Install using your platform's package manager ([docs](http://docs.basho.com/riak/kv/latest/setup/installing/)) + +When installing from a package, the protocol buffers port will be `8087` and HTTP will be `8098`. + +### Running Integration Tests + +* Ensure you've initialized this repo's submodules: + +```sh +git submodule update --init +``` + +* Run the following: + +```sh +./tools/setup-riak +make integration-test +``` + + +Contributors +-------------------------- + +* Andrew Thompson +* Andy Gross +* Armon Dadgar +* Brett Hazen +* Brett Hoerner +* Brian Roach +* Bryan Fink +* Daniel Lindsley +* Daniel Néri +* Daniel Reverri +* [Dan Root](https://github.com/daroot) +* [David Basden](https://github.com/dbasden) +* [David Delassus](https://github.com/linkdd) +* David Koblas +* Dmitry Rozhkov +* Eric Florenzano +* Eric Moritz +* Filip de Waard +* Gilles Devaux +* Greg Nelson +* Gregory Burd +* Greg Stein +* Ian Plosker +* Jayson Baird +* Jeffrey Massung +* Jon Meredith +* Josip Lisec +* Justin Sheehy +* Kevin Smith +* [Luke Bakken](https://github.com/lukebakken) +* Mark Erdmann +* Mark Phillips +* Mathias Meyer +* Matt Heitzenroder +* [Matt Lohier](https://github.com/aquam8) +* Mikhail Sobolev 
+* Reid Draper +* Russell Brown +* Rusty Klophaus +* Rusty Klophaus +* Scott Lystig Fritchie +* Sean Cribbs +* Shuhao Wu +* Silas Sewell +* Socrates Lee +* Soren Hansen +* Sreejith Kesavan +* Timothée Peignier +* [`tobixx`](https://github.com/tobixx) +* [Tin Tvrtković](https://github.com/Tinche) +* [Vitaly Shestovskiy](https://github.com/lamp0chka) +* William Kral +* [Yasser Souri](https://github.com/yassersouri) diff --git a/README.rst b/README.rst deleted file mode 100644 index b2785ddc..00000000 --- a/README.rst +++ /dev/null @@ -1,173 +0,0 @@ -====================== -Python Client for Riak -====================== - -Documentation -============= - -`Documentation for the Riak Python Client Library -`_ is available -here. The documentation source is found in `docs/ subdirectory -`_ and -can be built with `Sphinx `_. - -Documentation for Riak is available at http://docs.basho.com/riak/latest - -Install -======= - -The recommended version of Python for use with this client is Python -2.7. From the Riak Python Client root directory, execute:: - - python setup.py install - -There is an additional dependency on the Python package `setuptools`. - -Official packages are signed and published to `PyPI -`_. - - -Testing -======= - -To setup the default test configuration build a test Riak node (from -a ``riak`` directory):: - - make rel - -See `Basic Cluster Setup -`_ -for more details. - -For all of the simple default values, set the ``RIAK_DIR`` environment -variable to the root of your Riak installation. Then from the -``riak-python-client`` directory :: - - cd buildbot - make preconfigure - -Start your Riak node with ``riak start`` from the the Riak directory, -then back in ``buildbot`` type:: - - make configure - make test - -That will run the test suite twice: once with security enabled and once -without. - -Testing Options ---------------- - -If you wish to change the default options you can run the setup by hand. 
-First configure the test node by adjusting the ``riak.conf`` -settings, where ``RIAK_DIR`` is the path to the top your -Riak installation:: - - python setup.py preconfigure --riak-conf=$RIAK_DIR/etc/riak.conf - -Optionally the hostname and port numbers can be changed, too, via these -arguments: - - - ``--host=`` IP of host running Riak (default is ``localhost``) - - ``--pb-port=`` protocol buffers port number (default is ``8087``) - - ``--http-port=`` http port number (default is ``8098``) - - ``--https-port=`` https port number (default is ``8099``) - -You may alternately add these lines to ``setup.cfg``:: - - [preconfigure] - riak-conf=/Users/sean/dev/riak/rel/riak/etc/riak.conf - host=localhost - pb-port=8087 - http-port=8098 - https-port=8099 - -Next start the test node. Once it is running, a test configuration is -installed which includes security test users and bucket types:: - - python setup.py configure --riak-admin=$RIAK_DIR/bin/riak-admin - -Optionally these configuration settings can be changed, too: - - - ``--username=`` test user account (default is ``testuser``) - - ``--password=`` password for test user account (default is - ``testpassword``) - - ``--certuser=`` secruity test user account (default is ``certuser``) - - ``--certpass=`` password for security test user account (default is - ``certpass``) - -Similarly ``setup.cfg`` may be modified instead. To run the tests against a -Riak server (with configured TCP port configuration) on localhost, execute:: - - python setup.py test - -Connections to Riak in Tests ----------------------------- - -If your Riak server isn't running on localhost or you have built a -Riak devrel from source, use the environment variables -``RIAK_TEST_HOST``, ``RIAK_TEST_HTTP_PORT`` and -``RIAK_TEST_PB_PORT`` to specify where to find the Riak server. - -Some of the connection tests need port numbers that are NOT in use. 
If -ports 1023 and 1022 are in use on your test system, set the -environment variables ``DUMMY_HTTP_PORT`` and ``DUMMY_PB_PORT`` to -unused port numbers. - -Testing Search --------------- - -If you don't have `Riak Search -`_ enabled, you -can set the ``SKIP_SEARCH`` environment variable to 1 skip those -tests. - -If you don't have `Search 2.0 `_ -enabled, you can set the ``RUN_YZ`` environment variable to 0 to skip -those tests. - -Testing Bucket Types (Riak 2+) ------------------------------- - -To test bucket-types, you must run the ``create_bucket_types`` setup -command, which will create the bucket-types used in testing, or create -them manually yourself. It can be run like so (substituting ``$RIAK`` -with the root of your Riak install):: - - ./setup.py create_bucket_types --riak-admin=$RIAK/bin/riak-admin - -You may alternately add these lines to `setup.cfg`:: - - [create_bucket_types] - riak-admin=/Users/sean/dev/riak/rel/riak/bin/riak-admin - -To skip the bucket-type tests, set the ``SKIP_BTYPES`` environment -variable to ``1``. - -Testing Secondary Indexes -------------------------- - -To test -`Secondary Indexes `_, -the ``SKIP_INDEX`` environment variable must be set to 0 (or 1 to skip them.) - -Testing Security (Riak 2+) --------------------------- - -By default -`Security `_ is not -enabled on Riak. Once ``security = on`` is configured in the ``riak.conf`` -file it can be enabled with ``riak-admin``. 
- -If you have set up the test environment outlined in the `Testing`_ section -you can go ahead and use this command to enable security:: - - python setup.py enable_security --riak-admin=$RIAK_DIR/bin/riak-admin - -Once you are done testing security you can also:: - - python setup.py disable_security --riak-admin=$RIAK_DIR/bin/riak-admin - -To run the tests, then simply:: - - RUN_SECURITY=1 RIAK_TEST_HTTP_PORT=18098 python setup.py test diff --git a/RELEASE_NOTES.md b/RELNOTES.md similarity index 61% rename from RELEASE_NOTES.md rename to RELNOTES.md index 80fef083..6722c3ec 100644 --- a/RELEASE_NOTES.md +++ b/RELNOTES.md @@ -1,8 +1,87 @@ # Riak Python Client Release Notes -## 2.2.0 Feature Release - 2014-12-18 +## [`3.0.0` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-3.0.0) -Release 2.2.0 features support for +* [Running expensive operations *now raise exceptions*](https://github.com/basho/riak-python-client/pull/518). You can disable these exceptions for development purposes but should not do so in production. + +## [`2.7.0` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.7.0) + * Riak TS 1.5 support + * Support for `head` parameter + +## [`2.6.1` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.6.0) + * NOTE: Due to pypi upload errors, `2.6.1` takes the place of `2.6.0`. + +## [`2.6.0` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.6.0) + * NOTE: Due to pypi upload errors, `2.6.1` takes the place of `2.6.0`. + +## [`2.5.5` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.5.5) + + * [Stop all pools when client shuts down](https://github.com/basho/riak-python-client/pull/488) + * [Calling `close` on client closes pools, remove global multi pools](https://github.com/basho/riak-python-client/pull/490). 
*NOTE*: if you use the multi get or put features of the client, you *MUST* call `close()` on your `RiakClient` instance to correctly clean up the thread pools used for these multi-operations. + +## [`2.5.4` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.5.4) + + * [When converting `datetime` objects to send to Riak TS, `tzinfo` will be used if present](https://github.com/basho/riak-python-client/pull/486) + * [Workaround for incorrect version returned by Riak TS OSS](https://github.com/basho/riak-python-client/pull/472) + +## [`2.5.3` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.5.3) + + * [Bug fix for raising `BadResource`](https://github.com/basho/riak-python-client/pull/481) + +## [`2.5.2` Release](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.5.2) + +* *NOTE*: for Riak TS data, automatic conversion from epoch values *to* Python `datetime` objects has been removed. If you would like to have automatic conversion, use `RiakClient(transport_options={'ts_convert_timestamp': True})` +* Miscellaneous fixes for term-to-binary encoding of messages for Riak TS. +* [Ensure `six` is not required during installation](https://github.com/basho/riak-python-client/pull/459) + +## [`2.5.0` Release - Deprecated](https://github.com/basho/riak-python-client/issues?q=milestone%3Ariak-python-client-2.5.0) + +* *NOTE*: due to the `basho-erlastic` dependency, this version will not install correctly. Please use ``2.5.2``. +* *NOTE*: for Riak TS data, automatic conversion from epoch values *to* Python `datetime` objects has been removed. 
If you would like to have automatic conversion, use `RiakClient(transport_options={'ts_convert_timestamp': True})` +* [Socket Enhancements](https://github.com/basho/riak-python-client/pull/453) - Resolves [#399](https://github.com/basho/riak-python-client/issues/399) +* [Add multi-put](https://github.com/basho/riak-python-client/pull/452) +* [Add support for term-to-binary encoding](https://github.com/basho/riak-python-client/pull/448) *Note:* This requires at least version ``1.3.0`` of Riak TS. + +## `2.4.2` Patch Release - 2016-02-20 + +* [Fix SSL host name](https://github.com/basho/riak-python-client/pull/436) +* [Use `riak-client-tools`](https://github.com/basho/riak-python-client/issues/434) + +## `2.4.1` Patch Release - 2016-02-03 + +* [Riak TS: Millisecond precision](https://github.com/basho/riak-python-client/issues/430) +* [Fix release process](https://github.com/basho/riak-python-client/issues/429) + +## `2.4.0` Feature Release - 2016-01-13 + +This release enhances Riak Time Series functionality. + +* [Encapsulate table description](https://github.com/basho/riak-python-client/pull/422) + +## `2.3.0` Feature Release - 2015-12-14 + +Release `2.3.0` features support for new +[time series](https://github.com/basho/riak-python-client/pull/416) +functionality. + +This is release retires support for Python 2.6.x but adds support for +Python 3.5.x. 
+ +There are also many bugfixes and new enhancements: + +* [The `riak_pb` module is now integrated into the Python Client] + (https://github.com/basho/riak-python-client/pull/418) +* [Support for Preflists and Write-Once bucket types] + (https://github.com/basho/riak-python-client/pull/414) +* [Support Riak `2.1.1`] + (https://github.com/basho/riak-python-client/pull/407) +* [Native SSL support for Python `2.7.9`+] + (https://github.com/basho/riak-python-client/pull/397) + + +## `2.2.0` Feature Release - 2014-12-18 + +Release `2.2.0` features support for [Python 3](https://github.com/basho/riak-python-client/pull/379), specifically 3.3 and 3.4. This version uses the native SSL security instead of [pyOpenSSL](http://pypi.python.org/pypi/pyOpenSSL) which is required @@ -27,9 +106,9 @@ notably: (https://github.com/basho/riak-python-client/pull/388) -## 2.1.0 Feature Release - 2014-09-03 +## `2.1.0` Feature Release - 2014-09-03 -Release 2.1.0 features support for Riak 2.0 capabilities including: +Release `2.1.0` features support for Riak 2.0 capabilities including: * Bucket Types * Riak Data Types (CRDTs) @@ -54,9 +133,9 @@ notably: * The additional request options `basic_quorum` and `notfound_ok` are now supported. -## 2.0.3 Patch Release - 2014-03-06 +## `2.0.3` Patch Release - 2014-03-06 -Release 2.0.3 includes support for 1.4.4's 2I regexp feature and fixes +Release `2.0.3` includes support for 1.4.4's 2I regexp feature and fixes a few bugs: * Docs generation now uses the version from the top-level package. @@ -64,17 +143,17 @@ a few bugs: * More errors will be caught and propagated properly from multiget requests, preventing deadlocks on the caller side. -## 2.0.2 Patch release - 2013-11-18 +## `2.0.2` Patch release - 2013-11-18 -Release 2.0.2 includes support for the 1.4.1+ "timeout" option on +Release `2.0.2` includes support for the 1.4.1+ "timeout" option on secondary index queries. 
-## 2.0.1 Patch release - 2013-08-28 +## `2.0.1` Patch release - 2013-08-28 -Release 2.0.1 includes a minor compatibility fix for Python 2.6 and an +Release `2.0.1` includes a minor compatibility fix for Python 2.6 and an updated README. -## 2.0.0 Feature Release - 2013-07-30 +## `2.0.0` Feature Release - 2013-07-30 Release 2.0 is the culmination of many months of rearchitecting the client. Highlights: @@ -117,9 +196,9 @@ Other bugfixes: * Enabling and disabling search indexing on a bucket now uses the `search` bucket property. -## 1.5.2 Patch Release - 2013-01-31 +## `1.5.2` Patch Release - 2013-01-31 -Release 1.5.2 fixes some bugs and adds HTTPS/SSL support. +Release `1.5.2` fixes some bugs and adds HTTPS/SSL support. * Added support for HTTPS. * Fixed writing of the `app.config` for the `TestServer`. @@ -130,24 +209,24 @@ Release 1.5.2 fixes some bugs and adds HTTPS/SSL support. * Prevent fetching the `protobuf` package from Google Code. * Prefer `simplejson` over `json` when present. -## 1.5.1 Patch Release - 2012-10-24 +## `1.5.1` Patch Release - 2012-10-24 -Release 1.5.1 fixes one bug and some documentation errors. +Release `1.5.1` fixes one bug and some documentation errors. * Fix bug where `http_status` is used instead of `http_code`. * Fix documentation of `RiakMapReduce.index` method. * Fix documentation of `RiakClient.__init__` method. -## 1.5.0 Feature Release - 2012-08-29 +## `1.5.0` Feature Release - 2012-08-29 -Release 1.5.0 is a feature release that supports Riak 1.2. +Release `1.5.0` is a feature release that supports Riak 1.2. Noteworthy features: * Riak 1.2 features are now supported, including Search and 2I queries over Protocol Buffers transport. The Protocol Buffers message definitions now exist as a separate package, available on - [PyPi](http://pypi.python.org/pypi/riak_pb/1.2.0). + [PyPi](http://pypi.python.org/pypi/riak_pb/`1.2.0`). 
**NOTE:** The return value of search queries over HTTP and MapReduce were changed to be compatible with the results returned from the @@ -166,7 +245,7 @@ Noteworthy bugfixes: * Various fixes were made to the TestServer and it will throw an exception when it fails to start. -## 1.4.1 Patch Release - 2012-06-19 +## `1.4.1` Patch Release - 2012-06-19 Noteworthy features: @@ -176,16 +255,16 @@ Noteworthy bugfixes: * Map Reduce queries now use "application/json" as the Content-Type -## 1.4.0 Feature Release - 2012-03-30 +## `1.4.0` Feature Release - 2012-03-30 -Release 1.4.0 is a feature release comprising over 117 individual +Release `1.4.0` is a feature release comprising over 117 individual commits. Noteworthy features: * Python 2.6 and 2.7 are supported. On 2.6, the unittest2 package is required to run the test suite. -* Google's official protobuf package (2.4.1 or later) is now a +* Google's official protobuf package (`2.4.1` or later) is now a dependency. The package from downloads.basho.com/support is no longer necessary. * Travis-CI is enabled on the client. Go to @@ -219,11 +298,11 @@ Noteworthy bugfixes: be handled properly when no results are returned. There are lots of other great fixes from our wonderful -community. [Check them out!](https://github.com/basho/riak-python-client/compare/1.3.0...1.4.0) +community. [Check them out!](https://github.com/basho/riak-python-client/compare/`1.3.0`...1.4.0) -## 1.3.0 Feature Release - 2011-08-04 +## `1.3.0` Feature Release - 2011-08-04 -Release 1.3.0 is a feature release bringing a slew of updates. +Release `1.3.0` is a feature release bringing a slew of updates. Noteworthy features: @@ -249,9 +328,9 @@ Fixes: pool. (Reid Draper) * #42: Reset protocol buffer connection up on connection error (Brett Hoerner) -## 1.2.2 Patch Release - 2011-06-22 +## `1.2.2` Patch Release - 2011-06-22 -Release 1.2.2 is a minor patch release. +Release `1.2.2` is a minor patch release. 
Noteworthy fixes and improvements: diff --git a/THANKS b/THANKS deleted file mode 100644 index 4fccfe7f..00000000 --- a/THANKS +++ /dev/null @@ -1,45 +0,0 @@ -The following people have contributed to the Riak Python client: - -Andrew Thompson -Andy Gross -Armon Dadgar -Brett Hazen -Brett Hoerner -Brian Roach -Bryan Fink -Daniel Lindsley -Daniel Néri -Daniel Reverri -David Koblas -Dmitry Rozhkov -Eric Florenzano -Eric Moritz -Filip de Waard -Gilles Devaux -Greg Nelson -Greg Stein -Gregory Burd -Ian Plosker -Jayson Baird -Jeffrey Massung -Jon Meredith -Josip Lisec -Justin Sheehy -Kevin Smith -Mark Erdmann -Mark Phillips -Mathias Meyer -Matt Heitzenroder -Mikhail Sobolev -Reid Draper -Russell Brown -Rusty Klophaus -Scott Lystig Fritchie -Sean Cribbs -Shuhao Wu -Silas Sewell -Socrates Lee -Soren Hansen -Sreejith Kesavan -Timothée Peignier -William Kral diff --git a/build/publish b/build/publish new file mode 100755 index 00000000..13268ac3 --- /dev/null +++ b/build/publish @@ -0,0 +1,189 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset + +declare -r debug='false' +declare -r tmpfile_file="/tmp/publish.$$.tmpfiles" + +function make_temp_file +{ + local template="${1:-publish.$$.XXXXXX}" + if [[ $template != *XXXXXX ]] + then + template="$template.XXXXXX" + fi + local tmp=$(mktemp -t "$template") + echo "$tmp" >> "$tmpfile_file" + echo "$tmp" +} + +function now +{ + date '+%Y-%m-%d %H:%M:%S' +} + +function pwarn +{ + echo "$(now) [warning]: $@" 1>&2 +} + +function perr +{ + echo "$(now) [error]: $@" 1>&2 +} + +function pinfo +{ + echo "$(now) [info]: $@" +} + +function pdebug +{ + if [[ $debug == 'true' ]] + then + echo "$(now) [debug]: $@" + fi +} + +function errexit +{ + perr "$@" + exit 1 +} + +function onexit +{ + if [[ -f $tmpfile_file ]] + then + for tmpfile in $(< $tmpfile_file) + do + pdebug "removing temp file $tmpfile" + rm -f $tmpfile + done + rm -f $tmpfile_file + fi +} + +function gh_publish { + if [[ -z $version_string ]] + then + errexit 
'gh_publish: version_string required' + fi + + # NB: no 'v' here at start of version_string + local -r package_name="riak-$version_string.tar.gz" + local -r package="./dist/riak-$version_string.tar.gz" + if [[ ! -s $package ]] + then + errexit "gh_publish: expected to find $package in dist/" + fi + + # NB: we use a X.Y.Z tag + local -r release_json="{ + \"tag_name\" : \"$version_string\", + \"name\" : \"Riak Python Client $version_string\", + \"body\" : \"riak-python-client $version_string\nhttps://github.com/basho/riak-python-client/blob/master/RELNOTES.md\", + \"draft\" : false, + \"prerelease\" : $is_prerelease + }" + + pdebug "Release JSON: $release_json" + + local curl_content_file="$(make_temp_file)" + local curl_stdout_file="$(make_temp_file)" + local curl_stderr_file="$(make_temp_file)" + + curl -4so $curl_content_file -w '%{http_code}' -XPOST \ + -H "Authorization: token $(< $github_api_key_file)" -H 'Content-type: application/json' \ + 'https://api.github.com/repos/basho/riak-python-client/releases' -d "$release_json" 1> "$curl_stdout_file" 2> "$curl_stderr_file" + if [[ $? != 0 ]] + then + errexit "curl error exited with code: '$?' see '$curl_stderr_file'" + fi + + local -i curl_rslt="$(< $curl_stdout_file)" + if (( curl_rslt == 422 )) + then + pwarn "Release in GitHub already exists! (http code: '$curl_rslt')" + curl -4so $curl_content_file -w '%{http_code}' -XGET \ + -H "Authorization: token $(< $github_api_key_file)" -H 'Content-type: application/json' \ + "https://api.github.com/repos/basho/riak-python-client/releases/tags/$version_string" 1> "$curl_stdout_file" 2> "$curl_stderr_file" + if [[ $? != 0 ]] + then + errexit "curl error exited with code: '$?' see '$curl_stderr_file'" + fi + elif (( curl_rslt != 201 )) + then + errexit "Creating release in GitHub failed with http code '$curl_rslt'" + fi + + if [[ ! 
-s $curl_content_file ]] + then + errexit 'no release info to parse for asset uploads' + fi + + # "upload_url": "https://uploads.github.com/repos/basho/riak-python-client/releases/1115734/assets{?name,label}" + # https://uploads.github.com/repos/basho/riak-python-client/releases/1115734/assets{?name,label} + local -r upload_url_with_name=$(perl -ne 'print qq($1\n) and exit if /"upload_url"[ :]+"(https:\/\/[^"]+)"/' "$curl_content_file") + local -r upload_url="${upload_url_with_name/\{?name,label\}/?name=$package_name}" + + local curl_content_file="$(make_temp_file)" + local curl_stdout_file="$(make_temp_file)" + local curl_stderr_file="$(make_temp_file)" + + curl -4so $curl_content_file -w '%{http_code}' -XPOST \ + -H "Authorization: token $(< $github_api_key_file)" -H 'Content-type: application/x-compressed, application/x-tar' \ + "$upload_url" --data-binary "@$package" 1> "$curl_stdout_file" 2> "$curl_stderr_file" + if [[ $? != 0 ]] + then + errexit "curl error exited with code: '$?' see '$curl_stderr_file'" + fi + + curl_rslt="$(< $curl_stdout_file)" + if (( curl_rslt != 201 )) + then + errexit "Uploading release assets to GitHub failed with http code '$curl_rslt'" + fi +} + +trap onexit EXIT + +declare -r version_string="${1:-unknown}" + +# https://www.python.org/dev/peps/pep-0440/ +if [[ ! $version_string =~ ^[0-9].[0-9].[0-9]([abcr]+[0-9]+)?$ ]] +then + errexit 'first argument must be valid version string in X.Y.Z, X.Y.ZaN, X.Y.ZbN or X.Y.ZrcN format' +fi + +is_prerelease='false' +if [[ $version_string =~ ^[0-9].[0-9].[0-9][abcr]+[0-9]+$ ]] +then + pinfo "publishing pre-release version: $version_string" + is_prerelease='true' +else + pinfo "publishing version $version_string" +fi + +declare -r current_branch="$(git rev-parse --abbrev-ref HEAD)" + +declare -r github_api_key_file="$HOME/.ghapi" +if [[ ! -s $github_api_key_file ]] +then + errexit "please save your GitHub API token in $github_api_key_file" +fi + +# Validate commands +if ! 
hash curl 2>/dev/null +then + errexit "'curl' must be in your PATH" +fi + +validate=${2:-''} +if [[ $validate == 'validate' ]] +then + exit 0 +fi + +gh_publish diff --git a/build/pyenv-setup b/build/pyenv-setup new file mode 100755 index 00000000..7759d45a --- /dev/null +++ b/build/pyenv-setup @@ -0,0 +1,111 @@ +#!/usr/bin/env bash + +unset PYENV_VERSION + +if [[ ! -d $PYENV_ROOT ]] +then + export PYENV_ROOT="$HOME/.pyenv" +fi + +declare -r PROJDIR="$PWD" +if [[ ! -s $PROJDIR/riak/__init__.py ]] +then + echo "[ERROR] script must be run from the clone of github.com/basho/riak-python-client" 1>&2 + exit 1 +fi + +rm -f $PROJDIR/.python-version + +# Install pyenv if it's missing +if [[ ! -d $PYENV_ROOT ]] +then + git clone 'https://github.com/yyuu/pyenv.git' $PYENV_ROOT +else + (cd $PYENV_ROOT && git fetch --all) +fi + +(cd $PYENV_ROOT && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))) + +declare -r pyenv_alias_dir="$PYENV_ROOT/plugins/pyenv-alias" +if [[ ! -d $pyenv_alias_dir ]] +then + git clone 'https://github.com/s1341/pyenv-alias.git' $pyenv_alias_dir +else + (cd $pyenv_alias_dir && git pull origin master) +fi + +# Add pyenv root to PATH +# and initialize pyenv +if [[ $PATH != */.pyenv* ]] +then + echo "[INFO] adding $PYENV_ROOT/bin to PATH" + export PATH="$PYENV_ROOT/bin:$PATH" +fi + +if [[ $(type -t pyenv) != 'function' ]] +then + echo "[INFO] init pyenv" + eval "$(pyenv init -)" +fi + +do_pip_upgrades='false' + +# NB: 2.7.8 is special-cased +for pyver in 2.7 3.3 3.4 3.5 3.6 +do + riak_py_alias="riak_$pyver" + if ! 
pyenv versions | fgrep -v 'riak_2.7.8' | fgrep -q "$riak_py_alias" + then + # Need to install it + do_pip_upgrades='true' + + declare -i pymaj="${pyver%.*}" + declare -i pymin="${pyver#*.}" + pyver_latest="$(pyenv install --list | grep -E "^[[:space:]]+$pymaj\\.$pymin\\.[[:digit:]]+\$" | tail -n1 | sed -e 's/[[:space:]]//g')" + + echo "[INFO] installing Python $pyver_latest" + VERSION_ALIAS="$riak_py_alias" pyenv install "$pyver_latest" + fi +done + +if ! pyenv versions | fgrep -q 'riak_2.7.8' +then + # Need to install it + do_pip_upgrades='true' + + echo "[INFO] installing Python 2.7.8" + VERSION_ALIAS='riak_2.7.8' pyenv install '2.7.8' +fi + +pushd $PROJDIR +pyenv local 'riak_3.6' 'riak_3.5' 'riak_3.4' 'riak_3.3' 'riak_2.7' 'riak_2.7.8' + +pyenv rehash + +if [[ $do_pip_upgrades == 'true' ]] +then + for PY in $(pyenv versions --bare --skip-aliases | grep '^riak_') + do + echo "[INFO] $PY - upgrading pip / setuptools" + PYENV_VERSION="$PY" pip install --upgrade pip setuptools + done +fi + +python_version="$(python --version)" +if [[ $python_version == Python\ 3* ]] +then + pip install --ignore-installed tox + if ! 
pip show --quiet tox + then + echo "[ERROR] install of 'tox' failed" 1>&2 + popd + exit 1 + fi + pyenv rehash +else + echo "[ERROR] expected Python 3 to be 'python' at this point" 1>&2 + popd + exit 1 +fi + +popd diff --git a/buildbot/Makefile b/buildbot/Makefile deleted file mode 100644 index e11c13f4..00000000 --- a/buildbot/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -RIAK_CONF = ${RIAK_DIR}/etc/riak.conf -# ADVANCED_CONF = ${RIAK_DIR}/etc/advanced.config -# RIAK = ${RIAK_DIR}/bin/riak -RIAK_ADMIN = ${RIAK_DIR}/bin/riak-admin -CERTS_DIR = $(shell pwd)/../riak/tests/resources - -preconfigure: - @../setup.py preconfigure --riak-conf=${RIAK_CONF} - -configure: - @../setup.py configure --riak-admin=${RIAK_ADMIN} - -compile: - -@yes y | pip uninstall riak-pb protobuf pyOpenSSL - @../setup.py develop - -lint: - @pip install --upgrade pep8 pyflakes - @cd ..; pep8 riak *.py - @cd ..; pyflakes riak *.py - @openssl verify -CAfile ${CERTS_DIR}/ca.crt ${CERTS_DIR}/client.crt - @openssl verify -CAfile ${CERTS_DIR}/ca.crt ${CERTS_DIR}/server.crt - -test: test_normal test_security - -test_normal: - @echo "Testing Riak Python Client (without security)" - @../setup.py disable_security --riak-admin=${RIAK_ADMIN} - @RUN_YZ=1 SKIP_DATATYPES=0 SKIP_INDEXES=0 ../setup.py test - -test_security: - @echo "Testing Riak Python Client (with security)" - @../setup.py enable_security --riak-admin=${RIAK_ADMIN} - (cd ..; RUN_YZ=1 SKIP_INDEXES=0 RUN_SECURITY=1 SKIP_POOL=1 SKIP_RESOLVE=1 RIAK_TEST_HTTP_PORT=18098 ./setup.py test) diff --git a/commands.py b/commands.py index e90f074c..a20557ab 100644 --- a/commands.py +++ b/commands.py @@ -1,18 +1,30 @@ -""" -distutils commands for riak-python-client -""" - -__all__ = ['create_bucket_types', 'setup_security', 'enable_security', - 'disable_security', 'preconfigure', 'configure'] +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import os +import os.path +import re -from distutils import log from distutils.core import Command from distutils.errors import DistutilsOptionError +from distutils.file_util import write_file +from distutils import log from subprocess import Popen, PIPE -from string import Template -import shutil -import re -import os.path + + +__all__ = ['build_messages', 'setup_timeseries'] # Exception classes used by this module. @@ -67,39 +79,14 @@ def check_output(*popenargs, **kwargs): raise CalledProcessError(retcode, cmd, output=output) return output + try: import simplejson as json except ImportError: import json -class create_bucket_types(Command): - """ - Creates bucket-types appropriate for testing. 
By default this will create: - - * `pytest-maps` with ``{"datatype":"map"}`` - * `pytest-sets` with ``{"datatype":"set"}`` - * `pytest-counters` with ``{"datatype":"counter"}`` - * `pytest-consistent` with ``{"consistent":true}`` - * `pytest-mr` - * `pytest` with ``{"allow_mult":false}`` - """ - - description = "create bucket-types used in integration tests" - - user_options = [ - ('riak-admin=', None, 'path to the riak-admin script') - ] - - _props = { - 'pytest-maps': {'datatype': 'map'}, - 'pytest-sets': {'datatype': 'set'}, - 'pytest-counters': {'datatype': 'counter'}, - 'pytest-consistent': {'consistent': True}, - 'pytest-mr': {}, - 'pytest': {'allow_mult': False} - } - +class bucket_type_commands: def initialize_options(self): self.riak_admin = None @@ -169,300 +156,250 @@ def _btype_command(self, *args): return cmd -class security_commands(object): - def check_security_command(self, *args): - cmd = self._security_command(*args) - return self.check_output(cmd) - - def run_security_command(self, *args): - self.spawn(self._security_command(*args)) - - def _security_command(self, *args): - cmd = [self.riak_admin, "security"] - if isinstance(args, tuple): - for elem in args: - cmd.extend(elem) - else: - cmd.extend(args) - return cmd - - def check_output(self, *args, **kwargs): - if self.dry_run: - log.info(' '.join(args)) - return bytearray() - else: - return check_output(*args, **kwargs) - - -class setup_security(Command, security_commands): +class setup_timeseries(bucket_type_commands, Command): """ - Sets up security for testing. 
By default this will create: - - * User `testuser` with password `testpassword` - * User `certuser` with password `certpass` - * Two security sources - * Permissions on - * riak_kv.get - * riak_kv.put - * riak_kv.delete - * riak_kv.index - * riak_kv.list_keys - * riak_kv.list_buckets - * riak_kv.mapreduce - * riak_core.get_bucket - * riak_core.set_bucket - * riak_core.get_bucket_type - * riak_core.set_bucket_type - * search.admin - * search.query + Creates bucket-types appropriate for timeseries. """ - description = "create security settings used in integration tests" + description = "create bucket-types used in timeseries tests" user_options = [ - ('riak-admin=', None, 'path to the riak-admin script'), - ('username=', None, 'test user account'), - ('password=', None, 'password for test user account'), - ('certuser=', None, 'certificate test user account'), - ('certpass=', None, 'password for certificate test user account') - ] - - _commands = [ - "add-user $USERNAME password=$PASSWORD", - "add-source $USERNAME 127.0.0.1/32 password", - "add-user $CERTUSER password=$CERTPASS", - "add-source $CERTUSER 127.0.0.1/32 certificate" + ('riak-admin=', None, 'path to the riak-admin script') ] - _grants = { - "riak_kv.get": ["any"], - "riak_kv.put": ["any"], - "riak_kv.delete": ["any"], - "riak_kv.index": ["any"], - "riak_kv.list_keys": ["any"], - "riak_kv.list_buckets": ["any"], - "riak_kv.mapreduce": ["any"], - "riak_core.get_bucket": ["any"], - "riak_core.set_bucket": ["any"], - "riak_core.get_bucket_type": ["any"], - "riak_core.set_bucket_type": ["any"], - "search.admin": ["index", "schema"], - "search.query": ["index", "schema"] + _props = { + 'GeoCheckin': { + 'n_val': 3, + 'table_def': ''' + CREATE TABLE GeoCheckin ( + geohash varchar not null, + user varchar not null, + time timestamp not null, + weather varchar not null, + temperature double, + PRIMARY KEY( + (geohash, user, quantum(time, 15, m)), + geohash, user, time + ) + )''' + } } - def 
initialize_options(self): - self.riak_admin = None - self.username = None - self.password = None - self.certuser = None - self.certpass = None - - def finalize_options(self): - if self.riak_admin is None: - raise DistutilsOptionError("riak-admin option not set") - if self.username is None: - self.username = 'testuser' - if self.password is None: - self.password = 'testpassword' - if self.certuser is None: - self.certuser = 'certuser' - if self.certpass is None: - self.certpass = 'certpass' - - def run(self): - if self._check_available(): - for cmd in self._commands: - # Replace the username and password if specified - s = Template(cmd) - newcmd = s.substitute(USERNAME=self.username, - PASSWORD=self.password, - CERTUSER=self.certuser, - CERTPASS=self.certpass) - log.info("Security command: {0}".format(repr(newcmd))) - self.run_security_command(tuple(newcmd.split(' '))) - for perm in self._grants: - self._apply_grant(perm, self._grants[perm]) - def _check_available(self): +class ComparableMixin(object): + def _compare(self, other, method): try: - self.check_security_command("status") - return True - except CalledProcessError: - log.error("Security is not supported on this Riak node!") - return False + return method(self._cmpkey(), other._cmpkey()) + except (AttributeError, TypeError): + # _cmpkey not implemented, or return different type, + # so I can't compare with "other". 
+ return NotImplemented - def _apply_grant(self, perm, targets): - for target in targets: - cmd = ["grant", perm, "on", target, "to", self.username] - log.info("Granting permission {0} on {1} to {2}" - .format(repr(perm), repr(target), repr(self.username))) - self.run_security_command(cmd) - cmd = ["grant", perm, "on", target, "to", self.certuser] - log.info("Granting permission {0} on {1} to {2}" - .format(repr(perm), repr(target), repr(self.certuser))) - self.run_security_command(cmd) + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) -class enable_security(Command, security_commands): - """ - Actually turn on security. - """ - description = "turn on security within Riak" + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) - user_options = [ - ('riak-admin=', None, 'path to the riak-admin script'), - ] + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) - def initialize_options(self): - self.riak_admin = None + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) - def finalize_options(self): - if self.riak_admin is None: - raise DistutilsOptionError("riak-admin option not set") + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) - def run(self): - cmd = "enable" - self.run_security_command(tuple(cmd.split(' '))) +class MessageCodeMapping(ComparableMixin): + def __init__(self, code, message, proto): + self.code = int(code) + self.message = message + self.proto = proto + self.message_code_name = self._message_code_name() + self.module_name = 'riak.pb.{0}_pb2'.format(self.proto) + self.message_class = self._message_class() -class disable_security(Command, security_commands): - """ - Actually turn off security. 
- """ - description = "turn off security within Riak" + def _cmpkey(self): + return self.code - user_options = [ - ('riak-admin=', None, 'path to the riak-admin script'), - ] + def __hash__(self): + return self.code - def initialize_options(self): - self.riak_admin = None + def _message_code_name(self): + strip_rpb = re.sub(r"^Rpb", "", self.message) + word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', strip_rpb) + word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word) + word = word.replace("-", "_") + return "MSG_CODE_" + word.upper() - def finalize_options(self): - if self.riak_admin is None: - raise DistutilsOptionError("riak-admin option not set") - - def run(self): - cmd = "disable" - self.run_security_command(tuple(cmd.split(' '))) - - -class preconfigure(Command): + def _message_class(self): + try: + pbmod = __import__(self.module_name, globals(), locals(), + [self.message]) + klass = pbmod.__dict__[self.message] + return klass + except KeyError: + log.warn("Did not find '%s' message class in module '%s'", + self.message, self.module_name) + except ImportError as e: + log.error("Could not import module '%s', exception: %s", + self.module_name, e) + raise + return None + + +# NOTE: TO RUN THIS SUCCESSFULLY, YOU NEED TO HAVE THESE +# PACKAGES INSTALLED: +# protobuf or python3_protobuf +# six +# +# Run the following command to install them: +# python setup.py install +# +# TO DEBUG: Set DISTUTILS_DEBUG=1 in the environment or run as +# 'python setup.py -vv build_messages' +class build_messages(Command): """ - Sets up security configuration. - - * Update these lines in riak.conf - * storage_backend = leveldb - * search = on - * listener.protobuf.internal = 127.0.0.1:8087 - * listener.http.internal = 127.0.0.1:8098 - * listener.https.internal = 127.0.0.1:18098 - * ssl.certfile = $pwd/tests/resources/server.crt - * ssl.keyfile = $pwd/tests/resources/server.key - * ssl.cacertfile = $pwd/tests/resources/ca.crt - * check_crl = off + Generates message code mappings. 
Add to the build process using:: + + setup(cmd_class={'build_messages': build_messages}) """ - description = "preconfigure security settings used in integration tests" + description = "generate protocol message code mappings" user_options = [ - ('riak-conf=', None, 'path to the riak.conf file'), - ('host=', None, 'IP of host running Riak'), - ('pb-port=', None, 'protocol buffers port number'), - ('https-port=', None, 'https port number') + ('source=', None, 'source CSV file containing message code mappings'), + ('destination=', None, 'destination Python source file') ] - def initialize_options(self): - self.riak_conf = None - self.host = "127.0.0.1" - self.pb_port = "8087" - self.http_port = "8098" - self.https_port = "18098" - - def finalize_options(self): - if self.riak_conf is None: - raise DistutilsOptionError("riak-conf option not set") - - def run(self): - self.cert_dir = os.path.dirname(os.path.realpath(__file__)) + \ - "/riak/tests/resources" - self._update_riak_conf() - - def _update_riak_conf(self): - http_host = self.host + ':' + self.http_port - https_host = self.host + ':' + self.https_port - pb_host = self.host + ':' + self.pb_port - self._backup_file(self.riak_conf) - f = open(self.riak_conf, 'r', buffering=1) - conf = f.read() - f.close() - conf = re.sub(r'search\s+=\s+off', r'search = on', conf) - conf = re.sub(r'##[ ]+ssl\.', r'ssl.', conf) - conf = re.sub(r'ssl.certfile\s+=\s+\S+', - r'ssl.certfile = ' + self.cert_dir + '/server.crt', - conf) - conf = re.sub(r'storage_backend\s+=\s+\S+', - r'storage_backend = leveldb', - conf) - conf = re.sub(r'ssl.keyfile\s+=\s+\S+', - r'ssl.keyfile = ' + self.cert_dir + '/server.key', - conf) - conf = re.sub(r'ssl.cacertfile\s+=\s+\S+', - r'ssl.cacertfile = ' + self.cert_dir + - '/ca.crt', - conf) - conf = re.sub(r'#*[ ]*listener.http.internal\s+=\s+\S+', - r'listener.http.internal = ' + http_host, - conf) - conf = re.sub(r'#*[ ]*listener.https.internal\s+=\s+\S+', - r'listener.https.internal = ' + https_host, 
- conf) - conf = re.sub(r'listener.protobuf.internal\s+=\s+\S+', - r'listener.protobuf.internal = ' + pb_host, - conf) - conf += 'check_crl = off\n' - f = open(self.riak_conf, 'w', buffering=1) - f.write(conf) - f.close() - - def _backup_file(self, name): - backup = name + ".bak" - if os.path.isfile(name): - shutil.copyfile(name, backup) - else: - log.info("Cannot backup missing file {0}".format(repr(name))) - - -class configure(Command): - """ - Sets up security configuration. - - * Run setup_security and create_bucket_types - """ - - description = "create bucket types and security settings for testing" - - user_options = create_bucket_types.user_options + \ - setup_security.user_options + # Used in loading and generating + _pb_imports = set() + _messages = set() + _linesep = os.linesep + _indented_item_sep = ',{0} '.format(_linesep) + + _docstring = [ + '' + '# This is a generated file. DO NOT EDIT.', + '', + '"""', + 'Constants and mappings between Riak protocol codes and messages.', + '"""', + '' + ] def initialize_options(self): - self.riak_admin = None - self.username = None - self.password = None + self.source = None + self.destination = None + self.update_import = None def finalize_options(self): - bucket = self.distribution.get_command_obj('create_bucket_types') - bucket.riak_admin = self.riak_admin - security = self.distribution.get_command_obj('setup_security') - security.riak_admin = self.riak_admin - security.username = self.username - security.password = self.password + if self.source is None: + self.source = 'riak_pb/src/riak_pb_messages.csv' + if self.destination is None: + self.destination = 'riak/pb/messages.py' def run(self): - # Run all relevant sub-commands. 
- for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - sub_commands = [('create_bucket_types', None), - ('setup_security', None) - ] + self.force = True + self.make_file(self.source, self.destination, + self._load_and_generate, []) + + def _load_and_generate(self): + self._format_python2_or_3() + self._load() + self._generate() + + def _load(self): + with open(self.source, 'r', buffering=1) as csvfile: + reader = csv.reader(csvfile) + for row in reader: + message = MessageCodeMapping(*row) + self._messages.add(message) + self._pb_imports.add(message.module_name) + + def _generate(self): + self._contents = [] + self._generate_doc() + self._generate_imports() + self._generate_codes() + self._generate_classes() + write_file(self.destination, self._contents) + + def _generate_doc(self): + # Write the license and docstring header + self._contents.extend(self._docstring) + + def _generate_imports(self): + # Write imports + for im in sorted(self._pb_imports): + self._contents.append("import {0}".format(im)) + + def _generate_codes(self): + # Write protocol code constants + self._contents.extend(['', "# Protocol codes"]) + for message in sorted(self._messages): + self._contents.append("{0} = {1}".format(message.message_code_name, + message.code)) + + def _generate_classes(self): + # Write message classes + classes = [self._generate_mapping(message) + for message in sorted(self._messages)] + + classes = self._indented_item_sep.join(classes) + self._contents.extend(['', + "# Mapping from code to protobuf class", + 'MESSAGE_CLASSES = {', + ' ' + classes, + '}']) + + def _generate_mapping(self, m): + if m.message_class is not None: + klass = "{0}.{1}".format(m.module_name, + m.message_class.__name__) + else: + klass = "None" + pair = "{0}: {1}".format(m.message_code_name, klass) + if len(pair) > 76: + # Try to satisfy PEP8, lulz + pair = (self._linesep + ' ').join(pair.split(' ')) + return pair + + def _format_python2_or_3(self): + """ + Change the PB 
files to use full pathnames for Python 3.x + and modify the metaclasses to be version agnostic + """ + pb_files = set() + with open(self.source, 'r', buffering=1) as csvfile: + reader = csv.reader(csvfile) + for row in reader: + _, _, proto = row + pb_files.add('riak/pb/{0}_pb2.py'.format(proto)) + + for im in sorted(pb_files): + with open(im, 'r', buffering=1) as pbfile: + contents = 'from six import *\n' + pbfile.read() + contents = re.sub(r'riak_pb2', + r'riak.pb.riak_pb2', + contents) + # Look for this pattern in the protoc-generated file: + # + # class RpbCounterGetResp(_message.Message): + # __metaclass__ = _reflection.GeneratedProtocolMessageType + # + # and convert it to: + # + # @add_metaclass(_reflection.GeneratedProtocolMessageType) + # class RpbCounterGetResp(_message.Message): + contents = re.sub( + r'class\s+(\S+)\((\S+)\):\s*\n' + '\s+__metaclass__\s+=\s+(\S+)\s*\n', + r'@add_metaclass(\3)\nclass \1(\2):\n', contents) + + with open(im, 'w', buffering=1) as pbfile: + pbfile.write(contents) diff --git a/docs b/docs new file mode 160000 index 00000000..f8f1ae3b --- /dev/null +++ b/docs @@ -0,0 +1 @@ +Subproject commit f8f1ae3b2b8258ed494dec9530683fe29b381cf9 diff --git a/docs/Makefile b/docsrc/Makefile similarity index 100% rename from docs/Makefile rename to docsrc/Makefile diff --git a/docs/_templates/layout.html b/docsrc/_templates/layout.html similarity index 100% rename from docs/_templates/layout.html rename to docsrc/_templates/layout.html diff --git a/docs/advanced.rst b/docsrc/advanced.rst similarity index 86% rename from docs/advanced.rst rename to docsrc/advanced.rst index 523b465d..fe355f88 100644 --- a/docs/advanced.rst +++ b/docsrc/advanced.rst @@ -12,14 +12,17 @@ Connection pool .. currentmodule:: riak.transports.pool -.. autoexception:: BadResource .. autoclass:: Resource :members: + .. autoclass:: Pool :members: .. autoclass:: PoolIterator +.. autoexception:: BadResource +.. 
autoexception:: ConnectionClosed + ----------- Retry logic ----------- @@ -36,24 +39,29 @@ Retry logic .. autofunction:: retryableHttpOnly --------- -Multiget --------- +------------------- +Multiget / Multiput +------------------- -.. currentmodule:: riak.client.multiget +.. currentmodule:: riak.client.multi .. autodata:: POOL_SIZE .. autoclass:: Task +.. autoclass:: PutTask .. autoclass:: MultiGetPool :members: :private-members: -.. autodata:: RIAK_MULTIGET_POOL - .. autofunction:: multiget +.. autoclass:: MultiPutPool + :members: + :private-members: + +.. autofunction:: multiput + --------- Datatypes --------- @@ -93,7 +101,7 @@ Transports .. currentmodule:: riak.transports.transport -.. autoclass:: RiakTransport +.. autoclass:: Transport :members: :private-members: @@ -124,20 +132,24 @@ HTTP Transport .. currentmodule:: riak.transports.http -.. autoclass:: RiakHttpPool +.. autoclass:: HttpPool .. autofunction:: is_retryable -.. autoclass:: RiakHttpTransport +.. autoclass:: HttpTransport :members: -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Protocol Buffers Transport -^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^ +TCP Transport +^^^^^^^^^^^^^ -.. currentmodule:: riak.transports.pbc +.. currentmodule:: riak.transports.tcp + +.. autoclass:: TcpPool + +.. autofunction:: is_retryable -.. autoclass:: RiakPbcTransport +.. autoclass:: TcpTransport :members: --------- diff --git a/docs/bucket.rst b/docsrc/bucket.rst similarity index 100% rename from docs/bucket.rst rename to docsrc/bucket.rst diff --git a/docs/client.rst b/docsrc/client.rst similarity index 95% rename from docs/client.rst rename to docsrc/client.rst index edf9d14a..f014afd9 100644 --- a/docs/client.rst +++ b/docsrc/client.rst @@ -123,6 +123,17 @@ Key-level Operations .. automethod:: RiakClient.fetch_datatype .. automethod:: RiakClient.update_datatype +-------------------- +Timeseries Operations +-------------------- + +.. automethod:: RiakClient.ts_describe +.. automethod:: RiakClient.ts_get +.. 
automethod:: RiakClient.ts_put +.. automethod:: RiakClient.ts_delete +.. automethod:: RiakClient.ts_query +.. automethod:: RiakClient.ts_stream_keys + ---------------- Query Operations ---------------- diff --git a/docs/conf.py b/docsrc/conf.py similarity index 93% rename from docs/conf.py rename to docsrc/conf.py index 57d39b2e..7b5cb000 100644 --- a/docs/conf.py +++ b/docsrc/conf.py @@ -1,3 +1,17 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ # -*- coding: utf-8 -*- # # Riak (Python binding) documentation build configuration file, created by diff --git a/docs/datatypes.rst b/docsrc/datatypes.rst similarity index 100% rename from docs/datatypes.rst rename to docsrc/datatypes.rst diff --git a/docs/index.rst b/docsrc/index.rst similarity index 100% rename from docs/index.rst rename to docsrc/index.rst diff --git a/docs/make.bat b/docsrc/make.bat similarity index 100% rename from docs/make.bat rename to docsrc/make.bat diff --git a/docs/object.rst b/docsrc/object.rst similarity index 100% rename from docs/object.rst rename to docsrc/object.rst diff --git a/docs/query.rst b/docsrc/query.rst similarity index 100% rename from docs/query.rst rename to docsrc/query.rst diff --git a/docs/security.rst b/docsrc/security.rst similarity index 100% rename from docs/security.rst rename to docsrc/security.rst diff --git a/make.ps1 b/make.ps1 new file mode 100644 index 00000000..6d3c4181 --- /dev/null +++ b/make.ps1 @@ -0,0 +1,20 @@ +Set-StrictMode -Version Latest +$ErrorActionPreference = 'Stop' + +$env:RIAK_TEST_HOST = 'riak-test' +$env:RIAK_TEST_PROTOCOL = 'pbc' +$env:RIAK_TEST_PB_PORT = 10017 +$env:RUN_DATATYPES = 1 +$env:RUN_INDEXES = 1 +$env:RUN_POOL = 1 +$env:RUN_YZ = 1 + +flake8 --exclude=riak/pb riak commands.py setup.py version.py +if ($LastExitCode -ne 0) { + throw 'flake8 failed!' +} + +python setup.py test +if ($LastExitCode -ne 0) { + throw 'python tests failed!' +} diff --git a/riak/__init__.py b/riak/__init__.py index 3806af49..306cf7a0 100644 --- a/riak/__init__.py +++ b/riak/__init__.py @@ -1,69 +1,46 @@ -""" -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. ---- +""" The Riak API for Python allows you to connect to a Riak instance, create, modify, and delete Riak objects, add and remove links from Riak objects, run Javascript (and Erlang) based Map/Reduce operations, and run Linkwalking operations. - -See the unit_tests.py file for example usage. - -@author Rusty Klophaus (@rklophaus) (rusty@basho.com) -@author Andy Gross (@argv0) (andy@basho.com) -@author Jon Meredith (@jmeredith) (jmeredith@basho.com) -@author Jay Baird (@skatterbean) (jay@mochimedia.com) """ -__all__ = ['RiakBucket', 'BucketType', 'RiakNode', 'RiakObject', 'RiakClient', - 'RiakMapReduce', 'RiakKeyFilter', 'RiakLink', 'RiakError', - 'ConflictError', 'ONE', 'ALL', 'QUORUM', 'key_filter'] - - -class RiakError(Exception): - """ - Base class for exceptions generated in the Riak API. - """ - def __init__(self, value): - self.value = value - - def __str__(self): - return repr(self.value) - - -class ConflictError(RiakError): - """ - Raised when an operation is attempted on a - :class:`~riak.riak_object.RiakObject` that has more than one - sibling. 
- """ - def __init__(self, message="Object in conflict"): - super(ConflictError, self).__init__(message) - - +from riak.riak_error import RiakError, ConflictError, ListError from riak.client import RiakClient from riak.bucket import RiakBucket, BucketType +from riak.table import Table from riak.node import RiakNode from riak.riak_object import RiakObject from riak.mapreduce import RiakKeyFilter, RiakMapReduce, RiakLink + +__all__ = ['RiakBucket', 'Table', 'BucketType', 'RiakNode', + 'RiakObject', 'RiakClient', 'RiakMapReduce', 'RiakKeyFilter', + 'RiakLink', 'RiakError', 'ConflictError', 'ListError', + 'ONE', 'ALL', 'QUORUM', 'key_filter', + 'disable_list_exceptions'] + ONE = "one" ALL = "all" QUORUM = "quorum" key_filter = RiakKeyFilter() + +""" +Set to true to allow listing operations +""" +disable_list_exceptions = False diff --git a/riak/benchmark.py b/riak/benchmark.py index 13286100..e1f3e55c 100644 --- a/riak/benchmark.py +++ b/riak/benchmark.py @@ -1,24 +1,23 @@ -""" -Copyright 2013 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import print_function + import os import gc +import sys +import traceback __all__ = ['measure', 'measure_with_rehearsal'] @@ -172,5 +171,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): elif exc_type is KeyboardInterrupt: return False else: - print("EXCEPTION! %r" % ((exc_type, exc_val, exc_tb),)) - return True + msg = "EXCEPTION! type: %r val: %r" % (exc_type, exc_val) + print(msg, file=sys.stderr) + traceback.print_tb(exc_tb) + return True if exc_type is None else False diff --git a/riak/benchmarks/multiget.py b/riak/benchmarks/multiget.py new file mode 100644 index 00000000..87a97a6a --- /dev/null +++ b/riak/benchmarks/multiget.py @@ -0,0 +1,65 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import binascii +import os + +import riak.benchmark as benchmark + +from riak import RiakClient +from multiprocessing import cpu_count + +nodes = [ + ('riak-test', 8098, 8087), + # ('riak-test', 10018, 10017), + # ('riak-test', 10028, 10027), + # ('riak-test', 10038, 10037), + # ('riak-test', 10048, 10047), + # ('riak-test', 10058, 10057), +] +client = RiakClient( + nodes=nodes, + protocol='pbc', + multiget_pool_size=128) + +bkeys = [('default', 'multiget', str(key)) for key in range(10000)] + +data = binascii.b2a_hex(os.urandom(1024)) + +print("Benchmarking multiget:") +print(" CPUs: {0}".format(cpu_count())) +print(" Threads: {0}".format(client._multiget_pool._size)) +print(" Keys: {0}".format(len(bkeys))) +print() + +with benchmark.measure() as b: + with b.report('populate'): + for _, bucket, key in bkeys: + client.bucket(bucket).new(key, encoded_data=data, + content_type='text/plain' + ).store() +for b in benchmark.measure_with_rehearsal(): + # client.protocol = 'http' + # with b.report('http seq'): + # for _, bucket, key in bkeys: + # client.bucket(bucket).get(key) + # with b.report('http multi'): + # client.multiget(bkeys) + + client.protocol = 'pbc' + with b.report('pbc seq'): + for _, bucket, key in bkeys: + client.bucket(bucket).get(key) + with b.report('pbc multi'): + client.multiget(bkeys) diff --git a/riak/benchmarks/timeseries.py b/riak/benchmarks/timeseries.py new file mode 100644 index 00000000..5d0f89c3 --- /dev/null +++ b/riak/benchmarks/timeseries.py @@ -0,0 +1,88 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import random
+import sys
+
+import riak.benchmark as benchmark
+
+from multiprocessing import cpu_count
+from riak import RiakClient
+
+# logger = logging.getLogger()
+# logger.level = logging.DEBUG
+# logger.addHandler(logging.StreamHandler(sys.stdout))
+
+# batch sizes 8, 16, 32, 64, 128, 256
+if len(sys.argv) != 3:
+    raise AssertionError(
+        'first arg is batch size, second arg is true / false '
+        'for use_ttb')
+
+rowcount = 32768
+batchsz = int(sys.argv[1])
+if rowcount % batchsz != 0:
+    raise AssertionError('rowcount must be divisible by batchsz')
+use_ttb = sys.argv[2].lower() == 'true'
+
+epoch = datetime.datetime.utcfromtimestamp(0)
+onesec = datetime.timedelta(0, 1)
+
+weather = ['typhoon', 'hurricane', 'rain', 'wind', 'snow']
+rows = []
+for i in range(rowcount):
+    ts = datetime.datetime(2016, 1, 1, 12, 0, 0) + \
+        datetime.timedelta(seconds=i)
+    family_idx = i % batchsz
+    series_idx = i % batchsz
+    family = 'hash{:d}'.format(family_idx)
+    series = 'user{:d}'.format(series_idx)
+    w = weather[i % len(weather)]
+    temp = (i % 100) + random.random()
+    row = [family, series, ts, w, temp]
+    key = [family, series, ts]
+    rows.append(row)
+
+print("Benchmarking timeseries:")
+print("   Use TTB: {}".format(use_ttb))
+print("Batch Size: {}".format(batchsz))
+print("      CPUs: {}".format(cpu_count()))
+print("      Rows: {}".format(len(rows)))
+print()
+
+tbl = 'GeoCheckin'
+h = 'riak-test'
+n = [
+    {'host': h, 'pb_port': 10017},
+    {'host': h, 'pb_port': 10027},
+    {'host': h, 'pb_port': 10037},
+    {'host': h, 'pb_port': 10047},
+    {'host': h, 
'pb_port': 10057} +] +client = RiakClient(nodes=n, protocol='pbc', + transport_options={'use_ttb': use_ttb}) +table = client.table(tbl) + +with benchmark.measure() as b: + for i in (1, 2, 3): + with b.report('populate-%d' % i): + for i in range(0, rowcount, batchsz): + x = i + y = i + batchsz + r = rows[x:y] + ts_obj = table.new(r) + result = ts_obj.store() + if result is not True: + raise AssertionError("expected success") diff --git a/riak/bucket.py b/riak/bucket.py index d7bbd9fb..7dde7351 100644 --- a/riak/bucket.py +++ b/riak/bucket.py @@ -1,25 +1,24 @@ -""" -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010 Rusty Klophaus +# Copyright 2010 Justin Sheehy +# Copyright 2009 Jay Baird +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from six import string_types, PY2 import mimetypes from riak.util import lazy_property +from riak.datatypes import TYPES def bucket_property(name, doc=None): @@ -51,12 +50,13 @@ def __init__(self, client, name, bucket_type): :param bucket_type: The parent bucket type of this bucket :type bucket_type: :class:`BucketType` """ + + if not isinstance(name, string_types): + raise TypeError('Bucket name must be a string') + if PY2: try: - if isinstance(name, string_types): - name = name.encode('ascii') - else: - raise TypeError('Bucket name must be a string') + name = name.encode('ascii') except UnicodeError: raise TypeError('Unicode bucket names are not supported.') @@ -172,6 +172,7 @@ def new(self, key=None, data=None, content_type='application/json', :class:`~riak.datatypes.Datatype` """ + from riak import RiakObject if self.bucket_type.datatype: return TYPES[self.bucket_type.datatype](bucket=self, key=key) @@ -191,9 +192,9 @@ def new(self, key=None, data=None, content_type='application/json', return obj def get(self, key, r=None, pr=None, timeout=None, include_context=None, - basic_quorum=None, notfound_ok=None): + basic_quorum=None, notfound_ok=None, head_only=False): """ - Retrieve an :class:`~riak.riak_object.RiakObject` or + Retrieve a :class:`~riak.riak_object.RiakObject` or :class:`~riak.datatypes.Datatype`, based on the presence and value of the :attr:`datatype ` bucket property. 
@@ -213,10 +214,14 @@ def get(self, key, r=None, pr=None, timeout=None, include_context=None, :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool + :param head_only: whether to fetch without value, so only metadata + (only available on PB transport) + :type head_only: bool :rtype: :class:`RiakObject ` or :class:`~riak.datatypes.Datatype` """ + from riak import RiakObject if self.bucket_type.datatype: return self._client.fetch_datatype(self, key, r=r, pr=pr, timeout=timeout, @@ -227,10 +232,12 @@ def get(self, key, r=None, pr=None, timeout=None, include_context=None, obj = RiakObject(self._client, self, key) return obj.reload(r=r, pr=pr, timeout=timeout, basic_quorum=basic_quorum, - notfound_ok=notfound_ok) + notfound_ok=notfound_ok, + head_only=head_only) def multiget(self, keys, r=None, pr=None, timeout=None, - basic_quorum=None, notfound_ok=None): + basic_quorum=None, notfound_ok=None, + head_only=False): """ Retrieves a list of keys belonging to this bucket in parallel. 
@@ -247,6 +254,9 @@ def multiget(self, keys, r=None, pr=None, timeout=None, :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool + :param head_only: whether to fetch without value, so only metadata + (only available on PB transport) + :type head_only: bool :rtype: list of :class:`RiakObjects `, :class:`Datatypes `, or tuples of bucket_type, bucket, key, and the exception raised on fetch @@ -254,7 +264,8 @@ def multiget(self, keys, r=None, pr=None, timeout=None, bkeys = [(self.bucket_type.name, self.name, key) for key in keys] return self._client.multiget(bkeys, r=r, pr=pr, timeout=timeout, basic_quorum=basic_quorum, - notfound_ok=notfound_ok) + notfound_ok=notfound_ok, + head_only=head_only) def _get_resolver(self): if callable(self._resolver): @@ -406,7 +417,9 @@ def new_from_file(self, key, filename): :type filename: string :rtype: :class:`RiakObject ` """ - binary_data = open(filename, "rb").read() + binary_data = None + with open(filename, 'rb') as f: + binary_data = f.read() mimetype, encoding = mimetypes.guess_type(filename) if encoding: binary_data = bytearray(binary_data, encoding) @@ -582,6 +595,16 @@ def update_counter(self, key, value, **kwargs): increment_counter = update_counter + def get_preflist(self, key): + """ + Retrieve the preflist associated with a given bucket/key + + :param key: Name of the key. + :type key: string + :rtype: list of dict() + """ + return self._client.get_preflist(self, key) + def __str__(self): if self.bucket_type.is_default(): return ''.format(self.name) @@ -736,7 +759,3 @@ def __ne__(self, other): return hash(self) != hash(other) else: return True - - -from riak.riak_object import RiakObject -from riak.datatypes import TYPES diff --git a/riak/client/__init__.py b/riak/client/__init__.py index 1d7cfa68..7015b48f 100644 --- a/riak/client/__init__.py +++ b/riak/client/__init__.py @@ -1,23 +1,16 @@ -""" -Copyright 2011 Basho Technologies, Inc. 
-Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
try: import simplejson as json @@ -25,17 +18,20 @@ import json import random + from weakref import WeakValueDictionary from riak.client.operations import RiakClientOperations from riak.node import RiakNode from riak.bucket import RiakBucket, BucketType from riak.mapreduce import RiakMapReduceChain from riak.resolver import default_resolver -from riak.transports.http import RiakHttpPool -from riak.transports.pbc import RiakPbcPool +from riak.table import Table +from riak.transports.http import HttpPool +from riak.transports.tcp import TcpPool from riak.security import SecurityCreds from riak.util import lazy_property, bytes_to_str, str_to_bytes from six import string_types, PY2 +from riak.client.multi import MultiGetPool, MultiPutPool def default_encoder(obj): @@ -86,8 +82,10 @@ class RiakClient(RiakMapReduceChain, RiakClientOperations): #: The supported protocols PROTOCOLS = ['http', 'pbc'] - def __init__(self, protocol='pbc', transport_options={}, nodes=None, - credentials=None, multiget_pool_size=None, **unused_args): + def __init__(self, protocol='pbc', transport_options={}, + nodes=None, credentials=None, + multiget_pool_size=None, multiput_pool_size=None, + **kwargs): """ Construct a new ``RiakClient`` object. @@ -106,20 +104,26 @@ def __init__(self, protocol='pbc', transport_options={}, nodes=None, :meth:`multiget` operations. Defaults to a factor of the number of CPUs in the system :type multiget_pool_size: int + :param multiput_pool_size: the number of threads to use in + :meth:`multiput` operations. 
Defaults to a factor of the number of + CPUs in the system + :type multiput_pool_size: int """ - unused_args = unused_args.copy() + kwargs = kwargs.copy() if nodes is None: - self.nodes = [self._create_node(unused_args), ] + self.nodes = [self._create_node(kwargs), ] else: self.nodes = [self._create_node(n) for n in nodes] self._multiget_pool_size = multiget_pool_size + self._multiput_pool_size = multiput_pool_size self.protocol = protocol or 'pbc' self._resolver = None self._credentials = self._create_credentials(credentials) - self._http_pool = RiakHttpPool(self, **transport_options) - self._pb_pool = RiakPbcPool(self, **transport_options) + self._http_pool = HttpPool(self, **transport_options) + self._tcp_pool = TcpPool(self, **transport_options) + self._closed = False if PY2: self._encoders = {'application/json': default_encoder, @@ -139,6 +143,10 @@ def __init__(self, protocol='pbc', transport_options={}, nodes=None, 'binary/octet-stream': binary_encoder_decoder} self._buckets = WeakValueDictionary() self._bucket_types = WeakValueDictionary() + self._tables = WeakValueDictionary() + + def __del__(self): + self.close() def _get_protocol(self): return self._protocol @@ -185,7 +193,7 @@ def _get_client_id(self): def _set_client_id(self, client_id): for http in self._http_pool: http.client_id = client_id - for pb in self._pb_pool: + for pb in self._tcp_pool: pb.client_id = client_id client_id = property(_get_client_id, _set_client_id, @@ -267,8 +275,9 @@ def bucket(self, name, bucket_type='default'): raise TypeError('bucket_type must be a string ' 'or riak.bucket.BucketType') - return self._buckets.setdefault((bucket_type, name), - RiakBucket(self, name, bucket_type)) + b = RiakBucket(self, name, bucket_type) + return self._setdefault_handle_none( + self._buckets, (bucket_type, name), b) def bucket_type(self, name): """ @@ -276,28 +285,58 @@ def bucket_type(self, name): not always exist (unlike buckets), but this will always return a :class:`BucketType ` object. 
- :param name: the bucket name + :param name: the bucket-type name :type name: str :rtype: :class:`BucketType ` """ if not isinstance(name, string_types): - raise TypeError('Bucket name must be a string') + raise TypeError('BucketType name must be a string') + + btype = BucketType(self, name) + return self._setdefault_handle_none( + self._bucket_types, name, btype) + + def table(self, name): + """ + Gets the table by the specified name. Tables do + not always exist (unlike buckets), but this will always return + a :class:`Table ` object. + + :param name: the table name + :type name: str + :rtype: :class:`Table ` + """ + if not isinstance(name, string_types): + raise TypeError('Table name must be a string') - if name in self._bucket_types: - return self._bucket_types[name] + if name in self._tables: + return self._tables[name] else: - btype = BucketType(self, name) - self._bucket_types[name] = btype - return btype + table = Table(self, name) + self._tables[name] = table + return table def close(self): """ Iterate through all of the connections and close each one. 
""" - if self._http_pool is not None: - self._http_pool.clear() - if self._pb_pool is not None: - self._pb_pool.clear() + if not self._closed: + self._closed = True + self._stop_multi_pools() + if self._http_pool is not None: + self._http_pool.clear() + self._http_pool = None + if self._tcp_pool is not None: + self._tcp_pool.clear() + self._tcp_pool = None + + def _stop_multi_pools(self): + if self._multiget_pool: + self._multiget_pool.stop() + self._multiget_pool = None + if self._multiput_pool: + self._multiput_pool.stop() + self._multiput_pool = None def _create_node(self, n): if isinstance(n, RiakNode): @@ -349,6 +388,16 @@ def _error_rate(node): else: return random.choice(good) + def _setdefault_handle_none(self, wvdict, key, value): + # TODO FIXME FUTURE + # This is a workaround for Python issue 19542 + # http://bugs.python.org/issue19542 + rv = wvdict.setdefault(key, value) + if rv is None: + return value + else: + return rv + @lazy_property def _multiget_pool(self): if self._multiget_pool_size: @@ -356,6 +405,13 @@ def _multiget_pool(self): else: return None + @lazy_property + def _multiput_pool(self): + if self._multiput_pool_size: + return MultiPutPool(self._multiput_pool_size) + else: + return None + def __hash__(self): return hash(frozenset([(n.host, n.http_port, n.pb_port) for n in self.nodes])) @@ -371,5 +427,3 @@ def __ne__(self, other): return hash(self) != hash(other) else: return True - -from riak.client.multiget import MultiGetPool diff --git a/riak/client/index_page.py b/riak/client/index_page.py index 3d273e4b..8e094a66 100644 --- a/riak/client/index_page.py +++ b/riak/client/index_page.py @@ -1,20 +1,16 @@ -""" -Copyright 2013 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. 
You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from collections import namedtuple, Sequence diff --git a/riak/client/multi.py b/riak/client/multi.py new file mode 100644 index 00000000..681d3ec3 --- /dev/null +++ b/riak/client/multi.py @@ -0,0 +1,324 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function +from collections import namedtuple +from threading import Thread, Lock, Event +from multiprocessing import cpu_count +from six import PY2 + +from riak.riak_object import RiakObject +from riak.ts_object import TsObject + +if PY2: + from Queue import Queue, Empty +else: + from queue import Queue, Empty + +__all__ = ['multiget', 'multiput', 'MultiGetPool', 'MultiPutPool'] + + +try: + #: The default size of the worker pool, either based on the number + #: of CPUS or defaulting to 6 + POOL_SIZE = cpu_count() +except NotImplementedError: + # Make an educated guess + POOL_SIZE = 6 + +#: A :class:`namedtuple` for tasks that are fed to workers in the +#: multi get pool. +Task = namedtuple('Task', + ['client', 'outq', 'bucket_type', 'bucket', 'key', + 'object', 'options']) + + +#: A :class:`namedtuple` for tasks that are fed to workers in the +#: multi put pool. +PutTask = namedtuple('PutTask', + ['client', 'outq', 'object', 'options']) + + +class MultiPool(object): + """ + Encapsulates a pool of threads. These threads can be used + across many multi requests. + """ + + def __init__(self, size=POOL_SIZE, name='unknown'): + """ + :param size: the desired size of the worker pool + :type size: int + """ + + self._inq = Queue() + self._size = size + self._name = name + self._started = Event() + self._stop = Event() + self._lock = Lock() + self._workers = [] + + def enq(self, task): + """ + Enqueues a fetch task to the pool of workers. This will raise + a RuntimeError if the pool is stopped or in the process of + stopping. + + :param task: the Task object + :type task: Task or PutTask + """ + if not self._stop.is_set(): + self._inq.put(task) + else: + raise RuntimeError("Attempted to enqueue an operation while " + "multi pool was shutdown!") + + def start(self): + """ + Starts the worker threads if they are not already started. + This method is thread-safe and will be called automatically + when executing an operation. 
+ """ + # Check whether we are already started, skip if we are. + if not self._started.is_set(): + # If we are not started, try to capture the lock. + if self._lock.acquire(False): + # If we got the lock, go ahead and start the worker + # threads, set the started flag, and release the lock. + for i in range(self._size): + name = "riak.client.multi-worker-{0}-{1}".format( + self._name, i) + worker = Thread(target=self._worker_method, name=name) + worker.daemon = False + worker.start() + self._workers.append(worker) + self._started.set() + self._lock.release() + else: + # We didn't get the lock, so someone else is already + # starting the worker threads. Wait until they have + # signaled that the threads are started. + self._started.wait() + + def stop(self): + """ + Signals the worker threads to exit and waits on them. + """ + if not self.stopped(): + self._stop.set() + for worker in self._workers: + worker.join() + + def stopped(self): + """ + Detects whether this pool has been stopped. + """ + return self._stop.is_set() + + def __del__(self): + # Ensure that all work in the queue is processed before + # shutting down. + self.stop() + + def _worker_method(self): + raise NotImplementedError + + def _should_quit(self): + """ + Worker threads should exit when the stop flag is set and the + input queue is empty. Once the stop flag is set, new enqueues + are disallowed, meaning that the workers can safely drain the + queue before exiting. + + :rtype: bool + """ + return self.stopped() and self._inq.empty() + + +class MultiGetPool(MultiPool): + def __init__(self, size=POOL_SIZE): + super(MultiGetPool, self).__init__(size=size, name='get') + + def _worker_method(self): + """ + The body of the multi-get worker. Loops until + :meth:`_should_quit` returns ``True``, taking tasks off the + input queue, fetching the object, and putting them on the + output queue. 
+        """
+        while not self._should_quit():
+            try:
+                task = self._inq.get(block=True, timeout=0.25)
+            except TypeError:
+                if self._should_quit():
+                    break
+                else:
+                    raise
+            except Empty:
+                continue
+
+            try:
+                btype = task.client.bucket_type(task.bucket_type)
+                obj = btype.bucket(task.bucket).get(task.key, **task.options)
+                task.outq.put(obj)
+            except KeyboardInterrupt:
+                raise
+            except Exception as err:
+                errdata = (task.bucket_type, task.bucket, task.key, err)
+                task.outq.put(errdata)
+            finally:
+                self._inq.task_done()
+
+
+class MultiPutPool(MultiPool):
+    def __init__(self, size=POOL_SIZE):
+        super(MultiPutPool, self).__init__(size=size, name='put')
+
+    def _worker_method(self):
+        """
+        The body of the multi-put worker. Loops until
+        :meth:`_should_quit` returns ``True``, taking tasks off the
+        input queue, storing the object, and putting the result on
+        the output queue.
+        """
+        while not self._should_quit():
+            try:
+                task = self._inq.get(block=True, timeout=0.25)
+            except TypeError:
+                if self._should_quit():
+                    break
+                else:
+                    raise
+            except Empty:
+                continue
+
+            try:
+                obj = task.object
+                if isinstance(obj, RiakObject):
+                    rv = task.client.put(obj, **task.options)
+                elif isinstance(obj, TsObject):
+                    rv = task.client.ts_put(obj, **task.options)
+                else:
+                    raise ValueError('unknown obj type: {}'.format(type(obj)))
+                task.outq.put(rv)
+            except KeyboardInterrupt:
+                raise
+            except Exception as err:
+                errdata = (task.object, err)
+                task.outq.put(errdata)
+            finally:
+                self._inq.task_done()
+
+
+def multiget(client, keys, **options):
+    """Executes a parallel-fetch across multiple threads. Returns a list
+    containing :class:`~riak.riak_object.RiakObject` or
+    :class:`~riak.datatypes.Datatype` instances, or 4-tuples of
+    bucket-type, bucket, key, and the exception raised.
+
+    If a ``pool`` option is included, the request will use the given worker
+    pool and not a transient :class:`~riak.client.multi.MultiGetPool`. 
This + option will be passed by the client if the ``multiget_pool_size`` + option was set on client initialization. + + :param client: the client to use + :type client: :class:`~riak.client.RiakClient` + :param keys: the keys to fetch in parallel + :type keys: list of three-tuples -- bucket_type/bucket/key + :param options: request options to + :meth:`RiakBucket.get ` + :type options: dict + :rtype: list + + """ + transient_pool = False + outq = Queue() + + if 'pool' in options: + pool = options['pool'] + del options['pool'] + else: + pool = MultiGetPool() + transient_pool = True + + try: + pool.start() + for bucket_type, bucket, key in keys: + task = Task(client, outq, bucket_type, bucket, key, None, options) + pool.enq(task) + + results = [] + for _ in range(len(keys)): + if pool.stopped(): + raise RuntimeError( + 'Multi-get operation interrupted by pool ' + 'stopping!') + results.append(outq.get()) + outq.task_done() + finally: + if transient_pool: + pool.stop() + + return results + + +def multiput(client, objs, **options): + """Executes a parallel-store across multiple threads. Returns a list + containing booleans or :class:`~riak.riak_object.RiakObject` + + If a ``pool`` option is included, the request will use the given worker + pool and not a transient :class:`~riak.client.multi.MultiPutPool`. This + option will be passed by the client if the ``multiput_pool_size`` + option was set on client initialization. 
+ + :param client: the client to use + :type client: :class:`RiakClient ` + :param objs: the objects to store in parallel + :type objs: list of `RiakObject ` or + `TsObject ` + :param options: request options to + :meth:`RiakClient.put ` + :type options: dict + :rtype: list + """ + transient_pool = False + outq = Queue() + + if 'pool' in options: + pool = options['pool'] + del options['pool'] + else: + pool = MultiPutPool() + transient_pool = True + + try: + pool.start() + for obj in objs: + task = PutTask(client, outq, obj, options) + pool.enq(task) + + results = [] + for _ in range(len(objs)): + if pool.stopped(): + raise RuntimeError( + 'Multi-put operation interrupted by pool ' + 'stopping!') + results.append(outq.get()) + outq.task_done() + finally: + if transient_pool: + pool.stop() + + return results diff --git a/riak/client/multiget.py b/riak/client/multiget.py deleted file mode 100644 index a8573cc8..00000000 --- a/riak/client/multiget.py +++ /dev/null @@ -1,241 +0,0 @@ -""" -Copyright 2013 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
-""" - -from __future__ import print_function -from collections import namedtuple -from threading import Thread, Lock, Event -from multiprocessing import cpu_count -from six import PY2 -if PY2: - from Queue import Queue -else: - from queue import Queue - -__all__ = ['multiget', 'MultiGetPool'] - - -try: - #: The default size of the worker pool, either based on the number - #: of CPUS or defaulting to 6 - POOL_SIZE = cpu_count() -except NotImplementedError: - # Make an educated guess - POOL_SIZE = 6 - -#: A :class:`namedtuple` for tasks that are fed to workers in the -#: multiget pool. -Task = namedtuple('Task', ['client', 'outq', 'bucket_type', 'bucket', 'key', - 'options']) - - -class MultiGetPool(object): - """ - Encapsulates a pool of fetcher threads. These threads can be used - across many multi-get requests. - """ - - def __init__(self, size=POOL_SIZE): - """ - :param size: the desired size of the worker pool - :type size: int - """ - - self._inq = Queue() - self._size = size - self._started = Event() - self._stop = Event() - self._lock = Lock() - self._workers = [] - - def enq(self, task): - """ - Enqueues a fetch task to the pool of workers. This will raise - a RuntimeError if the pool is stopped or in the process of - stopping. - - :param task: the Task object - :type task: Task - """ - if not self._stop.is_set(): - self._inq.put(task) - else: - raise RuntimeError("Attempted to enqueue a fetch operation while " - "multi-get pool was shutdown!") - - def start(self): - """ - Starts the worker threads if they are not already started. - This method is thread-safe and will be called automatically - when executing a MultiGet operation. - """ - # Check whether we are already started, skip if we are. - if not self._started.is_set(): - # If we are not started, try to capture the lock. - if self._lock.acquire(False): - # If we got the lock, go ahead and start the worker - # threads, set the started flag, and release the lock. 
- for i in range(self._size): - name = "riak.client.multiget-worker-{0}".format(i) - worker = Thread(target=self._fetcher, name=name) - worker.daemon = True - worker.start() - self._workers.append(worker) - self._started.set() - self._lock.release() - else: - # We didn't get the lock, so someone else is already - # starting the worker threads. Wait until they have - # signaled that the threads are started. - self._started.wait() - - def stop(self): - """ - Signals the worker threads to exit and waits on them. - """ - self._stop.set() - for worker in self._workers: - worker.join() - - def stopped(self): - """ - Detects whether this pool has been stopped. - """ - return self._stop.is_set() - - def __del__(self): - # Ensure that all work in the queue is processed before - # shutting down. - self.stop() - - def _fetcher(self): - """ - The body of the multi-get worker. Loops until - :meth:`_should_quit` returns ``True``, taking tasks off the - input queue, fetching the object, and putting them on the - output queue. - """ - while not self._should_quit(): - task = self._inq.get() - try: - btype = task.client.bucket_type(task.bucket_type) - obj = btype.bucket(task.bucket).get(task.key, **task.options) - task.outq.put(obj) - except KeyboardInterrupt: - raise - except Exception as err: - task.outq.put((task.bucket_type, task.bucket, task.key, err), ) - finally: - self._inq.task_done() - - def _should_quit(self): - """ - Worker threads should exit when the stop flag is set and the - input queue is empty. Once the stop flag is set, new enqueues - are disallowed, meaning that the workers can safely drain the - queue before exiting. - - :rtype: bool - """ - return self.stopped() and self._inq.empty() - - -#: The default pool is automatically created and stored in this constant. -RIAK_MULTIGET_POOL = MultiGetPool() - - -def multiget(client, keys, **options): - """Executes a parallel-fetch across multiple threads. 
Returns a list - containing :class:`~riak.riak_object.RiakObject` or - :class:`~riak.datatypes.Datatype` instances, or 4-tuples of - bucket-type, bucket, key, and the exception raised. - - If a ``pool`` option is included, the request will use the given worker - pool and not the default :data:`RIAK_MULTIGET_POOL`. This option will - be passed by the client if the ``multiget_pool_size`` option was set on - client initialization. - - :param client: the client to use - :type client: :class:`~riak.client.RiakClient` - :param keys: the keys to fetch in parallel - :type keys: list of three-tuples -- bucket_type/bucket/key - :param options: request options to - :meth:`RiakBucket.get ` - :type options: dict - :rtype: list - - """ - outq = Queue() - - if 'pool' in options: - pool = options['pool'] - del options['pool'] - else: - pool = RIAK_MULTIGET_POOL - - pool.start() - for bucket_type, bucket, key in keys: - task = Task(client, outq, bucket_type, bucket, key, options) - pool.enq(task) - - results = [] - for _ in range(len(keys)): - if pool.stopped(): - raise RuntimeError("Multi-get operation interrupted by pool " - "stopping!") - results.append(outq.get()) - outq.task_done() - - return results - -if __name__ == '__main__': - # Run a benchmark! 
- from riak import RiakClient - import riak.benchmark as benchmark - client = RiakClient(protocol='pbc') - bkeys = [('default', 'multiget', str(key)) for key in range(10000)] - - data = open(__file__).read() - - print("Benchmarking multiget:") - print(" CPUs: {0}".format(cpu_count())) - print(" Threads: {0}".format(POOL_SIZE)) - print(" Keys: {0}".format(len(bkeys))) - print() - - with benchmark.measure() as b: - with b.report('populate'): - for _, bucket, key in bkeys: - client.bucket(bucket).new(key, encoded_data=data, - content_type='text/plain' - ).store() - for b in benchmark.measure_with_rehearsal(): - client.protocol = 'http' - with b.report('http seq'): - for _, bucket, key in bkeys: - client.bucket(bucket).get(key) - - with b.report('http multi'): - multiget(client, bkeys) - - client.protocol = 'pbc' - with b.report('pbc seq'): - for _, bucket, key in bkeys: - client.bucket(bucket).get(key) - - with b.report('pbc multi'): - multiget(client, bkeys) diff --git a/riak/client/operations.py b/riak/client/operations.py index 07109846..0d507f12 100644 --- a/riak/client/operations.py +++ b/riak/client/operations.py @@ -1,28 +1,27 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" - +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import riak.client.multi + +from riak import ListError from riak.client.transport import RiakClientTransport, \ - retryable, retryableHttpOnly -from riak.client.multiget import multiget + retryable, retryableHttpOnly from riak.client.index_page import IndexPage from riak.datatypes import TYPES +from riak.table import Table from riak.util import bytes_to_str -from six import string_types, PY2 class RiakClientOperations(RiakClientTransport): @@ -56,13 +55,17 @@ def get_buckets(self, transport, bucket_type=None, timeout=None): :rtype: list of :class:`RiakBucket ` instances """ + if not riak.disable_list_exceptions: + raise ListError() + _validate_timeout(timeout) + if bucket_type: - bucketfn = lambda name: bucket_type.bucket(name) + bucketfn = self._bucket_type_bucket_builder else: - bucketfn = lambda name: self.bucket(name) + bucketfn = self._default_type_bucket_builder - return [bucketfn(bytes_to_str(name)) for name in + return [bucketfn(bytes_to_str(name), bucket_type) for name in transport.get_buckets(bucket_type=bucket_type, timeout=timeout)] @@ -101,25 +104,25 @@ def stream_buckets(self, bucket_type=None, timeout=None): ` instances """ + if not riak.disable_list_exceptions: + raise ListError() + _validate_timeout(timeout) + if bucket_type: - bucketfn = lambda name: bucket_type.bucket(name) + bucketfn = self._bucket_type_bucket_builder else: - bucketfn = lambda name: self.bucket(name) + bucketfn = self._default_type_bucket_builder - resource = self._acquire() - transport = resource.object - stream = 
transport.stream_buckets(bucket_type=bucket_type, - timeout=timeout) - stream.attach(resource) - try: - for bucket_list in stream: - bucket_list = [bucketfn(bytes_to_str(name)) - for name in bucket_list] - if len(bucket_list) > 0: - yield bucket_list - finally: - stream.close() + def make_op(transport): + return transport.stream_buckets( + bucket_type=bucket_type, timeout=timeout) + + for bucket_list in self._stream_with_retry(make_op): + bucket_list = [bucketfn(bytes_to_str(name), bucket_type) + for name in bucket_list] + if len(bucket_list) > 0: + yield bucket_list @retryable def ping(self, transport): @@ -172,8 +175,7 @@ def get_index(self, transport, bucket, index, startkey, endkey=None, :type term_regex: string :rtype: :class:`~riak.client.index_page.IndexPage` """ - if timeout != 'infinity': - _validate_timeout(timeout) + _validate_timeout(timeout, infinity_ok=True) page = IndexPage(self, bucket, index, startkey, endkey, return_terms, max_results, term_regex) @@ -282,8 +284,9 @@ def stream_index(self, bucket, index, startkey, endkey=None, :rtype: :class:`~riak.client.index_page.IndexPage` """ - if timeout != 'infinity': - _validate_timeout(timeout) + # TODO FUTURE: implement "retry on connection closed" + # as in stream_mapred + _validate_timeout(timeout, infinity_ok=True) page = IndexPage(self, bucket, index, startkey, endkey, return_terms, max_results, term_regex) @@ -355,6 +358,8 @@ def paginate_stream_index(self, bucket, index, startkey, endkey=None, :class:`~riak.client.index_page.IndexPage` """ + # TODO FUTURE: implement "retry on connection closed" + # as in stream_mapred page = self.stream_index(bucket, index, startkey, endkey=endkey, max_results=max_results, @@ -398,6 +403,7 @@ def set_bucket_props(self, transport, bucket, props): :param props: the properties to set :type props: dict """ + _validate_bucket_props(props) return transport.set_bucket_props(bucket, props) @retryable @@ -446,6 +452,7 @@ def set_bucket_type_props(self, transport, 
bucket_type, props): :param props: the properties to set :type props: dict """ + _validate_bucket_props(props) return transport.set_bucket_type_props(bucket_type, props) @retryable @@ -467,7 +474,11 @@ def get_keys(self, transport, bucket, timeout=None): :type timeout: int :rtype: list """ + if not riak.disable_list_exceptions: + raise ListError() + _validate_timeout(timeout) + return transport.get_keys(bucket, timeout=timeout) def stream_keys(self, bucket, timeout=None): @@ -503,20 +514,20 @@ def stream_keys(self, bucket, timeout=None): :type timeout: int :rtype: iterator """ + if not riak.disable_list_exceptions: + raise ListError() + _validate_timeout(timeout) - resource = self._acquire() - transport = resource.object - stream = transport.stream_keys(bucket, timeout=timeout) - stream.attach(resource) - try: - for keylist in stream: - if len(keylist) > 0: - if PY2: - yield keylist - else: - yield [bytes_to_str(item) for item in keylist] - finally: - stream.close() + + def make_op(transport): + return transport.stream_keys(bucket, timeout=timeout) + + for keylist in self._stream_with_retry(make_op): + if len(keylist) > 0: + if six.PY2: + yield keylist + else: + yield [bytes_to_str(item) for item in keylist] @retryable def put(self, transport, robj, w=None, dw=None, pw=None, return_body=None, @@ -553,9 +564,157 @@ def put(self, transport, robj, w=None, dw=None, pw=None, return_body=None, if_none_match=if_none_match, timeout=timeout) + @retryable + def ts_describe(self, transport, table): + """ + ts_describe(table) + + Retrieve a time series table description from the Riak cluster. + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. + + :param table: The timeseries table. 
+ :type table: string or :class:`Table ` + :rtype: :class:`TsObject ` + """ + t = table + if isinstance(t, six.string_types): + t = Table(self, table) + return transport.ts_describe(t) + + @retryable + def ts_get(self, transport, table, key): + """ + ts_get(table, key) + + Retrieve timeseries value by key + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. + + :param table: The timeseries table. + :type table: string or :class:`Table ` + :param key: The timeseries value's key. + :type key: list + :rtype: :class:`TsObject ` + """ + t = table + if isinstance(t, six.string_types): + t = Table(self, table) + return transport.ts_get(t, key) + + @retryable + def ts_put(self, transport, tsobj): + """ + ts_put(tsobj) + + Stores time series data in the Riak cluster. + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. + + :param tsobj: the time series object to store + :type tsobj: RiakTsObject + :rtype: boolean + """ + return transport.ts_put(tsobj) + + @retryable + def ts_delete(self, transport, table, key): + """ + ts_delete(table, key) + + Delete timeseries value by key + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. + + :param table: The timeseries table. + :type table: string or :class:`Table ` + :param key: The timeseries value's key. + :type key: list or dict + :rtype: boolean + """ + t = table + if isinstance(t, six.string_types): + t = Table(self, table) + return transport.ts_delete(t, key) + + @retryable + def ts_query(self, transport, table, query, interpolations=None): + """ + ts_query(table, query, interpolations=None) + + Queries time series data in the Riak cluster. + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. + + :param table: The timeseries table. 
+ :type table: string or :class:`Table ` + :param query: The timeseries query. + :type query: string + :rtype: :class:`TsObject ` + """ + t = table + if isinstance(t, six.string_types): + t = Table(self, table) + return transport.ts_query(t, query, interpolations) + + def ts_stream_keys(self, table, timeout=None): + """ + Lists all keys in a time series table via a stream. This is a + generator method which should be iterated over. + + The caller should explicitly close the returned iterator, + either using :func:`contextlib.closing` or calling ``close()`` + explicitly. Consuming the entire iterator will also close the + stream. If it does not, the associated connection might + not be returned to the pool. Example:: + + from contextlib import closing + + # Using contextlib.closing + with closing(client.ts_stream_keys(mytable)) as keys: + for key_list in keys: + do_something(key_list) + + # Explicit close() + stream = client.ts_stream_keys(mytable) + for key_list in stream: + do_something(key_list) + stream.close() + + :param table: the table from which to stream keys + :type table: string or :class:`Table ` + :param timeout: a timeout value in milliseconds + :type timeout: int + :rtype: iterator + """ + if not riak.disable_list_exceptions: + raise ListError() + + t = table + if isinstance(t, six.string_types): + t = Table(self, table) + + _validate_timeout(timeout) + + resource = self._acquire() + transport = resource.object + stream = transport.ts_stream_keys(t, timeout) + stream.attach(resource) + try: + for keylist in stream: + if len(keylist) > 0: + yield keylist + finally: + stream.close() + @retryable def get(self, transport, robj, r=None, pr=None, timeout=None, - basic_quorum=None, notfound_ok=None): + basic_quorum=None, notfound_ok=None, head_only=False): """ get(robj, r=None, pr=None, timeout=None) @@ -577,15 +736,19 @@ def get(self, transport, robj, r=None, pr=None, timeout=None, :type basic_quorum: bool :param notfound_ok: whether to treat not-found 
responses as successful :type notfound_ok: bool + :param head_only: whether to fetch without value, so only metadata + (only available on PB transport) + :type head_only: bool """ _validate_timeout(timeout) - if not isinstance(robj.key, string_types): + if not isinstance(robj.key, six.string_types): raise TypeError( 'key must be a string, instead got {0}'.format(repr(robj.key))) return transport.get(robj, r=r, pr=pr, timeout=timeout, basic_quorum=basic_quorum, - notfound_ok=notfound_ok) + notfound_ok=notfound_ok, + head_only=head_only) @retryable def delete(self, transport, robj, rw=None, r=None, w=None, dw=None, @@ -674,18 +837,16 @@ def stream_mapred(self, inputs, query, timeout): :rtype: iterator """ _validate_timeout(timeout) - resource = self._acquire() - transport = resource.object - stream = transport.stream_mapred(inputs, query, timeout) - stream.attach(resource) - try: - for phase, data in stream: - yield phase, data - finally: - stream.close() + + def make_op(transport): + return transport.stream_mapred(inputs, query, timeout) + + for phase, data in self._stream_with_retry(make_op): + yield phase, data @retryable - def create_search_index(self, transport, index, schema=None, n_val=None): + def create_search_index(self, transport, index, schema=None, n_val=None, + timeout=None): """ create_search_index(index, schema=None, n_val=None) @@ -698,8 +859,10 @@ def create_search_index(self, transport, index, schema=None, n_val=None): :type schema: string, None :param n_val: this indexes N value :type n_val: integer, None + :param timeout: optional timeout (in ms) + :type timeout: integer, None """ - return transport.create_search_index(index, schema, n_val) + return transport.create_search_index(index, schema, n_val, timeout) @retryable def get_search_index(self, transport, index): @@ -850,7 +1013,22 @@ def multiget(self, pairs, **params): """ if self._multiget_pool: params['pool'] = self._multiget_pool - return multiget(self, pairs, **params) + return 
riak.client.multi.multiget(self, pairs, **params) + + def multiput(self, objs, **params): + """ + Stores objects in parallel via threads. + + :param objs: the objects to store + :type objs: list of `RiakObject ` + :param params: additional request flags, e.g. w, dw, pw + :type params: dict + :rtype: list of boolean or + :class:`RiakObjects `, + """ + if self._multiput_pool: + params['pool'] = self._multiput_pool + return riak.client.multi.multiput(self, objs, **params) @retryable def get_counter(self, transport, bucket, key, r=None, pr=None, @@ -913,11 +1091,7 @@ def update_counter(self, bucket, key, value, w=None, dw=None, pw=None, :param returnvalue: whether to return the updated value of the counter :type returnvalue: bool """ - if PY2: - valid_types = (int, long) - else: - valid_types = (int,) - if type(value) not in valid_types: + if not isinstance(value, six.integer_types): raise TypeError("Counter update amount must be an integer") if value == 0: raise ValueError("Cannot increment counter by 0") @@ -1000,6 +1174,44 @@ def update_datatype(self, datatype, w=None, dw=None, pw=None, timeout=timeout, include_context=include_context) + @retryable + def get_preflist(self, transport, bucket, key): + """ + Fetch the preflist for a given bucket and key. + + .. note:: This request is automatically retried :attr:`retries` + times if it fails due to network error. 
+ + :param bucket: the bucket whose index will be queried + :type bucket: RiakBucket + :param key: the key of the preflist + :type key: string + + :return: list of dicts (partition, node, primary) + """ + return transport.get_preflist(bucket, key) + + def _bucket_type_bucket_builder(self, name, bucket_type): + """ + Build a bucket from a bucket type + + :param name: Bucket name + :param bucket_type: A bucket type + :return: A bucket object + """ + return bucket_type.bucket(name) + + def _default_type_bucket_builder(self, name, unused): + """ + Build a bucket for the default bucket type + + :param name: Default bucket name + :param unused: Unused + :return: A bucket object + """ + del unused # Ignored parameters. + return self.bucket(name) + @retryable def _fetch_datatype(self, transport, bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None, @@ -1047,11 +1259,30 @@ def _fetch_datatype(self, transport, bucket, key, r=None, pr=None, include_context=include_context) -def _validate_timeout(timeout): +def _validate_bucket_props(props): + if 'hll_precision' in props: + precision = props['hll_precision'] + if precision < 4 or precision > 16: + raise ValueError( + 'hll_precision must be between 4 and 16, inclusive') + + +def _validate_timeout(timeout, infinity_ok=False): """ Raises an exception if the given timeout is an invalid value. 
""" - if not (timeout is None or - ((type(timeout) == int or (PY2 and type(timeout) == long)) - and timeout > 0)): - raise ValueError("timeout must be a positive integer") + if timeout is None: + return + + if timeout == 'infinity': + if infinity_ok: + return + else: + raise ValueError( + 'timeout must be a positive integer ' + '("infinity" is not valid)') + + if isinstance(timeout, six.integer_types) and timeout > 0: + return + + raise ValueError('timeout must be a positive integer') diff --git a/riak/client/transport.py b/riak/client/transport.py index 027951d6..ffc705e4 100644 --- a/riak/client/transport.py +++ b/riak/client/transport.py @@ -1,26 +1,25 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from contextlib import contextmanager -from riak.transports.pool import BadResource -from riak.transports.pbc import is_retryable as is_pbc_retryable +from riak.transports.pool import BadResource, ConnectionClosed +from riak.transports.tcp import is_retryable as is_tcp_retryable from riak.transports.http import is_retryable as is_http_retryable -import threading from six import PY2 + +import threading + if PY2: from httplib import HTTPException else: @@ -49,7 +48,7 @@ class RiakClientTransport(object): # These will be set or redefined by the RiakClient initializer protocol = 'pbc' _http_pool = None - _pb_pool = None + _tcp_pool = None _locals = _client_locals() def _get_retry_count(self): @@ -101,7 +100,8 @@ def _transport(self): _transport() Yields a single transport to the caller from the default pool, - without retries. + without retries. NB: no need to re-try as this method is only + used by CRDT operations that should never be re-tried. """ pool = self._choose_pool() with pool.transaction() as transport: @@ -115,6 +115,31 @@ def _acquire(self): """ return self._choose_pool().acquire() + def _stream_with_retry(self, make_op): + first_try = True + while True: + resource = self._acquire() + transport = resource.object + streaming_op = None + try: + streaming_op = make_op(transport) + streaming_op.attach(resource) + for item in streaming_op: + yield item + break + except BadResource as e: + resource.errored = True + # NB: *only* re-try if connection closed happened + # at the start of the streaming op + if first_try and not e.mid_stream: + continue + else: + raise + finally: + first_try = False + if streaming_op: + streaming_op.close() + def _with_retries(self, pool, fn): """ Performs the passed function with retries against the given pool. 
@@ -129,26 +154,38 @@ def _with_retries(self, pool, fn): def _skip_bad_nodes(transport): return transport._node not in skip_nodes - retry_count = self.retries - - for retry in range(retry_count): + retry_count = self.retries - 1 + first_try = True + current_try = 0 + while True: try: - with pool.transaction(_filter=_skip_bad_nodes) as transport: + with pool.transaction( + _filter=_skip_bad_nodes, + yield_resource=True) as resource: + transport = resource.object try: return fn(transport) - except (IOError, HTTPException) as e: + except (IOError, HTTPException, ConnectionClosed) as e: + resource.errored = True if _is_retryable(e): transport._node.error_rate.incr(1) skip_nodes.append(transport._node) - raise BadResource(e) + if first_try: + continue + else: + raise BadResource(e) else: raise except BadResource as e: - if retry < (retry_count - 1): + if current_try < retry_count: + resource.errored = True + current_try += 1 continue else: # Re-raise the inner exception raise e.args[0] + finally: + first_try = False def _choose_pool(self, protocol=None): """ @@ -163,10 +200,13 @@ def _choose_pool(self, protocol=None): protocol = self.protocol if protocol == 'http': pool = self._http_pool - elif protocol == 'pbc': - pool = self._pb_pool + elif protocol == 'tcp' or protocol == 'pbc': + pool = self._tcp_pool else: raise ValueError("invalid protocol %s" % protocol) + if pool is None or self._closed: + # NB: GH-500, this can happen if client is closed + raise RuntimeError("Client is closed.") return pool @@ -179,9 +219,10 @@ def _is_retryable(error): :type error: Exception :rtype: boolean """ - return is_pbc_retryable(error) or is_http_retryable(error) + return is_tcp_retryable(error) or is_http_retryable(error) +# http://thecodeship.com/patterns/guide-to-python-function-decorators/ def retryable(fn, protocol=None): """ Wraps a client operation that can be retried according to the set diff --git a/riak/codecs/__init__.py b/riak/codecs/__init__.py new file mode 100644 index 
00000000..b824fcc0 --- /dev/null +++ b/riak/codecs/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +import riak.pb.messages + +from riak import RiakError +from riak.codecs.util import parse_pbuf_msg +from riak.util import bytes_to_str + +Msg = collections.namedtuple('Msg', + ['msg_code', 'data', 'resp_code']) + + +class Codec(object): + def parse_msg(self): + raise NotImplementedError('parse_msg not implemented') + + def maybe_incorrect_code(self, resp_code, expect=None): + if expect and resp_code != expect: + raise RiakError("unexpected message code: %d, expected %d" + % (resp_code, expect)) + + def maybe_riak_error(self, msg_code, data=None): + if msg_code == riak.pb.messages.MSG_CODE_ERROR_RESP: + if data is None: + raise RiakError('no error provided!') + else: + err = parse_pbuf_msg(msg_code, data) + raise RiakError(bytes_to_str(err.errmsg)) diff --git a/riak/transports/http/codec.py b/riak/codecs/http.py similarity index 90% rename from riak/transports/http/codec.py rename to riak/codecs/http.py index 0a9db54f..b981b77a 100644 --- a/riak/transports/http/codec.py +++ b/riak/codecs/http.py @@ -1,35 +1,21 @@ -""" -Copyright 2012 Basho Technologies, Inc. 
-Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" - -# subtract length of "Link: " header string and newline -MAX_LINK_HEADER_SIZE = 8192 - 8 - +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import re import csv -from six import PY2, PY3 -if PY2: - from urllib import unquote_plus -else: - from urllib.parse import unquote_plus +import six + from cgi import parse_header from email import message_from_string from email.utils import parsedate_tz, mktime_tz @@ -41,8 +27,17 @@ from riak.transports.http.search import XMLSearchResult from riak.util import decode_index_value, bytes_to_str +if six.PY2: + from urllib import unquote_plus +else: + from urllib.parse import unquote_plus + -class RiakHttpCodec(object): +# subtract length of "Link: " header string and newline +MAX_LINK_HEADER_SIZE = 8192 - 8 + + +class HttpCodec(object): """ Methods for HTTP transport that marshals and unmarshals HTTP messages. @@ -82,7 +77,7 @@ def _parse_body(self, robj, response, expected_statuses): elif status == 300: ctype, params = parse_header(headers['content-type']) if ctype == 'multipart/mixed': - if PY3: + if six.PY3: data = bytes_to_str(data) boundary = re.compile('\r?\n--%s(?:--)?\r?\n' % re.escape(params['boundary'])) @@ -231,6 +226,12 @@ def _normalize_json_search_response(self, json): same return value """ result = {} + if 'facet_counts' in json: + result['facet_counts'] = json[u'facet_counts'] + if 'grouped' in json: + result['grouped'] = json[u'grouped'] + if 'stats' in json: + result['stats'] = json[u'stats'] if u'response' in json: result['num_found'] = json[u'response'][u'numFound'] result['max_score'] = float(json[u'response'][u'maxScore']) @@ -244,7 +245,7 @@ def _normalize_json_search_response(self, json): # Riak Search 1.0 Legacy assumptions about format resdoc[u'id'] = doc[u'id'] if u'fields' in doc: - for k, v in doc[u'fields'].iteritems(): + for k, v in six.iteritems(doc[u'fields']): resdoc[k] = v docs.append(resdoc) result['docs'] = docs @@ -278,7 +279,6 @@ def _parse_content_type(self, value): def _decode_datatype(self, dtype, value): if not dtype == 'map': return value - map = {} for key in value: field = self._map_key_to_pair(key) @@ -300,13 +300,17 @@ 
def _encode_dt_op(self, dtype, op): elif dtype == 'flag': return op elif dtype == 'set': - # self._encode_set_op(msg, op) set_op = {} if 'adds' in op: set_op['add_all'] = op['adds'] if 'removes' in op: set_op['remove_all'] = op['removes'] return set_op + elif dtype == 'hll': + hll_op = {} + if 'adds' in op: + hll_op['add_all'] = op['adds'] + return hll_op elif dtype == 'map': map_op = {} for fop in op: diff --git a/riak/codecs/pbuf.py b/riak/codecs/pbuf.py new file mode 100644 index 00000000..0b4de2a6 --- /dev/null +++ b/riak/codecs/pbuf.py @@ -0,0 +1,1276 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import datetime

import six

import riak.pb.messages
import riak.pb.riak_pb2
import riak.pb.riak_dt_pb2
import riak.pb.riak_kv_pb2
# NOTE: riak_search_pb2 / riak_yokozuna_pb2 are referenced later in this
# module (encode_search, encode_create_search_index, ...). Import them
# explicitly instead of relying on riak.pb.messages having imported them
# as a side effect (submodule attribute binding on the riak.pb package).
import riak.pb.riak_search_pb2
import riak.pb.riak_ts_pb2
import riak.pb.riak_yokozuna_pb2

from riak import RiakError
from riak.codecs import Codec, Msg
from riak.codecs.util import parse_pbuf_msg
from riak.content import RiakContent
from riak.multidict import MultiDict
from riak.pb.riak_ts_pb2 import TsColumnType
from riak.riak_object import VClock
from riak.ts_object import TsColumns
from riak.util import decode_index_value, str_to_bytes, bytes_to_str, \
    unix_time_millis, datetime_from_unix_time_millis


def _invert(d):
    """Return a new dict mapping every value of ``d`` back to its key."""
    out = {}
    for key in d:
        value = d[key]
        out[value] = key
    return out


# Wire <-> Python translations for the bucket 'repl' property.
REPL_TO_PY = {
    riak.pb.riak_pb2.RpbBucketProps.FALSE: False,
    riak.pb.riak_pb2.RpbBucketProps.TRUE: True,
    riak.pb.riak_pb2.RpbBucketProps.REALTIME: 'realtime',
    riak.pb.riak_pb2.RpbBucketProps.FULLSYNC: 'fullsync'
}

REPL_TO_PB = _invert(REPL_TO_PY)

# Magic quorum sentinels used by the riak client protocol.
RIAKC_RW_ONE = 4294967294
RIAKC_RW_QUORUM = 4294967293
RIAKC_RW_ALL = 4294967292
RIAKC_RW_DEFAULT = 4294967291

QUORUM_TO_PB = {'default': RIAKC_RW_DEFAULT,
                'all': RIAKC_RW_ALL,
                'quorum': RIAKC_RW_QUORUM,
                'one': RIAKC_RW_ONE}

QUORUM_TO_PY = _invert(QUORUM_TO_PB)

# Bucket properties grouped by how they are encoded on the wire.
NORMAL_PROPS = ['n_val', 'allow_mult', 'last_write_wins', 'old_vclock',
                'young_vclock', 'big_vclock', 'small_vclock', 'basic_quorum',
                'notfound_ok', 'search', 'backend', 'search_index', 'datatype',
                'write_once', 'hll_precision']
COMMIT_HOOK_PROPS = ['precommit', 'postcommit']
MODFUN_PROPS = ['chash_keyfun', 'linkfun']
QUORUM_PROPS = ['r', 'pr', 'w', 'pw', 'dw', 'rw']

# Bidirectional mapping between MapField enum values and their names.
MAP_FIELD_TYPES = {
    riak.pb.riak_dt_pb2.MapField.COUNTER: 'counter',
    riak.pb.riak_dt_pb2.MapField.SET: 'set',
    riak.pb.riak_dt_pb2.MapField.REGISTER: 'register',
    riak.pb.riak_dt_pb2.MapField.FLAG: 'flag',
    riak.pb.riak_dt_pb2.MapField.MAP: 'map',
    'counter': riak.pb.riak_dt_pb2.MapField.COUNTER,
    'set': riak.pb.riak_dt_pb2.MapField.SET,
    'register': riak.pb.riak_dt_pb2.MapField.REGISTER,
    'flag': riak.pb.riak_dt_pb2.MapField.FLAG,
    'map': riak.pb.riak_dt_pb2.MapField.MAP
}

DT_FETCH_TYPES = {
    riak.pb.riak_dt_pb2.DtFetchResp.COUNTER: 'counter',
    riak.pb.riak_dt_pb2.DtFetchResp.SET: 'set',
    riak.pb.riak_dt_pb2.DtFetchResp.MAP: 'map',
    riak.pb.riak_dt_pb2.DtFetchResp.HLL: 'hll'
}


class PbufCodec(Codec):
    '''
    Protobuffs Encoding and decoding methods for TcpTransport.
    '''

    def __init__(self,
                 client_timeouts=False, quorum_controls=False,
                 tombstone_vclocks=False, bucket_types=False):
        if riak.pb is None:
            raise NotImplementedError("this codec is not available")
        # Feature flags negotiated from the server's capabilities.
        self._client_timeouts = client_timeouts
        self._quorum_controls = quorum_controls
        self._tombstone_vclocks = tombstone_vclocks
        self._bucket_types = bucket_types

    def parse_msg(self, msg_code, data):
        """Parse a raw protobuf payload into a message object."""
        return parse_pbuf_msg(msg_code, data)

    def encode_auth(self, username, password):
        """Encode an authentication request."""
        req = riak.pb.riak_pb2.RpbAuthReq()
        req.user = str_to_bytes(username)
        req.password = str_to_bytes(password)
        mc = riak.pb.messages.MSG_CODE_AUTH_REQ
        rc = riak.pb.messages.MSG_CODE_AUTH_RESP
        return Msg(mc, req.SerializeToString(), rc)

    def encode_ping(self):
        """Encode a ping request (no payload)."""
        return Msg(riak.pb.messages.MSG_CODE_PING_REQ, None,
                   riak.pb.messages.MSG_CODE_PING_RESP)

    def encode_quorum(self, rw):
        """
        Converts a symbolic quorum value into its on-the-wire
        equivalent.

        :param rw: the quorum
        :type rw: string, integer
        :rtype: integer
        """
        if rw in QUORUM_TO_PB:
            return QUORUM_TO_PB[rw]
        elif type(rw) is int and rw >= 0:
            return rw
        else:
            return None

    def decode_quorum(self, rw):
        """
        Converts a protobuf quorum value to a symbolic value if
        necessary.

        :param rw: the quorum
        :type rw: int
        :rtype int or string
        """
        if rw in QUORUM_TO_PY:
            return QUORUM_TO_PY[rw]
        else:
            return rw

    def decode_contents(self, contents, obj):
        """
        Decodes the list of siblings from the protobuf representation
        into the object.

        :param contents: a list of RpbContent messages
        :type contents: list
        :param obj: a RiakObject
        :type obj: RiakObject
        :rtype RiakObject
        """
        obj.siblings = [self.decode_content(c, RiakContent(obj))
                        for c in contents]
        # Invoke sibling-resolution logic
        if len(obj.siblings) > 1 and obj.resolver is not None:
            obj.resolver(obj)
        return obj

    def decode_content(self, rpb_content, sibling):
        """
        Decodes a single sibling from the protobuf representation into
        a RiakObject.

        :param rpb_content: a single RpbContent message
        :type rpb_content: riak.pb.riak_pb2.RpbContent
        :param sibling: a RiakContent sibling container
        :type sibling: RiakContent
        :rtype: RiakContent
        """
        if rpb_content.HasField("deleted") and rpb_content.deleted:
            sibling.exists = False
        else:
            sibling.exists = True
        if rpb_content.HasField("content_type"):
            sibling.content_type = bytes_to_str(rpb_content.content_type)
        if rpb_content.HasField("charset"):
            sibling.charset = bytes_to_str(rpb_content.charset)
        if rpb_content.HasField("content_encoding"):
            sibling.content_encoding = \
                bytes_to_str(rpb_content.content_encoding)
        if rpb_content.HasField("vtag"):
            sibling.etag = bytes_to_str(rpb_content.vtag)

        sibling.links = [self.decode_link(link)
                         for link in rpb_content.links]
        if rpb_content.HasField("last_mod"):
            sibling.last_modified = float(rpb_content.last_mod)
            if rpb_content.HasField("last_mod_usecs"):
                sibling.last_modified += rpb_content.last_mod_usecs / 1000000.0

        sibling.usermeta = dict([(bytes_to_str(usermd.key),
                                  bytes_to_str(usermd.value))
                                 for usermd in rpb_content.usermeta])
        sibling.indexes = set([(bytes_to_str(index.key),
                                decode_index_value(index.key, index.value))
                               for index in rpb_content.indexes])
        sibling.encoded_data = rpb_content.value

        return sibling

    def encode_content(self, robj, rpb_content):
        """
        Fills an RpbContent message with the appropriate data and
        metadata from a RiakObject.

        :param robj: a RiakObject
        :type robj: RiakObject
        :param rpb_content: the protobuf message to fill
        :type rpb_content: riak.pb.riak_pb2.RpbContent
        """
        if robj.content_type:
            rpb_content.content_type = str_to_bytes(robj.content_type)
        if robj.charset:
            rpb_content.charset = str_to_bytes(robj.charset)
        if robj.content_encoding:
            rpb_content.content_encoding = str_to_bytes(robj.content_encoding)
        for uk in robj.usermeta:
            pair = rpb_content.usermeta.add()
            pair.key = str_to_bytes(uk)
            pair.value = str_to_bytes(robj.usermeta[uk])
        for link in robj.links:
            pb_link = rpb_content.links.add()
            try:
                bucket, key, tag = link
            except ValueError:
                raise RiakError("Invalid link tuple %s" % link)

            pb_link.bucket = str_to_bytes(bucket)
            pb_link.key = str_to_bytes(key)
            if tag:
                pb_link.tag = str_to_bytes(tag)
            else:
                pb_link.tag = str_to_bytes('')

        for field, value in robj.indexes:
            pair = rpb_content.indexes.add()
            pair.key = str_to_bytes(field)
            pair.value = str_to_bytes(str(value))

        # Python 2.x data is stored in a string
        if six.PY2:
            rpb_content.value = str(robj.encoded_data)
        else:
            rpb_content.value = robj.encoded_data

    def decode_link(self, link):
        """
        Decodes an RpbLink message into a tuple

        :param link: an RpbLink message
        :type link: riak.pb.riak_pb2.RpbLink
        :rtype tuple
        """
        if link.HasField("bucket"):
            bucket = bytes_to_str(link.bucket)
        else:
            bucket = None
        if link.HasField("key"):
            key = bytes_to_str(link.key)
        else:
            key = None
        if link.HasField("tag"):
            tag = bytes_to_str(link.tag)
        else:
            tag = None

        return (bucket, key, tag)

    def decode_index_value(self, index, value):
        """
        Decodes a secondary index value into the correct Python type.

        :param index: the name of the index
        :type index: str
        :param value: the value of the index entry
        :type value: str
        :rtype str or int
        """
        if index.endswith("_int"):
            return int(value)
        else:
            return bytes_to_str(value)

    def encode_bucket_props(self, props, msg):
        """
        Encodes a dict of bucket properties into the protobuf message.

        :param props: bucket properties
        :type props: dict
        :param msg: the protobuf message to fill
        :type msg: riak.pb.riak_pb2.RpbSetBucketReq
        """
        for prop in NORMAL_PROPS:
            if prop in props and props[prop] is not None:
                if isinstance(props[prop], six.string_types):
                    setattr(msg.props, prop, str_to_bytes(props[prop]))
                else:
                    setattr(msg.props, prop, props[prop])
        for prop in COMMIT_HOOK_PROPS:
            if prop in props:
                setattr(msg.props, 'has_' + prop, True)
                self.encode_hooklist(props[prop], getattr(msg.props, prop))
        for prop in MODFUN_PROPS:
            if prop in props and props[prop] is not None:
                self.encode_modfun(props[prop], getattr(msg.props, prop))
        for prop in QUORUM_PROPS:
            if prop in props and props[prop] not in (None, 'default'):
                value = self.encode_quorum(props[prop])
                if value is not None:
                    if isinstance(value, six.string_types):
                        setattr(msg.props, prop, str_to_bytes(value))
                    else:
                        setattr(msg.props, prop, value)
        if 'repl' in props:
            msg.props.repl = REPL_TO_PB[props['repl']]

        return msg

    def decode_bucket_props(self, msg):
        """
        Decodes the protobuf bucket properties message into a dict.

        :param msg: the protobuf message to decode
        :type msg: riak.pb.riak_pb2.RpbBucketProps
        :rtype dict
        """
        props = {}
        for prop in NORMAL_PROPS:
            if msg.HasField(prop):
                props[prop] = getattr(msg, prop)
                if isinstance(props[prop], bytes):
                    props[prop] = bytes_to_str(props[prop])
        for prop in COMMIT_HOOK_PROPS:
            if getattr(msg, 'has_' + prop):
                props[prop] = self.decode_hooklist(getattr(msg, prop))
        for prop in MODFUN_PROPS:
            if msg.HasField(prop):
                props[prop] = self.decode_modfun(getattr(msg, prop))
        for prop in QUORUM_PROPS:
            if msg.HasField(prop):
                props[prop] = self.decode_quorum(getattr(msg, prop))
        if msg.HasField('repl'):
            props['repl'] = REPL_TO_PY[msg.repl]
        return props

    def decode_modfun(self, modfun):
        """
        Decodes a protobuf modfun pair into a dict with 'mod' and
        'fun' keys. Used in bucket properties.

        :param modfun: the protobuf message to decode
        :type modfun: riak.pb.riak_pb2.RpbModFun
        :rtype dict
        """
        return {'mod': bytes_to_str(modfun.module),
                'fun': bytes_to_str(modfun.function)}

    def encode_modfun(self, props, msg=None):
        """
        Encodes a dict with 'mod' and 'fun' keys into a protobuf
        modfun pair. Used in bucket properties.

        :param props: the module/function pair
        :type props: dict
        :param msg: the protobuf message to fill
        :type msg: riak.pb.riak_pb2.RpbModFun
        :rtype riak.pb.riak_pb2.RpbModFun
        """
        if msg is None:
            msg = riak.pb.riak_pb2.RpbModFun()
        msg.module = str_to_bytes(props['mod'])
        msg.function = str_to_bytes(props['fun'])
        return msg

    def decode_hooklist(self, hooklist):
        """
        Decodes a list of protobuf commit hooks into their python
        equivalents. Used in bucket properties.

        :param hooklist: a list of protobuf commit hooks
        :type hooklist: list
        :rtype list
        """
        return [self.decode_hook(hook) for hook in hooklist]

    def encode_hooklist(self, hooklist, msg):
        """
        Encodes a list of commit hooks into their protobuf equivalent.
        Used in bucket properties.

        :param hooklist: a list of commit hooks
        :type hooklist: list
        :param msg: a protobuf field that is a list of commit hooks
        """
        for hook in hooklist:
            pbhook = msg.add()
            self.encode_hook(hook, pbhook)

    def decode_hook(self, hook):
        """
        Decodes a protobuf commit hook message into a dict. Used in
        bucket properties.

        :param hook: the hook to decode
        :type hook: riak.pb.riak_pb2.RpbCommitHook
        :rtype dict
        """
        if hook.HasField('modfun'):
            return self.decode_modfun(hook.modfun)
        else:
            return {'name': bytes_to_str(hook.name)}

    def encode_hook(self, hook, msg):
        """
        Encodes a commit hook dict into the protobuf message. Used in
        bucket properties.

        :param hook: the hook to encode
        :type hook: dict
        :param msg: the protobuf message to fill
        :type msg: riak.pb.riak_pb2.RpbCommitHook
        :rtype riak.pb.riak_pb2.RpbCommitHook
        """
        if 'name' in hook:
            msg.name = str_to_bytes(hook['name'])
        else:
            self.encode_modfun(hook, msg.modfun)
        return msg

    def encode_index_req(self, bucket, index, startkey, endkey=None,
                         return_terms=None, max_results=None,
                         continuation=None, timeout=None, term_regex=None,
                         streaming=False):
        """
        Encodes a secondary index request into the protobuf message.

        :param bucket: the bucket whose index to query
        :type bucket: string
        :param index: the index to query
        :type index: string
        :param startkey: the value or beginning of the range
        :type startkey: integer, string
        :param endkey: the end of the range
        :type endkey: integer, string
        :param return_terms: whether to return the index term with the key
        :type return_terms: bool
        :param max_results: the maximum number of results to return (page size)
        :type max_results: integer
        :param continuation: the opaque continuation returned from a
            previous paginated request
        :type continuation: string
        :param timeout: a timeout value in milliseconds, or 'infinity'
        :type timeout: int
        :param term_regex: a regular expression used to filter index terms
        :type term_regex: string
        :param streaming: encode as streaming request
        :type streaming: bool
        :rtype riak.pb.riak_kv_pb2.RpbIndexReq
        """
        req = riak.pb.riak_kv_pb2.RpbIndexReq(
            bucket=str_to_bytes(bucket.name),
            index=str_to_bytes(index))
        self._add_bucket_type(req, bucket.bucket_type)
        if endkey is not None:
            req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.range
            req.range_min = str_to_bytes(str(startkey))
            req.range_max = str_to_bytes(str(endkey))
        else:
            req.qtype = riak.pb.riak_kv_pb2.RpbIndexReq.eq
            req.key = str_to_bytes(str(startkey))
        if return_terms is not None:
            req.return_terms = return_terms
        if max_results:
            req.max_results = max_results
        if continuation:
            req.continuation = str_to_bytes(continuation)
        if timeout:
            if timeout == 'infinity':
                req.timeout = 0
            else:
                req.timeout = timeout
        if term_regex:
            req.term_regex = str_to_bytes(term_regex)
        req.stream = streaming
        mc = riak.pb.messages.MSG_CODE_INDEX_REQ
        rc = riak.pb.messages.MSG_CODE_INDEX_RESP
        return Msg(mc, req.SerializeToString(), rc)

    def decode_index_req(self, resp, index,
                         return_terms=None, max_results=None):
        """
        Decodes a secondary index response into (results, continuation).

        NOTE(review): despite the name, this decodes a *response*
        (RpbIndexResp); the name is kept for interface compatibility.
        """
        if return_terms and resp.results:
            results = [(decode_index_value(index, pair.key),
                        bytes_to_str(pair.value))
                       for pair in resp.results]
        else:
            # Avoid building a throwaway copy on Python 3 before
            # re-deriving the decoded list.
            if six.PY3:
                results = [bytes_to_str(key) for key in resp.keys]
            else:
                results = resp.keys[:]

        if max_results is not None and resp.HasField('continuation'):
            return (results, bytes_to_str(resp.continuation))
        else:
            return (results, None)

    def decode_search_index(self, index):
        """
        Decodes an RpbYokozunaIndex message into a dict.

        :param index: a yz index message
        :type index: riak.pb.riak_yokozuna_pb2.RpbYokozunaIndex
        :rtype dict
        """
        result = {}
        result['name'] = bytes_to_str(index.name)
        if index.HasField('schema'):
            result['schema'] = bytes_to_str(index.schema)
        if index.HasField('n_val'):
            result['n_val'] = index.n_val
        return result

    def _add_bucket_type(self, req, bucket_type):
        # Only set the type field for non-default bucket-types, and
        # only when the server advertises support for them.
        if bucket_type and not bucket_type.is_default():
            if not self._bucket_types:
                raise NotImplementedError(
                    'Server does not support bucket-types')
            req.type = str_to_bytes(bucket_type.name)

    def encode_search_query(self, req, **kwargs):
        """Copy optional search parameters from kwargs onto the request."""
        if 'rows' in kwargs:
            req.rows = kwargs['rows']
        if 'start' in kwargs:
            req.start = kwargs['start']
        if 'sort' in kwargs:
            req.sort = str_to_bytes(kwargs['sort'])
        if 'filter' in kwargs:
            req.filter = str_to_bytes(kwargs['filter'])
        if 'df' in kwargs:
            req.df = str_to_bytes(kwargs['df'])
        if 'op' in kwargs:
            req.op = str_to_bytes(kwargs['op'])
        if 'q.op' in kwargs:
            # NOTE(review): unlike 'op' above, this assigns the raw value
            # without str_to_bytes -- confirm callers pass bytes here.
            req.op = kwargs['q.op']
        if 'fl' in kwargs:
            if isinstance(kwargs['fl'], list):
                req.fl.extend([str_to_bytes(fl) for fl in kwargs['fl']])
            else:
                req.fl.append(str_to_bytes(kwargs['fl']))
        if 'presort' in kwargs:
            req.presort = kwargs['presort']

    def decode_search_doc(self, doc):
        """Decode one search result document into a plain dict."""
        resultdoc = MultiDict()
        for pair in doc.fields:
            if six.PY2:
                ukey = unicode(pair.key, 'utf-8')   # noqa
                uval = unicode(pair.value, 'utf-8')  # noqa
            else:
                ukey = bytes_to_str(pair.key)
                uval = bytes_to_str(pair.value)
            resultdoc.add(ukey, uval)
        return resultdoc.mixed()
+ + def decode_dt_fetch(self, resp): + dtype = DT_FETCH_TYPES.get(resp.type) + if dtype is None: + raise ValueError("Unknown datatype on wire: {}".format(resp.type)) + + value = self.decode_dt_value(dtype, resp.value) + + if resp.HasField('context'): + context = resp.context[:] + else: + context = None + + return dtype, value, context + + def decode_dt_value(self, dtype, msg): + if dtype == 'counter': + return msg.counter_value + elif dtype == 'set': + return self.decode_set_value(msg.set_value) + elif dtype == 'hll': + return self.decode_hll_value(msg.hll_value) + elif dtype == 'map': + return self.decode_map_value(msg.map_value) + + def encode_dt_options(self, req, **kwargs): + for q in ['r', 'pr', 'w', 'dw', 'pw']: + if q in kwargs and kwargs[q] is not None: + setattr(req, q, self.encode_quorum(kwargs[q])) + + for o in ['basic_quorum', 'notfound_ok', 'timeout', 'return_body', + 'include_context']: + if o in kwargs and kwargs[o] is not None: + setattr(req, o, kwargs[o]) + + def decode_map_value(self, entries): + out = {} + for entry in entries: + name = bytes_to_str(entry.field.name[:]) + dtype = MAP_FIELD_TYPES[entry.field.type] + if dtype == 'counter': + value = entry.counter_value + elif dtype == 'set': + value = self.decode_set_value(entry.set_value) + elif dtype == 'register': + value = bytes_to_str(entry.register_value[:]) + elif dtype == 'flag': + value = entry.flag_value + elif dtype == 'map': + value = self.decode_map_value(entry.map_value) + else: + raise ValueError( + 'Map may not contain datatype: {}' + .format(dtype)) + out[(name, dtype)] = value + return out + + def decode_set_value(self, set_value): + return [bytes_to_str(string[:]) for string in set_value] + + def decode_hll_value(self, hll_value): + return int(hll_value) + + def encode_dt_op(self, dtype, req, op): + if dtype == 'counter': + req.op.counter_op.increment = op[1] + elif dtype == 'set': + self.encode_set_op(req.op, op) + elif dtype == 'hll': + self.encode_hll_op(req.op, op) + elif 
dtype == 'map': + self.encode_map_op(req.op.map_op, op) + else: + raise TypeError("Cannot send operation on datatype {!r}". + format(dtype)) + + def encode_set_op(self, msg, op): + if 'adds' in op: + msg.set_op.adds.extend(str_to_bytes(op['adds'])) + if 'removes' in op: + msg.set_op.removes.extend(str_to_bytes(op['removes'])) + + def encode_hll_op(self, msg, op): + if 'adds' in op: + msg.hll_op.adds.extend(str_to_bytes(op['adds'])) + + def encode_map_op(self, msg, ops): + for op in ops: + name, dtype = op[1] + ftype = MAP_FIELD_TYPES[dtype] + if op[0] == 'add': + add = msg.adds.add() + add.name = str_to_bytes(name) + add.type = ftype + elif op[0] == 'remove': + remove = msg.removes.add() + remove.name = str_to_bytes(name) + remove.type = ftype + elif op[0] == 'update': + update = msg.updates.add() + update.field.name = str_to_bytes(name) + update.field.type = ftype + self.encode_map_update(dtype, update, op[2]) + + def encode_map_update(self, dtype, msg, op): + if dtype == 'counter': + # ('increment', some_int) + msg.counter_op.increment = op[1] + elif dtype == 'set': + self.encode_set_op(msg, op) + elif dtype == 'map': + self.encode_map_op(msg.map_op, op) + elif dtype == 'register': + # ('assign', some_str) + msg.register_op = str_to_bytes(op[1]) + elif dtype == 'flag': + if op == 'enable': + msg.flag_op = riak.pb.riak_dt_pb2.MapUpdate.ENABLE + else: + msg.flag_op = riak.pb.riak_dt_pb2.MapUpdate.DISABLE + else: + raise ValueError( + 'Map may not contain datatype: {}' + .format(dtype)) + + def encode_to_ts_cell(self, cell, ts_cell): + if cell is not None: + if isinstance(cell, datetime.datetime): + ts_cell.timestamp_value = unix_time_millis(cell) + elif isinstance(cell, bool): + ts_cell.boolean_value = cell + elif isinstance(cell, six.binary_type): + ts_cell.varchar_value = cell + elif isinstance(cell, six.text_type): + ts_cell.varchar_value = str_to_bytes(cell) + elif isinstance(cell, six.string_types): + ts_cell.varchar_value = str_to_bytes(cell) + elif 
(isinstance(cell, six.integer_types)): + ts_cell.sint64_value = cell + elif isinstance(cell, float): + ts_cell.double_value = cell + else: + t = type(cell) + raise RiakError("can't serialize type '{}', value '{}'" + .format(t, cell)) + + def encode_timeseries_keyreq(self, table, key, is_delete=False): + key_vals = None + if isinstance(key, list): + key_vals = key + else: + raise ValueError("key must be a list") + + req = riak.pb.riak_ts_pb2.TsGetReq() + mc = riak.pb.messages.MSG_CODE_TS_GET_REQ + rc = riak.pb.messages.MSG_CODE_TS_GET_RESP + if is_delete: + req = riak.pb.riak_ts_pb2.TsDelReq() + mc = riak.pb.messages.MSG_CODE_TS_DEL_REQ + rc = riak.pb.messages.MSG_CODE_TS_DEL_RESP + + req.table = str_to_bytes(table.name) + for cell in key_vals: + ts_cell = req.key.add() + self.encode_to_ts_cell(cell, ts_cell) + return Msg(mc, req.SerializeToString(), rc) + + def encode_timeseries_listkeysreq(self, table, timeout=None): + req = riak.pb.riak_ts_pb2.TsListKeysReq() + req.table = str_to_bytes(table.name) + if self._client_timeouts and timeout: + req.timeout = timeout + mc = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ + rc = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_RESP + return Msg(mc, req.SerializeToString(), rc) + + def validate_timeseries_put_resp(self, resp_code, resp): + if resp is not None: + return True + else: + raise RiakError("missing response object") + + def encode_timeseries_put(self, tsobj): + """ + Fills an TsPutReq message with the appropriate data and + metadata from a TsObject. 
+ + :param tsobj: a TsObject + :type tsobj: TsObject + :param req: the protobuf message to fill + :type req: riak.pb.riak_ts_pb2.TsPutReq + """ + req = riak.pb.riak_ts_pb2.TsPutReq() + req.table = str_to_bytes(tsobj.table.name) + + if tsobj.columns: + raise NotImplementedError("columns are not implemented yet") + + if tsobj.rows and isinstance(tsobj.rows, list): + for row in tsobj.rows: + tsr = req.rows.add() # NB: type TsRow + if not isinstance(row, list): + raise ValueError("TsObject row must be a list of values") + for cell in row: + tsc = tsr.cells.add() # NB: type TsCell + self.encode_to_ts_cell(cell, tsc) + else: + raise RiakError("TsObject requires a list of rows") + + mc = riak.pb.messages.MSG_CODE_TS_PUT_REQ + rc = riak.pb.messages.MSG_CODE_TS_PUT_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_timeseries_query(self, table, query, interpolations=None): + req = riak.pb.riak_ts_pb2.TsQueryReq() + q = query + if '{table}' in q: + q = q.format(table=table.name) + req.query.base = str_to_bytes(q) + mc = riak.pb.messages.MSG_CODE_TS_QUERY_REQ + rc = riak.pb.messages.MSG_CODE_TS_QUERY_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_timeseries(self, resp, tsobj, + convert_timestamp=False): + """ + Fills an TsObject with the appropriate data and + metadata from a TsGetResp / TsQueryResp. 
+ + :param resp: the protobuf message from which to process data + :type resp: riak.pb.riak_ts_pb2.TsQueryRsp or + riak.pb.riak_ts_pb2.TsGetResp + :param tsobj: a TsObject + :type tsobj: TsObject + :param convert_timestamp: Convert timestamps to datetime objects + :type tsobj: boolean + """ + if resp.columns is not None: + col_names = [] + col_types = [] + for col in resp.columns: + col_names.append(bytes_to_str(col.name)) + col_type = self.decode_timeseries_col_type(col.type) + col_types.append(col_type) + tsobj.columns = TsColumns(col_names, col_types) + + tsobj.rows = [] + if resp.rows is not None: + for row in resp.rows: + tsobj.rows.append( + self.decode_timeseries_row( + row, resp.columns, convert_timestamp)) + + def decode_timeseries_col_type(self, col_type): + # NB: these match the atom names for column types + if col_type == TsColumnType.Value('VARCHAR'): + return 'varchar' + elif col_type == TsColumnType.Value('SINT64'): + return 'sint64' + elif col_type == TsColumnType.Value('DOUBLE'): + return 'double' + elif col_type == TsColumnType.Value('TIMESTAMP'): + return 'timestamp' + elif col_type == TsColumnType.Value('BOOLEAN'): + return 'boolean' + elif col_type == TsColumnType.Value('BLOB'): + return 'blob' + else: + msg = 'could not decode column type: {}'.format(col_type) + raise RiakError(msg) + + def decode_timeseries_row(self, tsrow, tscols=None, + convert_timestamp=False): + """ + Decodes a TsRow into a list + + :param tsrow: the protobuf TsRow to decode. + :type tsrow: riak.pb.riak_ts_pb2.TsRow + :param tscols: the protobuf TsColumn data to help decode. 
+ :type tscols: list + :rtype list + """ + row = [] + for i, cell in enumerate(tsrow.cells): + col = None + if tscols is not None: + col = tscols[i] + if cell.HasField('varchar_value'): + if col and not (col.type == TsColumnType.Value('VARCHAR') or + col.type == TsColumnType.Value('BLOB')): + raise TypeError('expected VARCHAR or BLOB column') + else: + row.append(cell.varchar_value) + elif cell.HasField('sint64_value'): + if col and col.type != TsColumnType.Value('SINT64'): + raise TypeError('expected SINT64 column') + else: + row.append(cell.sint64_value) + elif cell.HasField('double_value'): + if col and col.type != TsColumnType.Value('DOUBLE'): + raise TypeError('expected DOUBLE column') + else: + row.append(cell.double_value) + elif cell.HasField('timestamp_value'): + if col and col.type != TsColumnType.Value('TIMESTAMP'): + raise TypeError('expected TIMESTAMP column') + else: + dt = cell.timestamp_value + if convert_timestamp: + dt = datetime_from_unix_time_millis( + cell.timestamp_value) + row.append(dt) + elif cell.HasField('boolean_value'): + if col and col.type != TsColumnType.Value('BOOLEAN'): + raise TypeError('expected BOOLEAN column') + else: + row.append(cell.boolean_value) + else: + row.append(None) + return row + + def decode_preflist(self, item): + """ + Decodes a preflist response + + :param preflist: a bucket/key preflist + :type preflist: list of + riak.pb.riak_kv_pb2.RpbBucketKeyPreflistItem + :rtype dict + """ + result = {'partition': item.partition, + 'node': bytes_to_str(item.node), + 'primary': item. 
primary} + return result + + def encode_get(self, robj, r=None, pr=None, timeout=None, + basic_quorum=None, notfound_ok=None, + head_only=False): + bucket = robj.bucket + req = riak.pb.riak_kv_pb2.RpbGetReq() + if r: + req.r = self.encode_quorum(r) + if self._quorum_controls: + if pr: + req.pr = self.encode_quorum(pr) + if basic_quorum is not None: + req.basic_quorum = basic_quorum + if notfound_ok is not None: + req.notfound_ok = notfound_ok + if self._client_timeouts and timeout: + req.timeout = timeout + if self._tombstone_vclocks: + req.deletedvclock = True + req.bucket = str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + req.key = str_to_bytes(robj.key) + req.head = head_only + mc = riak.pb.messages.MSG_CODE_GET_REQ + rc = riak.pb.messages.MSG_CODE_GET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_put(self, robj, w=None, dw=None, pw=None, + return_body=True, if_none_match=False, + timeout=None): + bucket = robj.bucket + req = riak.pb.riak_kv_pb2.RpbPutReq() + if w: + req.w = self.encode_quorum(w) + if dw: + req.dw = self.encode_quorum(dw) + if self._quorum_controls and pw: + req.pw = self.encode_quorum(pw) + if return_body: + req.return_body = 1 + if if_none_match: + req.if_none_match = 1 + if self._client_timeouts and timeout: + req.timeout = timeout + req.bucket = str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + if robj.key: + req.key = str_to_bytes(robj.key) + if robj.vclock: + req.vclock = robj.vclock.encode('binary') + self.encode_content(robj, req.content) + mc = riak.pb.messages.MSG_CODE_PUT_REQ + rc = riak.pb.messages.MSG_CODE_PUT_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_get(self, robj, resp): + if resp is not None: + if resp.HasField('vclock'): + robj.vclock = VClock(resp.vclock, 'binary') + # We should do this even if there are no contents, i.e. 
+ # the object is tombstoned + self.decode_contents(resp.content, robj) + else: + # "not found" returns an empty message, + # so let's make sure to clear the siblings + robj.siblings = [] + return robj + + def decode_put(self, robj, resp): + if resp is not None: + if resp.HasField('key'): + robj.key = bytes_to_str(resp.key) + if resp.HasField("vclock"): + robj.vclock = VClock(resp.vclock, 'binary') + if resp.content: + self.decode_contents(resp.content, robj) + elif not robj.key: + raise RiakError("missing response object") + return robj + + def encode_delete(self, robj, rw=None, r=None, + w=None, dw=None, pr=None, pw=None, + timeout=None): + req = riak.pb.riak_kv_pb2.RpbDelReq() + if rw: + req.rw = self.encode_quorum(rw) + if r: + req.r = self.encode_quorum(r) + if w: + req.w = self.encode_quorum(w) + if dw: + req.dw = self.encode_quorum(dw) + + if self._quorum_controls: + if pr: + req.pr = self.encode_quorum(pr) + if pw: + req.pw = self.encode_quorum(pw) + + if self._client_timeouts and timeout: + req.timeout = timeout + + use_vclocks = (self._tombstone_vclocks and + hasattr(robj, 'vclock') and robj.vclock) + if use_vclocks: + req.vclock = robj.vclock.encode('binary') + + bucket = robj.bucket + req.bucket = str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + req.key = str_to_bytes(robj.key) + mc = riak.pb.messages.MSG_CODE_DEL_REQ + rc = riak.pb.messages.MSG_CODE_DEL_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_stream_keys(self, bucket, timeout=None): + req = riak.pb.riak_kv_pb2.RpbListKeysReq() + req.bucket = str_to_bytes(bucket.name) + if self._client_timeouts and timeout: + req.timeout = timeout + self._add_bucket_type(req, bucket.bucket_type) + mc = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ + rc = riak.pb.messages.MSG_CODE_LIST_KEYS_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_get_keys(self, stream): + keys = [] + for keylist in stream: + for key in keylist: + keys.append(bytes_to_str(key)) + 
return keys + + def decode_get_server_info(self, resp): + return {'node': bytes_to_str(resp.node), + 'server_version': bytes_to_str(resp.server_version)} + + def encode_get_client_id(self): + mc = riak.pb.messages.MSG_CODE_GET_CLIENT_ID_REQ + rc = riak.pb.messages.MSG_CODE_GET_CLIENT_ID_RESP + return Msg(mc, None, rc) + + def decode_get_client_id(self, resp): + return bytes_to_str(resp.client_id) + + def encode_set_client_id(self, client_id): + req = riak.pb.riak_kv_pb2.RpbSetClientIdReq() + req.client_id = str_to_bytes(client_id) + mc = riak.pb.messages.MSG_CODE_SET_CLIENT_ID_REQ + rc = riak.pb.messages.MSG_CODE_SET_CLIENT_ID_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_get_buckets(self, bucket_type, + timeout=None, streaming=False): + # Bucket streaming landed in the same release as timeouts, so + # we don't need to check the capability. + req = riak.pb.riak_kv_pb2.RpbListBucketsReq() + req.stream = streaming + self._add_bucket_type(req, bucket_type) + if self._client_timeouts and timeout: + req.timeout = timeout + mc = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ + rc = riak.pb.messages.MSG_CODE_LIST_BUCKETS_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_get_bucket_props(self, bucket): + req = riak.pb.riak_pb2.RpbGetBucketReq() + req.bucket = str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + mc = riak.pb.messages.MSG_CODE_GET_BUCKET_REQ + rc = riak.pb.messages.MSG_CODE_GET_BUCKET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_set_bucket_props(self, bucket, props): + req = riak.pb.riak_pb2.RpbSetBucketReq() + req.bucket = str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + self.encode_bucket_props(props, req) + mc = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ + rc = riak.pb.messages.MSG_CODE_SET_BUCKET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_clear_bucket_props(self, bucket): + req = riak.pb.riak_pb2.RpbResetBucketReq() + req.bucket = 
str_to_bytes(bucket.name) + self._add_bucket_type(req, bucket.bucket_type) + mc = riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ + rc = riak.pb.messages.MSG_CODE_RESET_BUCKET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_get_bucket_type_props(self, bucket_type): + req = riak.pb.riak_pb2.RpbGetBucketTypeReq() + req.type = str_to_bytes(bucket_type.name) + mc = riak.pb.messages.MSG_CODE_GET_BUCKET_TYPE_REQ + rc = riak.pb.messages.MSG_CODE_GET_BUCKET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_set_bucket_type_props(self, bucket_type, props): + req = riak.pb.riak_pb2.RpbSetBucketTypeReq() + req.type = str_to_bytes(bucket_type.name) + self.encode_bucket_props(props, req) + mc = riak.pb.messages.MSG_CODE_SET_BUCKET_TYPE_REQ + rc = riak.pb.messages.MSG_CODE_SET_BUCKET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_stream_mapred(self, content): + req = riak.pb.riak_kv_pb2.RpbMapRedReq() + req.request = str_to_bytes(content) + req.content_type = str_to_bytes("application/json") + mc = riak.pb.messages.MSG_CODE_MAP_RED_REQ + rc = riak.pb.messages.MSG_CODE_MAP_RED_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_create_search_index(self, index, schema=None, + n_val=None, timeout=None): + index = str_to_bytes(index) + idx = riak.pb.riak_yokozuna_pb2.RpbYokozunaIndex(name=index) + if schema: + idx.schema = str_to_bytes(schema) + if n_val: + idx.n_val = n_val + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexPutReq(index=idx) + if timeout is not None: + req.timeout = timeout + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_PUT_REQ + rc = riak.pb.messages.MSG_CODE_PUT_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_get_search_index(self, index): + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexGetReq( + name=str_to_bytes(index)) + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_REQ + rc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_RESP + return Msg(mc, req.SerializeToString(), rc) + + 
def encode_list_search_indexes(self): + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexGetReq() + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_REQ + rc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_delete_search_index(self, index): + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexDeleteReq( + name=str_to_bytes(index)) + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ + rc = riak.pb.messages.MSG_CODE_DEL_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_create_search_schema(self, schema, content): + scma = riak.pb.riak_yokozuna_pb2.RpbYokozunaSchema( + name=str_to_bytes(schema), + content=str_to_bytes(content)) + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaSchemaPutReq( + schema=scma) + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ + rc = riak.pb.messages.MSG_CODE_PUT_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_get_search_schema(self, schema): + req = riak.pb.riak_yokozuna_pb2.RpbYokozunaSchemaGetReq( + name=str_to_bytes(schema)) + mc = riak.pb.messages.MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ + rc = riak.pb.messages.MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_get_search_schema(self, resp): + result = {} + result['name'] = bytes_to_str(resp.schema.name) + result['content'] = bytes_to_str(resp.schema.content) + return result + + def encode_search(self, index, query, **kwargs): + req = riak.pb.riak_search_pb2.RpbSearchQueryReq( + index=str_to_bytes(index), + q=str_to_bytes(query)) + self.encode_search_query(req, **kwargs) + mc = riak.pb.messages.MSG_CODE_SEARCH_QUERY_REQ + rc = riak.pb.messages.MSG_CODE_SEARCH_QUERY_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_search(self, resp): + result = {} + if resp.HasField('max_score'): + result['max_score'] = resp.max_score + if resp.HasField('num_found'): + result['num_found'] = resp.num_found + result['docs'] = 
[self.decode_search_doc(doc) for doc in resp.docs] + return result + + def encode_get_counter(self, bucket, key, **kwargs): + req = riak.pb.riak_kv_pb2.RpbCounterGetReq() + req.bucket = str_to_bytes(bucket.name) + req.key = str_to_bytes(key) + if kwargs.get('r') is not None: + req.r = self.encode_quorum(kwargs['r']) + if kwargs.get('pr') is not None: + req.pr = self.encode_quorum(kwargs['pr']) + if kwargs.get('basic_quorum') is not None: + req.basic_quorum = kwargs['basic_quorum'] + if kwargs.get('notfound_ok') is not None: + req.notfound_ok = kwargs['notfound_ok'] + mc = riak.pb.messages.MSG_CODE_COUNTER_GET_REQ + rc = riak.pb.messages.MSG_CODE_COUNTER_GET_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_update_counter(self, bucket, key, value, **kwargs): + req = riak.pb.riak_kv_pb2.RpbCounterUpdateReq() + req.bucket = str_to_bytes(bucket.name) + req.key = str_to_bytes(key) + req.amount = value + if kwargs.get('w') is not None: + req.w = self.encode_quorum(kwargs['w']) + if kwargs.get('dw') is not None: + req.dw = self.encode_quorum(kwargs['dw']) + if kwargs.get('pw') is not None: + req.pw = self.encode_quorum(kwargs['pw']) + if kwargs.get('returnvalue') is not None: + req.returnvalue = kwargs['returnvalue'] + mc = riak.pb.messages.MSG_CODE_COUNTER_UPDATE_REQ + rc = riak.pb.messages.MSG_CODE_COUNTER_UPDATE_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_fetch_datatype(self, bucket, key, **kwargs): + req = riak.pb.riak_dt_pb2.DtFetchReq() + req.type = str_to_bytes(bucket.bucket_type.name) + req.bucket = str_to_bytes(bucket.name) + req.key = str_to_bytes(key) + self.encode_dt_options(req, **kwargs) + mc = riak.pb.messages.MSG_CODE_DT_FETCH_REQ + rc = riak.pb.messages.MSG_CODE_DT_FETCH_RESP + return Msg(mc, req.SerializeToString(), rc) + + def encode_update_datatype(self, datatype, **kwargs): + op = datatype.to_op() + type_name = datatype.type_name + if not op: + raise ValueError("No operation to send on datatype {!r}". 
+ format(datatype)) + req = riak.pb.riak_dt_pb2.DtUpdateReq() + req.bucket = str_to_bytes(datatype.bucket.name) + req.type = str_to_bytes(datatype.bucket.bucket_type.name) + if datatype.key: + req.key = str_to_bytes(datatype.key) + if datatype._context: + req.context = datatype._context + self.encode_dt_options(req, **kwargs) + self.encode_dt_op(type_name, req, op) + mc = riak.pb.messages.MSG_CODE_DT_UPDATE_REQ + rc = riak.pb.messages.MSG_CODE_DT_UPDATE_RESP + return Msg(mc, req.SerializeToString(), rc) + + def decode_update_datatype(self, datatype, resp, **kwargs): + type_name = datatype.type_name + if resp.HasField('key'): + datatype.key = resp.key[:] + if resp.HasField('context'): + datatype._context = resp.context[:] + if kwargs.get('return_body'): + datatype._set_value(self.decode_dt_value(type_name, resp)) + + def encode_get_preflist(self, bucket, key): + req = riak.pb.riak_kv_pb2.RpbGetBucketKeyPreflistReq() + req.bucket = str_to_bytes(bucket.name) + req.key = str_to_bytes(key) + req.type = str_to_bytes(bucket.bucket_type.name) + mc = riak.pb.messages.MSG_CODE_GET_BUCKET_KEY_PREFLIST_REQ + rc = riak.pb.messages.MSG_CODE_GET_BUCKET_KEY_PREFLIST_RESP + return Msg(mc, req.SerializeToString(), rc) diff --git a/riak/codecs/ttb.py b/riak/codecs/ttb.py new file mode 100644 index 00000000..70a8b6fc --- /dev/null +++ b/riak/codecs/ttb.py @@ -0,0 +1,228 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import six + +from erlastic import encode, decode +from erlastic.types import Atom + +from riak import RiakError +from riak.codecs import Codec, Msg +from riak.pb.messages import MSG_CODE_TS_TTB_MSG +from riak.ts_object import TsColumns +from riak.util import bytes_to_str, unix_time_millis, \ + datetime_from_unix_time_millis + +udef_a = Atom('undefined') + +rpberrorresp_a = Atom('rpberrorresp') +tsgetreq_a = Atom('tsgetreq') +tsgetresp_a = Atom('tsgetresp') +tsqueryreq_a = Atom('tsqueryreq') +tsqueryresp_a = Atom('tsqueryresp') +tsinterpolation_a = Atom('tsinterpolation') +tsputreq_a = Atom('tsputreq') +tsputresp_a = Atom('tsputresp') +tsdelreq_a = Atom('tsdelreq') +timestamp_a = Atom('timestamp') + + +class TtbCodec(Codec): + ''' + Erlang term-to-binary Encoding and decoding methods for TcpTransport + ''' + + def __init__(self, **unused_args): + super(TtbCodec, self).__init__(**unused_args) + + def parse_msg(self, msg_code, data): + if msg_code != MSG_CODE_TS_TTB_MSG: + raise RiakError("TTB can't parse code: {}".format(msg_code)) + if len(data) > 0: + decoded = decode(data) + self.maybe_err_ttb(decoded) + return decoded + else: + return None + + def maybe_err_ttb(self, err_ttb): + resp_a = err_ttb[0] + if resp_a == rpberrorresp_a: + errmsg = err_ttb[1] + # errcode = err_ttb[2] + raise RiakError(bytes_to_str(errmsg)) + + def encode_to_ts_cell(self, cell): + if cell is None: + return [] + else: + if isinstance(cell, datetime.datetime): + ts = unix_time_millis(cell) + # logging.debug('encoded datetime %s as %s', cell, ts) + return ts + elif isinstance(cell, bool): + return cell + elif isinstance(cell, six.text_type) or \ + isinstance(cell, six.binary_type) or \ + isinstance(cell, six.string_types): + return cell + elif (isinstance(cell, six.integer_types)): + return cell + elif isinstance(cell, float): + return cell + else: + t = type(cell) + raise RiakError("can't serialize type '{}', value '{}'" + .format(t, cell)) + + def 
encode_timeseries_keyreq(self, table, key, is_delete=False): + key_vals = None + if isinstance(key, list): + key_vals = key + else: + raise ValueError("key must be a list") + + mc = MSG_CODE_TS_TTB_MSG + rc = MSG_CODE_TS_TTB_MSG + req_atom = tsgetreq_a + if is_delete: + req_atom = tsdelreq_a + + # TODO FUTURE add timeout as last param + req = req_atom, table.name, \ + [self.encode_to_ts_cell(k) for k in key_vals], udef_a + return Msg(mc, encode(req), rc) + + def validate_timeseries_put_resp(self, resp_code, resp): + if resp is None and resp_code == MSG_CODE_TS_TTB_MSG: + return True + if resp is not None: + return True + else: + raise RiakError("missing response object") + + def encode_timeseries_put(self, tsobj): + ''' + Returns an Erlang-TTB encoded tuple with the appropriate data and + metadata from a TsObject. + + :param tsobj: a TsObject + :type tsobj: TsObject + :rtype: term-to-binary encoded object + ''' + if tsobj.columns: + raise NotImplementedError('columns are not used') + + if tsobj.rows and isinstance(tsobj.rows, list): + req_rows = [] + for row in tsobj.rows: + req_r = [] + for cell in row: + req_r.append(self.encode_to_ts_cell(cell)) + req_rows.append(tuple(req_r)) + req = tsputreq_a, tsobj.table.name, [], req_rows + mc = MSG_CODE_TS_TTB_MSG + rc = MSG_CODE_TS_TTB_MSG + return Msg(mc, encode(req), rc) + else: + raise RiakError("TsObject requires a list of rows") + + def encode_timeseries_query(self, table, query, interpolations=None): + q = query + if '{table}' in q: + q = q.format(table=table.name) + tsi = tsinterpolation_a, q, [] + req = tsqueryreq_a, tsi, False, udef_a + mc = MSG_CODE_TS_TTB_MSG + rc = MSG_CODE_TS_TTB_MSG + return Msg(mc, encode(req), rc) + + def decode_timeseries(self, resp_ttb, tsobj, + convert_timestamp=False): + """ + Fills an TsObject with the appropriate data and + metadata from a TTB-encoded TsGetResp / TsQueryResp. 
+ + :param resp_ttb: the decoded TTB data + :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp + :param tsobj: a TsObject + :type tsobj: TsObject + :param convert_timestamp: Convert timestamps to datetime objects + :type convert_timestamp: boolean + """ + if resp_ttb is None: + return tsobj + + self.maybe_err_ttb(resp_ttb) + + # NB: some queries return a BARE 'tsqueryresp' atom + # catch that here: + if resp_ttb == tsqueryresp_a: + return tsobj + + # The response atom is the first element in the response tuple + resp_a = resp_ttb[0] + if resp_a == tsputresp_a: + return + elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a: + resp_data = resp_ttb[1] + if len(resp_data) == 0: + return + elif len(resp_data) == 3: + resp_colnames = resp_data[0] + resp_coltypes = resp_data[1] + tsobj.columns = self.decode_timeseries_cols( + resp_colnames, resp_coltypes) + resp_rows = resp_data[2] + tsobj.rows = [] + for resp_row in resp_rows: + tsobj.rows.append( + self.decode_timeseries_row(resp_row, resp_coltypes, + convert_timestamp)) + else: + raise RiakError( + "Expected 3-tuple in response, got: {}".format(resp_data)) + else: + raise RiakError("Unknown TTB response type: {}".format(resp_a)) + + def decode_timeseries_cols(self, cnames, ctypes): + cnames = [bytes_to_str(cname) for cname in cnames] + ctypes = [str(ctype) for ctype in ctypes] + return TsColumns(cnames, ctypes) + + def decode_timeseries_row(self, tsrow, tsct, convert_timestamp=False): + """ + Decodes a TTB-encoded TsRow into a list + + :param tsrow: the TTB decoded TsRow to decode. + :type tsrow: TTB decoded row + :param tsct: the TTB decoded column types (atoms). 
+ :type tsct: list + :param convert_timestamp: Convert timestamps to datetime objects + :type convert_timestamp: boolean + :rtype: list + """ + row = [] + for i, cell in enumerate(tsrow): + if cell is None: + row.append(None) + elif isinstance(cell, list) and len(cell) == 0: + row.append(None) + else: + if convert_timestamp and tsct[i] == timestamp_a: + row.append(datetime_from_unix_time_millis(cell)) + else: + row.append(cell) + return row diff --git a/riak/codecs/util.py b/riak/codecs/util.py new file mode 100644 index 00000000..1fa492bb --- /dev/null +++ b/riak/codecs/util.py @@ -0,0 +1,24 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import riak.pb.messages + + +def parse_pbuf_msg(msg_code, data): + pbclass = riak.pb.messages.MESSAGE_CLASSES.get(msg_code, None) + if pbclass is None: + return None + pbo = pbclass() + pbo.ParseFromString(data) + return pbo diff --git a/riak/content.py b/riak/content.py index d885827b..6eb9e7df 100644 --- a/riak/content.py +++ b/riak/content.py @@ -1,20 +1,17 @@ -""" -Copyright 2013 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. 
You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from riak import RiakError from six import string_types @@ -90,6 +87,8 @@ def _serialize(self, value): format(self.content_type)) def _deserialize(self, value): + if not value: + return value decoder = self._robject.bucket.get_decoder(self.content_type) if decoder: return decoder(value) diff --git a/riak/datatypes/__init__.py b/riak/datatypes/__init__.py index 8ffd49cf..87575a33 100644 --- a/riak/datatypes/__init__.py +++ b/riak/datatypes/__init__.py @@ -1,8 +1,18 @@ -#: A dict from :attr:`type names ` to the -#: class that implements them. This is used inside :class:`Map` to -#: initialize new values. -TYPES = {} +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .types import TYPES from .datatype import Datatype from .counter import Counter from .flag import Flag @@ -10,7 +20,8 @@ from .set import Set from .map import Map from .errors import ContextRequired +from .hll import Hll -__all__ = ['Datatype', 'Flag', 'Counter', 'Register', 'Set', 'Map', 'TYPES', - 'ContextRequired'] +__all__ = ['Datatype', 'TYPES', 'ContextRequired', + 'Flag', 'Counter', 'Register', 'Set', 'Map', 'Hll'] diff --git a/riak/datatypes/counter.py b/riak/datatypes/counter.py index af08df2f..d8c8fd24 100644 --- a/riak/datatypes/counter.py +++ b/riak/datatypes/counter.py @@ -1,4 +1,21 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import six + from riak.datatypes.datatype import Datatype +from riak.datatypes import TYPES class Counter(Datatype): @@ -53,9 +70,7 @@ def decrement(self, amount=1): self._increment -= amount def _check_type(self, new_value): - return (isinstance(new_value, int) or - isinstance(new_value, long)) + return isinstance(new_value, six.integer_types) -from riak.datatypes import TYPES TYPES['counter'] = Counter diff --git a/riak/datatypes/datatype.py b/riak/datatypes/datatype.py index 4aed67e4..2303dba7 100644 --- a/riak/datatypes/datatype.py +++ b/riak/datatypes/datatype.py @@ -1,4 +1,19 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from .errors import ContextRequired +from . import TYPES class Datatype(object): @@ -34,7 +49,7 @@ def value(self): which is unique for each datatype. **NB**: Do not use this property to mutate data, as it will not - have any effect. Use the methods of the individual type to effect + have any effect. Use the methods of the individual type to affect changes. This value is guaranteed to be independent of any internal data representation. """ @@ -212,5 +227,3 @@ def _require_context(self): """ if not self._context: raise ContextRequired() - -from . import TYPES diff --git a/riak/datatypes/errors.py b/riak/datatypes/errors.py index 4e68707f..16be5589 100644 --- a/riak/datatypes/errors.py +++ b/riak/datatypes/errors.py @@ -1,3 +1,17 @@ +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from riak import RiakError @@ -12,5 +26,5 @@ class ContextRequired(RiakError): "fetch the datatype first") def __init__(self, message=None): - super(ContextRequired, self).__init__(message - or self._default_message) + super(ContextRequired, self).__init__(message or + self._default_message) diff --git a/riak/datatypes/flag.py b/riak/datatypes/flag.py index 0b55f472..bfb869a2 100644 --- a/riak/datatypes/flag.py +++ b/riak/datatypes/flag.py @@ -1,4 +1,19 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from riak.datatypes.datatype import Datatype +from riak.datatypes import TYPES class Flag(Datatype): @@ -49,5 +64,4 @@ def _check_type(self, new_value): return isinstance(new_value, bool) -from riak.datatypes import TYPES TYPES['flag'] = Flag diff --git a/riak/datatypes/hll.py b/riak/datatypes/hll.py new file mode 100644 index 00000000..1d962731 --- /dev/null +++ b/riak/datatypes/hll.py @@ -0,0 +1,81 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from .datatype import Datatype +from riak.datatypes import TYPES + +__all__ = ['Hll'] + + +class Hll(Datatype): + """A convergent datatype representing a HyperLogLog set. + Currently strings are the only supported value type. + Example:: + + myhll.add('barista') + myhll.add('roaster') + myhll.add('brewer') + """ + + type_name = 'hll' + _type_error_msg = 'Hlls can only be integers' + + def _post_init(self): + self._adds = set() + + def _default_value(self): + return 0 + + @Datatype.modified.getter + def modified(self): + """ + Whether this HyperLogLog has staged adds. + """ + return len(self._adds) > 0 + + def to_op(self): + """ + Extracts the modification operation from the Hll. + + :rtype: dict, None + """ + if not self._adds: + return None + changes = {} + if self._adds: + changes['adds'] = list(self._adds) + return changes + + def add(self, element): + """ + Adds an element to the HyperLogLog. 
Datatype cardinality will + be updated when the object is saved. + + :param element: the element to add + :type element: str + """ + if not isinstance(element, six.string_types): + raise TypeError("Hll elements can only be strings") + self._adds.add(element) + + def _coerce_value(self, new_value): + return int(new_value) + + def _check_type(self, new_value): + return isinstance(new_value, six.integer_types) + + +TYPES['hll'] = Hll diff --git a/riak/datatypes/map.py b/riak/datatypes/map.py index e9460101..b5b790bf 100644 --- a/riak/datatypes/map.py +++ b/riak/datatypes/map.py @@ -1,6 +1,21 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from collections import Mapping from riak.util import lazy_property from .datatype import Datatype +from riak.datatypes import TYPES class TypedMapView(Mapping): @@ -238,13 +253,15 @@ def modified(self): """ Whether the map has staged local modifications. 
""" - is_modified = lambda x: x.modified - values_modified = [is_modified(self._value[v]) for v in self._value] - modified = (any(values_modified) or self._removes or self._updates) - if modified: + if self._removes: return True - else: - return False + for v in self._value: + if self._value[v].modified: + return True + for v in self._updates: + if self._updates[v].modified: + return True + return False def to_op(self): """ @@ -282,5 +299,4 @@ def _extract_updates(self, d): yield ('update', key, d[key].to_op()) -from riak.datatypes import TYPES TYPES['map'] = Map diff --git a/riak/datatypes/register.py b/riak/datatypes/register.py index fe231e64..247a2a52 100644 --- a/riak/datatypes/register.py +++ b/riak/datatypes/register.py @@ -1,6 +1,21 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from collections import Sized from riak.datatypes.datatype import Datatype from six import string_types +from riak.datatypes import TYPES class Register(Sized, Datatype): @@ -61,5 +76,4 @@ def _check_type(self, new_value): return isinstance(new_value, string_types) -from riak.datatypes import TYPES TYPES['register'] = Register diff --git a/riak/datatypes/set.py b/riak/datatypes/set.py index a2d5b1d9..19829cf3 100644 --- a/riak/datatypes/set.py +++ b/riak/datatypes/set.py @@ -1,6 +1,22 @@ +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import collections + from .datatype import Datatype from six import string_types +from riak.datatypes import TYPES __all__ = ['Set'] @@ -113,5 +129,4 @@ def _check_element(element): raise TypeError("Set elements can only be strings") -from riak.datatypes import TYPES TYPES['set'] = Set diff --git a/riak/datatypes/types.py b/riak/datatypes/types.py new file mode 100644 index 00000000..b9761294 --- /dev/null +++ b/riak/datatypes/types.py @@ -0,0 +1,18 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#: A dict from :attr:`type names ` to the +#: class that implements them. This is used inside :class:`Map` to +#: initialize new values. 
+TYPES = {} diff --git a/riak/mapreduce.py b/riak/mapreduce.py index c2b797e0..1b604663 100644 --- a/riak/mapreduce.py +++ b/riak/mapreduce.py @@ -1,28 +1,27 @@ -""" -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010 Rusty Klophaus +# Copyright 2010 Justin Sheehy +# Copyright 2009 Jay Baird +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import print_function from collections import Iterable, namedtuple -from riak import RiakError from six import string_types, PY2 +import riak + + #: Links are just bucket/key/tag tuples, this class provides a #: backwards-compatible format: ``RiakLink(bucket, key, tag)`` RiakLink = namedtuple("RiakLink", ("bucket", "key", "tag")) @@ -66,6 +65,7 @@ def add(self, arg1, arg2=None, arg3=None, bucket_type=None): :type bucket_type: string, None :rtype: :class:`RiakMapReduce` """ + from riak.riak_object import RiakObject if (arg2 is None) and (arg3 is None): if isinstance(arg1, RiakObject): return self.add_object(arg1) @@ -128,8 +128,10 @@ def add_bucket(self, bucket, bucket_type=None): :type bucket_type: string, None :rtype: :class:`RiakMapReduce` """ + if not riak.disable_list_exceptions: + raise riak.ListError() self._input_mode = 'bucket' - if isinstance(bucket, RiakBucket): + if isinstance(bucket, riak.RiakBucket): if bucket.bucket_type.is_default(): self._inputs = {'bucket': bucket.name} else: @@ -308,19 +310,20 @@ def run(self, timeout=None): try: result = self._client.mapred(self._inputs, query, timeout) - except RiakError as e: + except riak.RiakError as e: if 'worker_startup_failed' in e.value: for phase in self._phases: if phase._language == 'erlang': if type(phase._function) is str: - raise RiakError('May have tried erlang strfun ' - 'when not allowed\n' - 'original error: ' + e.value) + raise riak.RiakError( + 'May have tried erlang strfun ' + 'when not allowed\n' + 'original error: ' + e.value) raise e # If the last phase is NOT a link phase, then return the result. - if not (link_results_flag - or isinstance(self._phases[-1], RiakLinkPhase)): + if not (link_results_flag or + isinstance(self._phases[-1], RiakLinkPhase)): return result # If there are no results, then return an empty list. 
@@ -780,6 +783,3 @@ def reduce(self, *args): """ mr = RiakMapReduce(self) return mr.reduce(*args) - -from riak.riak_object import RiakObject -from riak.bucket import RiakBucket diff --git a/riak/multidict.py b/riak/multidict.py index b13a65b2..0df28f80 100644 --- a/riak/multidict.py +++ b/riak/multidict.py @@ -1,3 +1,17 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # (c) 2005 Ian Bicking and contributors; written for Paste # (http://pythonpaste.org) Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license.php diff --git a/riak/node.py b/riak/node.py index 332dc654..eecffe69 100644 --- a/riak/node.py +++ b/riak/node.py @@ -1,22 +1,20 @@ -""" -Copyright 2012 Basho Technologies, Inc. +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
-This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" import math import time + from threading import RLock diff --git a/riak/pb/__init__.py b/riak/pb/__init__.py new file mode 100644 index 00000000..9b867bc5 --- /dev/null +++ b/riak/pb/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/riak/pb/messages.py b/riak/pb/messages.py new file mode 100644 index 00000000..b8f1e91e --- /dev/null +++ b/riak/pb/messages.py @@ -0,0 +1,187 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a generated file. DO NOT EDIT. + +""" +Constants and mappings between Riak protocol codes and messages. +""" + +import riak.pb.riak_dt_pb2 +import riak.pb.riak_kv_pb2 +import riak.pb.riak_pb2 +import riak.pb.riak_search_pb2 +import riak.pb.riak_ts_pb2 +import riak.pb.riak_yokozuna_pb2 + +# Protocol codes +MSG_CODE_ERROR_RESP = 0 +MSG_CODE_PING_REQ = 1 +MSG_CODE_PING_RESP = 2 +MSG_CODE_GET_CLIENT_ID_REQ = 3 +MSG_CODE_GET_CLIENT_ID_RESP = 4 +MSG_CODE_SET_CLIENT_ID_REQ = 5 +MSG_CODE_SET_CLIENT_ID_RESP = 6 +MSG_CODE_GET_SERVER_INFO_REQ = 7 +MSG_CODE_GET_SERVER_INFO_RESP = 8 +MSG_CODE_GET_REQ = 9 +MSG_CODE_GET_RESP = 10 +MSG_CODE_PUT_REQ = 11 +MSG_CODE_PUT_RESP = 12 +MSG_CODE_DEL_REQ = 13 +MSG_CODE_DEL_RESP = 14 +MSG_CODE_LIST_BUCKETS_REQ = 15 +MSG_CODE_LIST_BUCKETS_RESP = 16 +MSG_CODE_LIST_KEYS_REQ = 17 +MSG_CODE_LIST_KEYS_RESP = 18 +MSG_CODE_GET_BUCKET_REQ = 19 +MSG_CODE_GET_BUCKET_RESP = 20 +MSG_CODE_SET_BUCKET_REQ = 21 +MSG_CODE_SET_BUCKET_RESP = 22 +MSG_CODE_MAP_RED_REQ = 23 +MSG_CODE_MAP_RED_RESP = 24 +MSG_CODE_INDEX_REQ = 25 +MSG_CODE_INDEX_RESP = 26 +MSG_CODE_SEARCH_QUERY_REQ = 27 +MSG_CODE_SEARCH_QUERY_RESP = 28 +MSG_CODE_RESET_BUCKET_REQ = 29 +MSG_CODE_RESET_BUCKET_RESP = 30 +MSG_CODE_GET_BUCKET_TYPE_REQ = 31 +MSG_CODE_SET_BUCKET_TYPE_REQ = 32 +MSG_CODE_GET_BUCKET_KEY_PREFLIST_REQ = 33 +MSG_CODE_GET_BUCKET_KEY_PREFLIST_RESP = 34 +MSG_CODE_CS_BUCKET_REQ = 40 +MSG_CODE_CS_BUCKET_RESP = 41 +MSG_CODE_INDEX_BODY_RESP = 42 +MSG_CODE_COUNTER_UPDATE_REQ = 50 +MSG_CODE_COUNTER_UPDATE_RESP = 51 +MSG_CODE_COUNTER_GET_REQ = 52 
+MSG_CODE_COUNTER_GET_RESP = 53 +MSG_CODE_YOKOZUNA_INDEX_GET_REQ = 54 +MSG_CODE_YOKOZUNA_INDEX_GET_RESP = 55 +MSG_CODE_YOKOZUNA_INDEX_PUT_REQ = 56 +MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ = 57 +MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ = 58 +MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP = 59 +MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ = 60 +MSG_CODE_COVERAGE_REQ = 70 +MSG_CODE_COVERAGE_RESP = 71 +MSG_CODE_DT_FETCH_REQ = 80 +MSG_CODE_DT_FETCH_RESP = 81 +MSG_CODE_DT_UPDATE_REQ = 82 +MSG_CODE_DT_UPDATE_RESP = 83 +MSG_CODE_TS_QUERY_REQ = 90 +MSG_CODE_TS_QUERY_RESP = 91 +MSG_CODE_TS_PUT_REQ = 92 +MSG_CODE_TS_PUT_RESP = 93 +MSG_CODE_TS_DEL_REQ = 94 +MSG_CODE_TS_DEL_RESP = 95 +MSG_CODE_TS_GET_REQ = 96 +MSG_CODE_TS_GET_RESP = 97 +MSG_CODE_TS_LIST_KEYS_REQ = 98 +MSG_CODE_TS_LIST_KEYS_RESP = 99 +MSG_CODE_TS_COVERAGE_REQ = 100 +MSG_CODE_TS_COVERAGE_RESP = 101 +MSG_CODE_TS_COVERAGE_ENTRY = 102 +MSG_CODE_TS_RANGE = 103 +MSG_CODE_TS_TTB_MSG = 104 +MSG_CODE_AUTH_REQ = 253 +MSG_CODE_AUTH_RESP = 254 +MSG_CODE_START_TLS = 255 + +# Mapping from code to protobuf class +MESSAGE_CLASSES = { + MSG_CODE_ERROR_RESP: riak.pb.riak_pb2.RpbErrorResp, + MSG_CODE_PING_REQ: None, + MSG_CODE_PING_RESP: None, + MSG_CODE_GET_CLIENT_ID_REQ: None, + MSG_CODE_GET_CLIENT_ID_RESP: riak.pb.riak_kv_pb2.RpbGetClientIdResp, + MSG_CODE_SET_CLIENT_ID_REQ: riak.pb.riak_kv_pb2.RpbSetClientIdReq, + MSG_CODE_SET_CLIENT_ID_RESP: None, + MSG_CODE_GET_SERVER_INFO_REQ: None, + MSG_CODE_GET_SERVER_INFO_RESP: riak.pb.riak_pb2.RpbGetServerInfoResp, + MSG_CODE_GET_REQ: riak.pb.riak_kv_pb2.RpbGetReq, + MSG_CODE_GET_RESP: riak.pb.riak_kv_pb2.RpbGetResp, + MSG_CODE_PUT_REQ: riak.pb.riak_kv_pb2.RpbPutReq, + MSG_CODE_PUT_RESP: riak.pb.riak_kv_pb2.RpbPutResp, + MSG_CODE_DEL_REQ: riak.pb.riak_kv_pb2.RpbDelReq, + MSG_CODE_DEL_RESP: None, + MSG_CODE_LIST_BUCKETS_REQ: riak.pb.riak_kv_pb2.RpbListBucketsReq, + MSG_CODE_LIST_BUCKETS_RESP: riak.pb.riak_kv_pb2.RpbListBucketsResp, + MSG_CODE_LIST_KEYS_REQ: riak.pb.riak_kv_pb2.RpbListKeysReq, + MSG_CODE_LIST_KEYS_RESP: 
riak.pb.riak_kv_pb2.RpbListKeysResp, + MSG_CODE_GET_BUCKET_REQ: riak.pb.riak_pb2.RpbGetBucketReq, + MSG_CODE_GET_BUCKET_RESP: riak.pb.riak_pb2.RpbGetBucketResp, + MSG_CODE_SET_BUCKET_REQ: riak.pb.riak_pb2.RpbSetBucketReq, + MSG_CODE_SET_BUCKET_RESP: None, + MSG_CODE_MAP_RED_REQ: riak.pb.riak_kv_pb2.RpbMapRedReq, + MSG_CODE_MAP_RED_RESP: riak.pb.riak_kv_pb2.RpbMapRedResp, + MSG_CODE_INDEX_REQ: riak.pb.riak_kv_pb2.RpbIndexReq, + MSG_CODE_INDEX_RESP: riak.pb.riak_kv_pb2.RpbIndexResp, + MSG_CODE_SEARCH_QUERY_REQ: riak.pb.riak_search_pb2.RpbSearchQueryReq, + MSG_CODE_SEARCH_QUERY_RESP: riak.pb.riak_search_pb2.RpbSearchQueryResp, + MSG_CODE_RESET_BUCKET_REQ: riak.pb.riak_pb2.RpbResetBucketReq, + MSG_CODE_RESET_BUCKET_RESP: None, + MSG_CODE_GET_BUCKET_TYPE_REQ: riak.pb.riak_pb2.RpbGetBucketTypeReq, + MSG_CODE_SET_BUCKET_TYPE_REQ: riak.pb.riak_pb2.RpbSetBucketTypeReq, + MSG_CODE_GET_BUCKET_KEY_PREFLIST_REQ: + riak.pb.riak_kv_pb2.RpbGetBucketKeyPreflistReq, + MSG_CODE_GET_BUCKET_KEY_PREFLIST_RESP: + riak.pb.riak_kv_pb2.RpbGetBucketKeyPreflistResp, + MSG_CODE_CS_BUCKET_REQ: riak.pb.riak_kv_pb2.RpbCSBucketReq, + MSG_CODE_CS_BUCKET_RESP: riak.pb.riak_kv_pb2.RpbCSBucketResp, + MSG_CODE_INDEX_BODY_RESP: riak.pb.riak_kv_pb2.RpbIndexBodyResp, + MSG_CODE_COUNTER_UPDATE_REQ: riak.pb.riak_kv_pb2.RpbCounterUpdateReq, + MSG_CODE_COUNTER_UPDATE_RESP: riak.pb.riak_kv_pb2.RpbCounterUpdateResp, + MSG_CODE_COUNTER_GET_REQ: riak.pb.riak_kv_pb2.RpbCounterGetReq, + MSG_CODE_COUNTER_GET_RESP: riak.pb.riak_kv_pb2.RpbCounterGetResp, + MSG_CODE_YOKOZUNA_INDEX_GET_REQ: + riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexGetReq, + MSG_CODE_YOKOZUNA_INDEX_GET_RESP: + riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexGetResp, + MSG_CODE_YOKOZUNA_INDEX_PUT_REQ: + riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexPutReq, + MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ: + riak.pb.riak_yokozuna_pb2.RpbYokozunaIndexDeleteReq, + MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ: + riak.pb.riak_yokozuna_pb2.RpbYokozunaSchemaGetReq, + 
MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP: + riak.pb.riak_yokozuna_pb2.RpbYokozunaSchemaGetResp, + MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ: + riak.pb.riak_yokozuna_pb2.RpbYokozunaSchemaPutReq, + MSG_CODE_COVERAGE_REQ: riak.pb.riak_kv_pb2.RpbCoverageReq, + MSG_CODE_COVERAGE_RESP: riak.pb.riak_kv_pb2.RpbCoverageResp, + MSG_CODE_DT_FETCH_REQ: riak.pb.riak_dt_pb2.DtFetchReq, + MSG_CODE_DT_FETCH_RESP: riak.pb.riak_dt_pb2.DtFetchResp, + MSG_CODE_DT_UPDATE_REQ: riak.pb.riak_dt_pb2.DtUpdateReq, + MSG_CODE_DT_UPDATE_RESP: riak.pb.riak_dt_pb2.DtUpdateResp, + MSG_CODE_TS_QUERY_REQ: riak.pb.riak_ts_pb2.TsQueryReq, + MSG_CODE_TS_QUERY_RESP: riak.pb.riak_ts_pb2.TsQueryResp, + MSG_CODE_TS_PUT_REQ: riak.pb.riak_ts_pb2.TsPutReq, + MSG_CODE_TS_PUT_RESP: riak.pb.riak_ts_pb2.TsPutResp, + MSG_CODE_TS_DEL_REQ: riak.pb.riak_ts_pb2.TsDelReq, + MSG_CODE_TS_DEL_RESP: riak.pb.riak_ts_pb2.TsDelResp, + MSG_CODE_TS_GET_REQ: riak.pb.riak_ts_pb2.TsGetReq, + MSG_CODE_TS_GET_RESP: riak.pb.riak_ts_pb2.TsGetResp, + MSG_CODE_TS_LIST_KEYS_REQ: riak.pb.riak_ts_pb2.TsListKeysReq, + MSG_CODE_TS_LIST_KEYS_RESP: riak.pb.riak_ts_pb2.TsListKeysResp, + MSG_CODE_TS_COVERAGE_REQ: riak.pb.riak_ts_pb2.TsCoverageReq, + MSG_CODE_TS_COVERAGE_RESP: riak.pb.riak_ts_pb2.TsCoverageResp, + MSG_CODE_TS_COVERAGE_ENTRY: riak.pb.riak_ts_pb2.TsCoverageEntry, + MSG_CODE_TS_RANGE: riak.pb.riak_ts_pb2.TsRange, + MSG_CODE_TS_TTB_MSG: None, + MSG_CODE_AUTH_REQ: riak.pb.riak_pb2.RpbAuthReq, + MSG_CODE_AUTH_RESP: None, + MSG_CODE_START_TLS: None +} diff --git a/riak/pb/riak_dt_pb2.py b/riak/pb/riak_dt_pb2.py new file mode 100644 index 00000000..ba9a590d --- /dev/null +++ b/riak/pb/riak_dt_pb2.py @@ -0,0 +1,999 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak_dt.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak_dt.proto', + package='', + serialized_pb='\n\rriak_dt.proto\"\x85\x01\n\x08MapField\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12$\n\x04type\x18\x02 \x02(\x0e\x32\x16.MapField.MapFieldType\"E\n\x0cMapFieldType\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\x07\n\x03SET\x10\x02\x12\x0c\n\x08REGISTER\x10\x03\x12\x08\n\x04\x46LAG\x10\x04\x12\x07\n\x03MAP\x10\x05\"\x98\x01\n\x08MapEntry\x12\x18\n\x05\x66ield\x18\x01 \x02(\x0b\x32\t.MapField\x12\x15\n\rcounter_value\x18\x02 \x01(\x12\x12\x11\n\tset_value\x18\x03 \x03(\x0c\x12\x16\n\x0eregister_value\x18\x04 \x01(\x0c\x12\x12\n\nflag_value\x18\x05 \x01(\x08\x12\x1c\n\tmap_value\x18\x06 \x03(\x0b\x32\t.MapEntry\"\xcf\x01\n\nDtFetchReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\x0c\n\x04type\x18\x03 \x02(\x0c\x12\t\n\x01r\x18\x04 \x01(\r\x12\n\n\x02pr\x18\x05 \x01(\r\x12\x14\n\x0c\x62\x61sic_quorum\x18\x06 \x01(\x08\x12\x13\n\x0bnotfound_ok\x18\x07 \x01(\x08\x12\x0f\n\x07timeout\x18\x08 \x01(\r\x12\x15\n\rsloppy_quorum\x18\t \x01(\x08\x12\r\n\x05n_val\x18\n \x01(\r\x12\x1d\n\x0finclude_context\x18\x0b \x01(\x08:\x04true\"x\n\x07\x44tValue\x12\x15\n\rcounter_value\x18\x01 
\x01(\x12\x12\x11\n\tset_value\x18\x02 \x03(\x0c\x12\x1c\n\tmap_value\x18\x03 \x03(\x0b\x32\t.MapEntry\x12\x11\n\thll_value\x18\x04 \x01(\x04\x12\x12\n\ngset_value\x18\x05 \x03(\x0c\"\x9a\x01\n\x0b\x44tFetchResp\x12\x0f\n\x07\x63ontext\x18\x01 \x01(\x0c\x12#\n\x04type\x18\x02 \x02(\x0e\x32\x15.DtFetchResp.DataType\x12\x17\n\x05value\x18\x03 \x01(\x0b\x32\x08.DtValue\"<\n\x08\x44\x61taType\x12\x0b\n\x07\x43OUNTER\x10\x01\x12\x07\n\x03SET\x10\x02\x12\x07\n\x03MAP\x10\x03\x12\x07\n\x03HLL\x10\x04\x12\x08\n\x04GSET\x10\x05\"\x1e\n\tCounterOp\x12\x11\n\tincrement\x18\x01 \x01(\x12\"&\n\x05SetOp\x12\x0c\n\x04\x61\x64\x64s\x18\x01 \x03(\x0c\x12\x0f\n\x07removes\x18\x02 \x03(\x0c\"\x16\n\x06GSetOp\x12\x0c\n\x04\x61\x64\x64s\x18\x01 \x03(\x0c\"\x15\n\x05HllOp\x12\x0c\n\x04\x61\x64\x64s\x18\x01 \x03(\x0c\"\xd1\x01\n\tMapUpdate\x12\x18\n\x05\x66ield\x18\x01 \x02(\x0b\x32\t.MapField\x12\x1e\n\ncounter_op\x18\x02 \x01(\x0b\x32\n.CounterOp\x12\x16\n\x06set_op\x18\x03 \x01(\x0b\x32\x06.SetOp\x12\x13\n\x0bregister_op\x18\x04 \x01(\x0c\x12\"\n\x07\x66lag_op\x18\x05 \x01(\x0e\x32\x11.MapUpdate.FlagOp\x12\x16\n\x06map_op\x18\x06 \x01(\x0b\x32\x06.MapOp\"!\n\x06\x46lagOp\x12\n\n\x06\x45NABLE\x10\x01\x12\x0b\n\x07\x44ISABLE\x10\x02\"@\n\x05MapOp\x12\x1a\n\x07removes\x18\x01 \x03(\x0b\x32\t.MapField\x12\x1b\n\x07updates\x18\x02 \x03(\x0b\x32\n.MapUpdate\"\x88\x01\n\x04\x44tOp\x12\x1e\n\ncounter_op\x18\x01 \x01(\x0b\x32\n.CounterOp\x12\x16\n\x06set_op\x18\x02 \x01(\x0b\x32\x06.SetOp\x12\x16\n\x06map_op\x18\x03 \x01(\x0b\x32\x06.MapOp\x12\x16\n\x06hll_op\x18\x04 \x01(\x0b\x32\x06.HllOp\x12\x18\n\x07gset_op\x18\x05 \x01(\x0b\x32\x07.GSetOp\"\xf1\x01\n\x0b\x44tUpdateReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x0c\n\x04type\x18\x03 \x02(\x0c\x12\x0f\n\x07\x63ontext\x18\x04 \x01(\x0c\x12\x11\n\x02op\x18\x05 \x02(\x0b\x32\x05.DtOp\x12\t\n\x01w\x18\x06 \x01(\r\x12\n\n\x02\x64w\x18\x07 \x01(\r\x12\n\n\x02pw\x18\x08 \x01(\r\x12\x1a\n\x0breturn_body\x18\t 
\x01(\x08:\x05\x66\x61lse\x12\x0f\n\x07timeout\x18\n \x01(\r\x12\x15\n\rsloppy_quorum\x18\x0b \x01(\x08\x12\r\n\x05n_val\x18\x0c \x01(\r\x12\x1d\n\x0finclude_context\x18\r \x01(\x08:\x04true\"\x9b\x01\n\x0c\x44tUpdateResp\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\x0f\n\x07\x63ontext\x18\x02 \x01(\x0c\x12\x15\n\rcounter_value\x18\x03 \x01(\x12\x12\x11\n\tset_value\x18\x04 \x03(\x0c\x12\x1c\n\tmap_value\x18\x05 \x03(\x0b\x32\t.MapEntry\x12\x11\n\thll_value\x18\x06 \x01(\x04\x12\x12\n\ngset_value\x18\x07 \x03(\x0c\x42#\n\x17\x63om.basho.riak.protobufB\x08RiakDtPB') + + + +_MAPFIELD_MAPFIELDTYPE = _descriptor.EnumDescriptor( + name='MapFieldType', + full_name='MapField.MapFieldType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COUNTER', index=0, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SET', index=1, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REGISTER', index=2, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FLAG', index=3, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MAP', index=4, number=5, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=82, + serialized_end=151, +) + +_DTFETCHRESP_DATATYPE = _descriptor.EnumDescriptor( + name='DataType', + full_name='DtFetchResp.DataType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='COUNTER', index=0, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SET', index=1, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MAP', index=2, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HLL', index=3, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='GSET', index=4, number=5, + options=None, + type=None), + ], + 
containing_type=None, + options=None, + serialized_start=735, + serialized_end=795, +) + +_MAPUPDATE_FLAGOP = _descriptor.EnumDescriptor( + name='FlagOp', + full_name='MapUpdate.FlagOp', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='ENABLE', index=0, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DISABLE', index=1, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1093, + serialized_end=1126, +) + + +_MAPFIELD = _descriptor.Descriptor( + name='MapField', + full_name='MapField', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='MapField.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='MapField.type', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _MAPFIELD_MAPFIELDTYPE, + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=18, + serialized_end=151, +) + + +_MAPENTRY = _descriptor.Descriptor( + name='MapEntry', + full_name='MapEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='field', full_name='MapEntry.field', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='counter_value', 
full_name='MapEntry.counter_value', index=1, + number=2, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set_value', full_name='MapEntry.set_value', index=2, + number=3, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='register_value', full_name='MapEntry.register_value', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='flag_value', full_name='MapEntry.flag_value', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='map_value', full_name='MapEntry.map_value', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=154, + serialized_end=306, +) + + +_DTFETCHREQ = _descriptor.Descriptor( + name='DtFetchReq', + full_name='DtFetchReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='DtFetchReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='DtFetchReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='DtFetchReq.type', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='r', full_name='DtFetchReq.r', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pr', full_name='DtFetchReq.pr', index=4, + number=5, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basic_quorum', full_name='DtFetchReq.basic_quorum', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notfound_ok', full_name='DtFetchReq.notfound_ok', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='DtFetchReq.timeout', index=7, + number=8, type=13, cpp_type=3, label=1, + 
has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sloppy_quorum', full_name='DtFetchReq.sloppy_quorum', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='DtFetchReq.n_val', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='include_context', full_name='DtFetchReq.include_context', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=309, + serialized_end=516, +) + + +_DTVALUE = _descriptor.Descriptor( + name='DtValue', + full_name='DtValue', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='counter_value', full_name='DtValue.counter_value', index=0, + number=1, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set_value', full_name='DtValue.set_value', index=1, + number=2, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='map_value', full_name='DtValue.map_value', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hll_value', full_name='DtValue.hll_value', index=3, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gset_value', full_name='DtValue.gset_value', index=4, + number=5, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=518, + serialized_end=638, +) + + +_DTFETCHRESP = _descriptor.Descriptor( + name='DtFetchResp', + full_name='DtFetchResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='context', full_name='DtFetchResp.context', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='DtFetchResp.type', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='DtFetchResp.value', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _DTFETCHRESP_DATATYPE, + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=641, + serialized_end=795, +) + + +_COUNTEROP = _descriptor.Descriptor( + name='CounterOp', + full_name='CounterOp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='increment', full_name='CounterOp.increment', index=0, + number=1, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=797, + serialized_end=827, +) + + +_SETOP = _descriptor.Descriptor( + name='SetOp', + full_name='SetOp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='adds', full_name='SetOp.adds', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='removes', full_name='SetOp.removes', index=1, + number=2, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=829, + serialized_end=867, +) + + +_GSETOP = _descriptor.Descriptor( + name='GSetOp', + full_name='GSetOp', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='adds', full_name='GSetOp.adds', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=869, + serialized_end=891, +) + + +_HLLOP = _descriptor.Descriptor( + name='HllOp', + full_name='HllOp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='adds', full_name='HllOp.adds', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=893, + serialized_end=914, +) + + +_MAPUPDATE = _descriptor.Descriptor( + name='MapUpdate', + full_name='MapUpdate', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='field', full_name='MapUpdate.field', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='counter_op', full_name='MapUpdate.counter_op', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set_op', full_name='MapUpdate.set_op', index=2, + number=3, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='register_op', full_name='MapUpdate.register_op', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='flag_op', full_name='MapUpdate.flag_op', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=1, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='map_op', full_name='MapUpdate.map_op', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _MAPUPDATE_FLAGOP, + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=917, + serialized_end=1126, +) + + +_MAPOP = _descriptor.Descriptor( + name='MapOp', + full_name='MapOp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='removes', full_name='MapOp.removes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='updates', full_name='MapOp.updates', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + 
extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1128, + serialized_end=1192, +) + + +_DTOP = _descriptor.Descriptor( + name='DtOp', + full_name='DtOp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='counter_op', full_name='DtOp.counter_op', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set_op', full_name='DtOp.set_op', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='map_op', full_name='DtOp.map_op', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hll_op', full_name='DtOp.hll_op', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gset_op', full_name='DtOp.gset_op', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1195, + serialized_end=1331, +) + + +_DTUPDATEREQ = 
_descriptor.Descriptor( + name='DtUpdateReq', + full_name='DtUpdateReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='DtUpdateReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='DtUpdateReq.key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='DtUpdateReq.type', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='context', full_name='DtUpdateReq.context', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='op', full_name='DtUpdateReq.op', index=4, + number=5, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='w', full_name='DtUpdateReq.w', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dw', full_name='DtUpdateReq.dw', index=6, + number=7, type=13, 
cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pw', full_name='DtUpdateReq.pw', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='return_body', full_name='DtUpdateReq.return_body', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='DtUpdateReq.timeout', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sloppy_quorum', full_name='DtUpdateReq.sloppy_quorum', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='DtUpdateReq.n_val', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='include_context', full_name='DtUpdateReq.include_context', index=12, + number=13, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1334, + serialized_end=1575, +) + + +_DTUPDATERESP = _descriptor.Descriptor( + name='DtUpdateResp', + full_name='DtUpdateResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='DtUpdateResp.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='context', full_name='DtUpdateResp.context', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='counter_value', full_name='DtUpdateResp.counter_value', index=2, + number=3, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='set_value', full_name='DtUpdateResp.set_value', index=3, + number=4, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='map_value', full_name='DtUpdateResp.map_value', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hll_value', full_name='DtUpdateResp.hll_value', index=5, + number=6, type=4, cpp_type=4, 
label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='gset_value', full_name='DtUpdateResp.gset_value', index=6, + number=7, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1578, + serialized_end=1733, +) + +_MAPFIELD.fields_by_name['type'].enum_type = _MAPFIELD_MAPFIELDTYPE +_MAPFIELD_MAPFIELDTYPE.containing_type = _MAPFIELD; +_MAPENTRY.fields_by_name['field'].message_type = _MAPFIELD +_MAPENTRY.fields_by_name['map_value'].message_type = _MAPENTRY +_DTVALUE.fields_by_name['map_value'].message_type = _MAPENTRY +_DTFETCHRESP.fields_by_name['type'].enum_type = _DTFETCHRESP_DATATYPE +_DTFETCHRESP.fields_by_name['value'].message_type = _DTVALUE +_DTFETCHRESP_DATATYPE.containing_type = _DTFETCHRESP; +_MAPUPDATE.fields_by_name['field'].message_type = _MAPFIELD +_MAPUPDATE.fields_by_name['counter_op'].message_type = _COUNTEROP +_MAPUPDATE.fields_by_name['set_op'].message_type = _SETOP +_MAPUPDATE.fields_by_name['flag_op'].enum_type = _MAPUPDATE_FLAGOP +_MAPUPDATE.fields_by_name['map_op'].message_type = _MAPOP +_MAPUPDATE_FLAGOP.containing_type = _MAPUPDATE; +_MAPOP.fields_by_name['removes'].message_type = _MAPFIELD +_MAPOP.fields_by_name['updates'].message_type = _MAPUPDATE +_DTOP.fields_by_name['counter_op'].message_type = _COUNTEROP +_DTOP.fields_by_name['set_op'].message_type = _SETOP +_DTOP.fields_by_name['map_op'].message_type = _MAPOP +_DTOP.fields_by_name['hll_op'].message_type = _HLLOP +_DTOP.fields_by_name['gset_op'].message_type = _GSETOP +_DTUPDATEREQ.fields_by_name['op'].message_type = _DTOP 
+_DTUPDATERESP.fields_by_name['map_value'].message_type = _MAPENTRY +DESCRIPTOR.message_types_by_name['MapField'] = _MAPFIELD +DESCRIPTOR.message_types_by_name['MapEntry'] = _MAPENTRY +DESCRIPTOR.message_types_by_name['DtFetchReq'] = _DTFETCHREQ +DESCRIPTOR.message_types_by_name['DtValue'] = _DTVALUE +DESCRIPTOR.message_types_by_name['DtFetchResp'] = _DTFETCHRESP +DESCRIPTOR.message_types_by_name['CounterOp'] = _COUNTEROP +DESCRIPTOR.message_types_by_name['SetOp'] = _SETOP +DESCRIPTOR.message_types_by_name['GSetOp'] = _GSETOP +DESCRIPTOR.message_types_by_name['HllOp'] = _HLLOP +DESCRIPTOR.message_types_by_name['MapUpdate'] = _MAPUPDATE +DESCRIPTOR.message_types_by_name['MapOp'] = _MAPOP +DESCRIPTOR.message_types_by_name['DtOp'] = _DTOP +DESCRIPTOR.message_types_by_name['DtUpdateReq'] = _DTUPDATEREQ +DESCRIPTOR.message_types_by_name['DtUpdateResp'] = _DTUPDATERESP + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class MapField(_message.Message): + DESCRIPTOR = _MAPFIELD + + # @@protoc_insertion_point(class_scope:MapField) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class MapEntry(_message.Message): + DESCRIPTOR = _MAPENTRY + + # @@protoc_insertion_point(class_scope:MapEntry) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtFetchReq(_message.Message): + DESCRIPTOR = _DTFETCHREQ + + # @@protoc_insertion_point(class_scope:DtFetchReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtValue(_message.Message): + DESCRIPTOR = _DTVALUE + + # @@protoc_insertion_point(class_scope:DtValue) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtFetchResp(_message.Message): + DESCRIPTOR = _DTFETCHRESP + + # @@protoc_insertion_point(class_scope:DtFetchResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class CounterOp(_message.Message): + DESCRIPTOR = _COUNTEROP + + # @@protoc_insertion_point(class_scope:CounterOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class 
SetOp(_message.Message): + DESCRIPTOR = _SETOP + + # @@protoc_insertion_point(class_scope:SetOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class GSetOp(_message.Message): + DESCRIPTOR = _GSETOP + + # @@protoc_insertion_point(class_scope:GSetOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class HllOp(_message.Message): + DESCRIPTOR = _HLLOP + + # @@protoc_insertion_point(class_scope:HllOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class MapUpdate(_message.Message): + DESCRIPTOR = _MAPUPDATE + + # @@protoc_insertion_point(class_scope:MapUpdate) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class MapOp(_message.Message): + DESCRIPTOR = _MAPOP + + # @@protoc_insertion_point(class_scope:MapOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtOp(_message.Message): + DESCRIPTOR = _DTOP + + # @@protoc_insertion_point(class_scope:DtOp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtUpdateReq(_message.Message): + DESCRIPTOR = _DTUPDATEREQ + + # @@protoc_insertion_point(class_scope:DtUpdateReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class DtUpdateResp(_message.Message): + DESCRIPTOR = _DTUPDATERESP + + # @@protoc_insertion_point(class_scope:DtUpdateResp) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\010RiakDtPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/pb/riak_kv_pb2.py b/riak/pb/riak_kv_pb2.py new file mode 100644 index 00000000..f1832df6 --- /dev/null +++ b/riak/pb/riak_kv_pb2.py @@ -0,0 +1,1987 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak_kv.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + +import riak.pb.riak_pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak_kv.proto', + package='', + serialized_pb='\n\rriak_kv.proto\x1a\nriak.proto\"\'\n\x12RpbGetClientIdResp\x12\x11\n\tclient_id\x18\x01 \x02(\x0c\"&\n\x11RpbSetClientIdReq\x12\x11\n\tclient_id\x18\x01 \x02(\x0c\"\xe9\x01\n\tRpbGetReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\t\n\x01r\x18\x03 \x01(\r\x12\n\n\x02pr\x18\x04 \x01(\r\x12\x14\n\x0c\x62\x61sic_quorum\x18\x05 \x01(\x08\x12\x13\n\x0bnotfound_ok\x18\x06 \x01(\x08\x12\x13\n\x0bif_modified\x18\x07 \x01(\x0c\x12\x0c\n\x04head\x18\x08 \x01(\x08\x12\x15\n\rdeletedvclock\x18\t \x01(\x08\x12\x0f\n\x07timeout\x18\n \x01(\r\x12\x15\n\rsloppy_quorum\x18\x0b \x01(\x08\x12\r\n\x05n_val\x18\x0c \x01(\r\x12\x0c\n\x04type\x18\r \x01(\x0c\"M\n\nRpbGetResp\x12\x1c\n\x07\x63ontent\x18\x01 \x03(\x0b\x32\x0b.RpbContent\x12\x0e\n\x06vclock\x18\x02 \x01(\x0c\x12\x11\n\tunchanged\x18\x03 \x01(\x08\"\xa6\x02\n\tRpbPutReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x0e\n\x06vclock\x18\x03 \x01(\x0c\x12\x1c\n\x07\x63ontent\x18\x04 \x02(\x0b\x32\x0b.RpbContent\x12\t\n\x01w\x18\x05 \x01(\r\x12\n\n\x02\x64w\x18\x06 
\x01(\r\x12\x13\n\x0breturn_body\x18\x07 \x01(\x08\x12\n\n\x02pw\x18\x08 \x01(\r\x12\x17\n\x0fif_not_modified\x18\t \x01(\x08\x12\x15\n\rif_none_match\x18\n \x01(\x08\x12\x13\n\x0breturn_head\x18\x0b \x01(\x08\x12\x0f\n\x07timeout\x18\x0c \x01(\r\x12\x0c\n\x04\x61sis\x18\r \x01(\x08\x12\x15\n\rsloppy_quorum\x18\x0e \x01(\x08\x12\r\n\x05n_val\x18\x0f \x01(\r\x12\x0c\n\x04type\x18\x10 \x01(\x0c\"G\n\nRpbPutResp\x12\x1c\n\x07\x63ontent\x18\x01 \x03(\x0b\x32\x0b.RpbContent\x12\x0e\n\x06vclock\x18\x02 \x01(\x0c\x12\x0b\n\x03key\x18\x03 \x01(\x0c\"\xc3\x01\n\tRpbDelReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\n\n\x02rw\x18\x03 \x01(\r\x12\x0e\n\x06vclock\x18\x04 \x01(\x0c\x12\t\n\x01r\x18\x05 \x01(\r\x12\t\n\x01w\x18\x06 \x01(\r\x12\n\n\x02pr\x18\x07 \x01(\r\x12\n\n\x02pw\x18\x08 \x01(\r\x12\n\n\x02\x64w\x18\t \x01(\r\x12\x0f\n\x07timeout\x18\n \x01(\r\x12\x15\n\rsloppy_quorum\x18\x0b \x01(\x08\x12\r\n\x05n_val\x18\x0c \x01(\r\x12\x0c\n\x04type\x18\r \x01(\x0c\"B\n\x11RpbListBucketsReq\x12\x0f\n\x07timeout\x18\x01 \x01(\r\x12\x0e\n\x06stream\x18\x02 \x01(\x08\x12\x0c\n\x04type\x18\x03 \x01(\x0c\"3\n\x12RpbListBucketsResp\x12\x0f\n\x07\x62uckets\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64one\x18\x02 \x01(\x08\"?\n\x0eRpbListKeysReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0f\n\x07timeout\x18\x02 \x01(\r\x12\x0c\n\x04type\x18\x03 \x01(\x0c\"-\n\x0fRpbListKeysResp\x12\x0c\n\x04keys\x18\x01 \x03(\x0c\x12\x0c\n\x04\x64one\x18\x02 \x01(\x08\"5\n\x0cRpbMapRedReq\x12\x0f\n\x07request\x18\x01 \x02(\x0c\x12\x14\n\x0c\x63ontent_type\x18\x02 \x02(\x0c\">\n\rRpbMapRedResp\x12\r\n\x05phase\x18\x01 \x01(\r\x12\x10\n\x08response\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\"\xf9\x02\n\x0bRpbIndexReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\r\n\x05index\x18\x02 \x02(\x0c\x12*\n\x05qtype\x18\x03 \x02(\x0e\x32\x1b.RpbIndexReq.IndexQueryType\x12\x0b\n\x03key\x18\x04 \x01(\x0c\x12\x11\n\trange_min\x18\x05 
\x01(\x0c\x12\x11\n\trange_max\x18\x06 \x01(\x0c\x12\x14\n\x0creturn_terms\x18\x07 \x01(\x08\x12\x0e\n\x06stream\x18\x08 \x01(\x08\x12\x13\n\x0bmax_results\x18\t \x01(\r\x12\x14\n\x0c\x63ontinuation\x18\n \x01(\x0c\x12\x0f\n\x07timeout\x18\x0b \x01(\r\x12\x0c\n\x04type\x18\x0c \x01(\x0c\x12\x12\n\nterm_regex\x18\r \x01(\x0c\x12\x17\n\x0fpagination_sort\x18\x0e \x01(\x08\x12\x15\n\rcover_context\x18\x0f \x01(\x0c\x12\x13\n\x0breturn_body\x18\x10 \x01(\x08\"#\n\x0eIndexQueryType\x12\x06\n\x02\x65q\x10\x00\x12\t\n\x05range\x10\x01\"[\n\x0cRpbIndexResp\x12\x0c\n\x04keys\x18\x01 \x03(\x0c\x12\x19\n\x07results\x18\x02 \x03(\x0b\x32\x08.RpbPair\x12\x14\n\x0c\x63ontinuation\x18\x03 \x01(\x0c\x12\x0c\n\x04\x64one\x18\x04 \x01(\x08\"X\n\x10RpbIndexBodyResp\x12 \n\x07objects\x18\x01 \x03(\x0b\x32\x0f.RpbIndexObject\x12\x14\n\x0c\x63ontinuation\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\"\xd8\x01\n\x0eRpbCSBucketReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x11\n\tstart_key\x18\x02 \x02(\x0c\x12\x0f\n\x07\x65nd_key\x18\x03 \x01(\x0c\x12\x18\n\nstart_incl\x18\x04 \x01(\x08:\x04true\x12\x17\n\x08\x65nd_incl\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x14\n\x0c\x63ontinuation\x18\x06 \x01(\x0c\x12\x13\n\x0bmax_results\x18\x07 \x01(\r\x12\x0f\n\x07timeout\x18\x08 \x01(\r\x12\x0c\n\x04type\x18\t \x01(\x0c\x12\x15\n\rcover_context\x18\n \x01(\x0c\"W\n\x0fRpbCSBucketResp\x12 \n\x07objects\x18\x01 \x03(\x0b\x32\x0f.RpbIndexObject\x12\x14\n\x0c\x63ontinuation\x18\x02 \x01(\x0c\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\":\n\x0eRpbIndexObject\x12\x0b\n\x03key\x18\x01 \x02(\x0c\x12\x1b\n\x06object\x18\x02 \x02(\x0b\x32\x0b.RpbGetResp\"\xf5\x01\n\nRpbContent\x12\r\n\x05value\x18\x01 \x02(\x0c\x12\x14\n\x0c\x63ontent_type\x18\x02 \x01(\x0c\x12\x0f\n\x07\x63harset\x18\x03 \x01(\x0c\x12\x18\n\x10\x63ontent_encoding\x18\x04 \x01(\x0c\x12\x0c\n\x04vtag\x18\x05 \x01(\x0c\x12\x17\n\x05links\x18\x06 \x03(\x0b\x32\x08.RpbLink\x12\x10\n\x08last_mod\x18\x07 
\x01(\r\x12\x16\n\x0elast_mod_usecs\x18\x08 \x01(\r\x12\x1a\n\x08usermeta\x18\t \x03(\x0b\x32\x08.RpbPair\x12\x19\n\x07indexes\x18\n \x03(\x0b\x32\x08.RpbPair\x12\x0f\n\x07\x64\x65leted\x18\x0b \x01(\x08\"3\n\x07RpbLink\x12\x0e\n\x06\x62ucket\x18\x01 \x01(\x0c\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x0b\n\x03tag\x18\x03 \x01(\x0c\"z\n\x13RpbCounterUpdateReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\x0e\n\x06\x61mount\x18\x03 \x02(\x12\x12\t\n\x01w\x18\x04 \x01(\r\x12\n\n\x02\x64w\x18\x05 \x01(\r\x12\n\n\x02pw\x18\x06 \x01(\r\x12\x13\n\x0breturnvalue\x18\x07 \x01(\x08\"%\n\x14RpbCounterUpdateResp\x12\r\n\x05value\x18\x01 \x01(\x12\"q\n\x10RpbCounterGetReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\t\n\x01r\x18\x03 \x01(\r\x12\n\n\x02pr\x18\x04 \x01(\r\x12\x14\n\x0c\x62\x61sic_quorum\x18\x05 \x01(\x08\x12\x13\n\x0bnotfound_ok\x18\x06 \x01(\x08\"\"\n\x11RpbCounterGetResp\x12\r\n\x05value\x18\x01 \x01(\x12\"G\n\x1aRpbGetBucketKeyPreflistReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0b\n\x03key\x18\x02 \x02(\x0c\x12\x0c\n\x04type\x18\x03 \x01(\x0c\"J\n\x1bRpbGetBucketKeyPreflistResp\x12+\n\x08preflist\x18\x01 \x03(\x0b\x32\x19.RpbBucketKeyPreflistItem\"L\n\x18RpbBucketKeyPreflistItem\x12\x11\n\tpartition\x18\x01 \x02(\x03\x12\x0c\n\x04node\x18\x02 \x02(\x0c\x12\x0f\n\x07primary\x18\x03 \x02(\x08\"x\n\x0eRpbCoverageReq\x12\x0c\n\x04type\x18\x01 \x01(\x0c\x12\x0e\n\x06\x62ucket\x18\x02 \x02(\x0c\x12\x16\n\x0emin_partitions\x18\x03 \x01(\r\x12\x15\n\rreplace_cover\x18\x04 \x01(\x0c\x12\x19\n\x11unavailable_cover\x18\x05 \x03(\x0c\"5\n\x0fRpbCoverageResp\x12\"\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x11.RpbCoverageEntry\"Z\n\x10RpbCoverageEntry\x12\n\n\x02ip\x18\x01 \x02(\x0c\x12\x0c\n\x04port\x18\x02 \x02(\r\x12\x15\n\rkeyspace_desc\x18\x03 \x01(\x0c\x12\x15\n\rcover_context\x18\x04 \x02(\x0c\x42#\n\x17\x63om.basho.riak.protobufB\x08RiakKvPB') + + + +_RPBINDEXREQ_INDEXQUERYTYPE = 
_descriptor.EnumDescriptor( + name='IndexQueryType', + full_name='RpbIndexReq.IndexQueryType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='eq', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='range', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1688, + serialized_end=1723, +) + + +_RPBGETCLIENTIDRESP = _descriptor.Descriptor( + name='RpbGetClientIdResp', + full_name='RpbGetClientIdResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='client_id', full_name='RpbGetClientIdResp.client_id', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=29, + serialized_end=68, +) + + +_RPBSETCLIENTIDREQ = _descriptor.Descriptor( + name='RpbSetClientIdReq', + full_name='RpbSetClientIdReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='client_id', full_name='RpbSetClientIdReq.client_id', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=70, + serialized_end=108, +) + + +_RPBGETREQ = _descriptor.Descriptor( + name='RpbGetReq', + full_name='RpbGetReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', 
full_name='RpbGetReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbGetReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='r', full_name='RpbGetReq.r', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pr', full_name='RpbGetReq.pr', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basic_quorum', full_name='RpbGetReq.basic_quorum', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notfound_ok', full_name='RpbGetReq.notfound_ok', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='if_modified', full_name='RpbGetReq.if_modified', index=6, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='head', full_name='RpbGetReq.head', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='deletedvclock', full_name='RpbGetReq.deletedvclock', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbGetReq.timeout', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sloppy_quorum', full_name='RpbGetReq.sloppy_quorum', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='RpbGetReq.n_val', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbGetReq.type', index=12, + number=13, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=111, + 
serialized_end=344, +) + + +_RPBGETRESP = _descriptor.Descriptor( + name='RpbGetResp', + full_name='RpbGetResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='content', full_name='RpbGetResp.content', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='vclock', full_name='RpbGetResp.vclock', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unchanged', full_name='RpbGetResp.unchanged', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=346, + serialized_end=423, +) + + +_RPBPUTREQ = _descriptor.Descriptor( + name='RpbPutReq', + full_name='RpbPutReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbPutReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbPutReq.key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='vclock', full_name='RpbPutReq.vclock', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='content', full_name='RpbPutReq.content', index=3, + number=4, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='w', full_name='RpbPutReq.w', index=4, + number=5, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dw', full_name='RpbPutReq.dw', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='return_body', full_name='RpbPutReq.return_body', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pw', full_name='RpbPutReq.pw', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='if_not_modified', full_name='RpbPutReq.if_not_modified', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='if_none_match', full_name='RpbPutReq.if_none_match', index=9, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='return_head', full_name='RpbPutReq.return_head', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbPutReq.timeout', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='asis', full_name='RpbPutReq.asis', index=12, + number=13, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sloppy_quorum', full_name='RpbPutReq.sloppy_quorum', index=13, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='RpbPutReq.n_val', index=14, + number=15, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbPutReq.type', index=15, + number=16, 
type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=426, + serialized_end=720, +) + + +_RPBPUTRESP = _descriptor.Descriptor( + name='RpbPutResp', + full_name='RpbPutResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='content', full_name='RpbPutResp.content', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='vclock', full_name='RpbPutResp.vclock', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbPutResp.key', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=722, + serialized_end=793, +) + + +_RPBDELREQ = _descriptor.Descriptor( + name='RpbDelReq', + full_name='RpbDelReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbDelReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbDelReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rw', full_name='RpbDelReq.rw', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='vclock', full_name='RpbDelReq.vclock', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='r', full_name='RpbDelReq.r', index=4, + number=5, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='w', full_name='RpbDelReq.w', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pr', full_name='RpbDelReq.pr', index=6, + number=7, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pw', full_name='RpbDelReq.pw', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dw', full_name='RpbDelReq.dw', index=8, + number=9, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbDelReq.timeout', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sloppy_quorum', full_name='RpbDelReq.sloppy_quorum', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='RpbDelReq.n_val', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbDelReq.type', index=12, + number=13, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=796, + serialized_end=991, +) + + +_RPBLISTBUCKETSREQ = _descriptor.Descriptor( + name='RpbListBucketsReq', + full_name='RpbListBucketsReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timeout', 
full_name='RpbListBucketsReq.timeout', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stream', full_name='RpbListBucketsReq.stream', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbListBucketsReq.type', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=993, + serialized_end=1059, +) + + +_RPBLISTBUCKETSRESP = _descriptor.Descriptor( + name='RpbListBucketsResp', + full_name='RpbListBucketsResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='buckets', full_name='RpbListBucketsResp.buckets', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='RpbListBucketsResp.done', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1061, + serialized_end=1112, +) 
+ + +_RPBLISTKEYSREQ = _descriptor.Descriptor( + name='RpbListKeysReq', + full_name='RpbListKeysReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbListKeysReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbListKeysReq.timeout', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbListKeysReq.type', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1114, + serialized_end=1177, +) + + +_RPBLISTKEYSRESP = _descriptor.Descriptor( + name='RpbListKeysResp', + full_name='RpbListKeysResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keys', full_name='RpbListKeysResp.keys', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='RpbListKeysResp.done', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1179, + serialized_end=1224, +) + + +_RPBMAPREDREQ = _descriptor.Descriptor( + name='RpbMapRedReq', + full_name='RpbMapRedReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='request', full_name='RpbMapRedReq.request', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='content_type', full_name='RpbMapRedReq.content_type', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1226, + serialized_end=1279, +) + + +_RPBMAPREDRESP = _descriptor.Descriptor( + name='RpbMapRedResp', + full_name='RpbMapRedResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='phase', full_name='RpbMapRedResp.phase', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='response', full_name='RpbMapRedResp.response', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='RpbMapRedResp.done', index=2, + 
number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1281, + serialized_end=1343, +) + + +_RPBINDEXREQ = _descriptor.Descriptor( + name='RpbIndexReq', + full_name='RpbIndexReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbIndexReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='index', full_name='RpbIndexReq.index', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='qtype', full_name='RpbIndexReq.qtype', index=2, + number=3, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbIndexReq.key', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range_min', full_name='RpbIndexReq.range_min', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range_max', full_name='RpbIndexReq.range_max', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='return_terms', full_name='RpbIndexReq.return_terms', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stream', full_name='RpbIndexReq.stream', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_results', full_name='RpbIndexReq.max_results', index=8, + number=9, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='continuation', full_name='RpbIndexReq.continuation', index=9, + number=10, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbIndexReq.timeout', index=10, + number=11, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbIndexReq.type', index=11, + number=12, type=12, cpp_type=9, label=1, + 
has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='term_regex', full_name='RpbIndexReq.term_regex', index=12, + number=13, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pagination_sort', full_name='RpbIndexReq.pagination_sort', index=13, + number=14, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover_context', full_name='RpbIndexReq.cover_context', index=14, + number=15, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='return_body', full_name='RpbIndexReq.return_body', index=15, + number=16, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _RPBINDEXREQ_INDEXQUERYTYPE, + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1346, + serialized_end=1723, +) + + +_RPBINDEXRESP = _descriptor.Descriptor( + name='RpbIndexResp', + full_name='RpbIndexResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keys', full_name='RpbIndexResp.keys', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='results', full_name='RpbIndexResp.results', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='continuation', full_name='RpbIndexResp.continuation', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='RpbIndexResp.done', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1725, + serialized_end=1816, +) + + +_RPBINDEXBODYRESP = _descriptor.Descriptor( + name='RpbIndexBodyResp', + full_name='RpbIndexBodyResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='objects', full_name='RpbIndexBodyResp.objects', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='continuation', full_name='RpbIndexBodyResp.continuation', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='done', full_name='RpbIndexBodyResp.done', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1818, + serialized_end=1906, +) + + +_RPBCSBUCKETREQ = _descriptor.Descriptor( + name='RpbCSBucketReq', + full_name='RpbCSBucketReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbCSBucketReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_key', full_name='RpbCSBucketReq.start_key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key', full_name='RpbCSBucketReq.end_key', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_incl', full_name='RpbCSBucketReq.start_incl', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_incl', full_name='RpbCSBucketReq.end_incl', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, 
default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='continuation', full_name='RpbCSBucketReq.continuation', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_results', full_name='RpbCSBucketReq.max_results', index=6, + number=7, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbCSBucketReq.timeout', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbCSBucketReq.type', index=8, + number=9, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover_context', full_name='RpbCSBucketReq.cover_context', index=9, + number=10, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1909, + serialized_end=2125, +) + + +_RPBCSBUCKETRESP = _descriptor.Descriptor( + name='RpbCSBucketResp', + full_name='RpbCSBucketResp', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='objects', full_name='RpbCSBucketResp.objects', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='continuation', full_name='RpbCSBucketResp.continuation', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='RpbCSBucketResp.done', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2127, + serialized_end=2214, +) + + +_RPBINDEXOBJECT = _descriptor.Descriptor( + name='RpbIndexObject', + full_name='RpbIndexObject', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='RpbIndexObject.key', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='object', full_name='RpbIndexObject.object', index=1, + number=2, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + 
extension_ranges=[], + serialized_start=2216, + serialized_end=2274, +) + + +_RPBCONTENT = _descriptor.Descriptor( + name='RpbContent', + full_name='RpbContent', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='RpbContent.value', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='content_type', full_name='RpbContent.content_type', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='charset', full_name='RpbContent.charset', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='content_encoding', full_name='RpbContent.content_encoding', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='vtag', full_name='RpbContent.vtag', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='links', full_name='RpbContent.links', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_mod', full_name='RpbContent.last_mod', index=6, + number=7, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_mod_usecs', full_name='RpbContent.last_mod_usecs', index=7, + number=8, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='usermeta', full_name='RpbContent.usermeta', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='indexes', full_name='RpbContent.indexes', index=9, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='deleted', full_name='RpbContent.deleted', index=10, + number=11, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2277, + serialized_end=2522, +) + + +_RPBLINK = _descriptor.Descriptor( + name='RpbLink', + full_name='RpbLink', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbLink.bucket', index=0, + number=1, type=12, cpp_type=9, 
label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbLink.key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='tag', full_name='RpbLink.tag', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2524, + serialized_end=2575, +) + + +_RPBCOUNTERUPDATEREQ = _descriptor.Descriptor( + name='RpbCounterUpdateReq', + full_name='RpbCounterUpdateReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbCounterUpdateReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbCounterUpdateReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='amount', full_name='RpbCounterUpdateReq.amount', index=2, + number=3, type=18, cpp_type=2, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, 
+ options=None), + _descriptor.FieldDescriptor( + name='w', full_name='RpbCounterUpdateReq.w', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dw', full_name='RpbCounterUpdateReq.dw', index=4, + number=5, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pw', full_name='RpbCounterUpdateReq.pw', index=5, + number=6, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='returnvalue', full_name='RpbCounterUpdateReq.returnvalue', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2577, + serialized_end=2699, +) + + +_RPBCOUNTERUPDATERESP = _descriptor.Descriptor( + name='RpbCounterUpdateResp', + full_name='RpbCounterUpdateResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='RpbCounterUpdateResp.value', index=0, + number=1, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + 
extension_ranges=[], + serialized_start=2701, + serialized_end=2738, +) + + +_RPBCOUNTERGETREQ = _descriptor.Descriptor( + name='RpbCounterGetReq', + full_name='RpbCounterGetReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbCounterGetReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbCounterGetReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='r', full_name='RpbCounterGetReq.r', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pr', full_name='RpbCounterGetReq.pr', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basic_quorum', full_name='RpbCounterGetReq.basic_quorum', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notfound_ok', full_name='RpbCounterGetReq.notfound_ok', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2740, + serialized_end=2853, +) + + +_RPBCOUNTERGETRESP = _descriptor.Descriptor( + name='RpbCounterGetResp', + full_name='RpbCounterGetResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='value', full_name='RpbCounterGetResp.value', index=0, + number=1, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2855, + serialized_end=2889, +) + + +_RPBGETBUCKETKEYPREFLISTREQ = _descriptor.Descriptor( + name='RpbGetBucketKeyPreflistReq', + full_name='RpbGetBucketKeyPreflistReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbGetBucketKeyPreflistReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='RpbGetBucketKeyPreflistReq.key', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbGetBucketKeyPreflistReq.type', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2891, + serialized_end=2962, +) + + +_RPBGETBUCKETKEYPREFLISTRESP = _descriptor.Descriptor( + name='RpbGetBucketKeyPreflistResp', + full_name='RpbGetBucketKeyPreflistResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='preflist', full_name='RpbGetBucketKeyPreflistResp.preflist', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=2964, + serialized_end=3038, +) + + +_RPBBUCKETKEYPREFLISTITEM = _descriptor.Descriptor( + name='RpbBucketKeyPreflistItem', + full_name='RpbBucketKeyPreflistItem', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='partition', full_name='RpbBucketKeyPreflistItem.partition', index=0, + number=1, type=3, cpp_type=2, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='node', full_name='RpbBucketKeyPreflistItem.node', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='primary', full_name='RpbBucketKeyPreflistItem.primary', index=2, + number=3, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=3040, + serialized_end=3116, +) + + +_RPBCOVERAGEREQ = _descriptor.Descriptor( + name='RpbCoverageReq', + full_name='RpbCoverageReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='RpbCoverageReq.type', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbCoverageReq.bucket', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='min_partitions', full_name='RpbCoverageReq.min_partitions', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replace_cover', full_name='RpbCoverageReq.replace_cover', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unavailable_cover', full_name='RpbCoverageReq.unavailable_cover', index=4, + number=5, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=3118, + serialized_end=3238, +) + + +_RPBCOVERAGERESP = _descriptor.Descriptor( + name='RpbCoverageResp', + full_name='RpbCoverageResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='RpbCoverageResp.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=3240, + serialized_end=3293, +) + + +_RPBCOVERAGEENTRY = _descriptor.Descriptor( + name='RpbCoverageEntry', + full_name='RpbCoverageEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='ip', full_name='RpbCoverageEntry.ip', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='port', full_name='RpbCoverageEntry.port', index=1, + number=2, type=13, cpp_type=3, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='keyspace_desc', full_name='RpbCoverageEntry.keyspace_desc', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover_context', full_name='RpbCoverageEntry.cover_context', index=3, + number=4, type=12, 
cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=3295, + serialized_end=3385, +) + +_RPBGETRESP.fields_by_name['content'].message_type = _RPBCONTENT +_RPBPUTREQ.fields_by_name['content'].message_type = _RPBCONTENT +_RPBPUTRESP.fields_by_name['content'].message_type = _RPBCONTENT +_RPBINDEXREQ.fields_by_name['qtype'].enum_type = _RPBINDEXREQ_INDEXQUERYTYPE +_RPBINDEXREQ_INDEXQUERYTYPE.containing_type = _RPBINDEXREQ; +_RPBINDEXRESP.fields_by_name['results'].message_type = riak.pb.riak_pb2._RPBPAIR +_RPBINDEXBODYRESP.fields_by_name['objects'].message_type = _RPBINDEXOBJECT +_RPBCSBUCKETRESP.fields_by_name['objects'].message_type = _RPBINDEXOBJECT +_RPBINDEXOBJECT.fields_by_name['object'].message_type = _RPBGETRESP +_RPBCONTENT.fields_by_name['links'].message_type = _RPBLINK +_RPBCONTENT.fields_by_name['usermeta'].message_type = riak.pb.riak_pb2._RPBPAIR +_RPBCONTENT.fields_by_name['indexes'].message_type = riak.pb.riak_pb2._RPBPAIR +_RPBGETBUCKETKEYPREFLISTRESP.fields_by_name['preflist'].message_type = _RPBBUCKETKEYPREFLISTITEM +_RPBCOVERAGERESP.fields_by_name['entries'].message_type = _RPBCOVERAGEENTRY +DESCRIPTOR.message_types_by_name['RpbGetClientIdResp'] = _RPBGETCLIENTIDRESP +DESCRIPTOR.message_types_by_name['RpbSetClientIdReq'] = _RPBSETCLIENTIDREQ +DESCRIPTOR.message_types_by_name['RpbGetReq'] = _RPBGETREQ +DESCRIPTOR.message_types_by_name['RpbGetResp'] = _RPBGETRESP +DESCRIPTOR.message_types_by_name['RpbPutReq'] = _RPBPUTREQ +DESCRIPTOR.message_types_by_name['RpbPutResp'] = _RPBPUTRESP +DESCRIPTOR.message_types_by_name['RpbDelReq'] = _RPBDELREQ +DESCRIPTOR.message_types_by_name['RpbListBucketsReq'] = _RPBLISTBUCKETSREQ +DESCRIPTOR.message_types_by_name['RpbListBucketsResp'] = 
_RPBLISTBUCKETSRESP +DESCRIPTOR.message_types_by_name['RpbListKeysReq'] = _RPBLISTKEYSREQ +DESCRIPTOR.message_types_by_name['RpbListKeysResp'] = _RPBLISTKEYSRESP +DESCRIPTOR.message_types_by_name['RpbMapRedReq'] = _RPBMAPREDREQ +DESCRIPTOR.message_types_by_name['RpbMapRedResp'] = _RPBMAPREDRESP +DESCRIPTOR.message_types_by_name['RpbIndexReq'] = _RPBINDEXREQ +DESCRIPTOR.message_types_by_name['RpbIndexResp'] = _RPBINDEXRESP +DESCRIPTOR.message_types_by_name['RpbIndexBodyResp'] = _RPBINDEXBODYRESP +DESCRIPTOR.message_types_by_name['RpbCSBucketReq'] = _RPBCSBUCKETREQ +DESCRIPTOR.message_types_by_name['RpbCSBucketResp'] = _RPBCSBUCKETRESP +DESCRIPTOR.message_types_by_name['RpbIndexObject'] = _RPBINDEXOBJECT +DESCRIPTOR.message_types_by_name['RpbContent'] = _RPBCONTENT +DESCRIPTOR.message_types_by_name['RpbLink'] = _RPBLINK +DESCRIPTOR.message_types_by_name['RpbCounterUpdateReq'] = _RPBCOUNTERUPDATEREQ +DESCRIPTOR.message_types_by_name['RpbCounterUpdateResp'] = _RPBCOUNTERUPDATERESP +DESCRIPTOR.message_types_by_name['RpbCounterGetReq'] = _RPBCOUNTERGETREQ +DESCRIPTOR.message_types_by_name['RpbCounterGetResp'] = _RPBCOUNTERGETRESP +DESCRIPTOR.message_types_by_name['RpbGetBucketKeyPreflistReq'] = _RPBGETBUCKETKEYPREFLISTREQ +DESCRIPTOR.message_types_by_name['RpbGetBucketKeyPreflistResp'] = _RPBGETBUCKETKEYPREFLISTRESP +DESCRIPTOR.message_types_by_name['RpbBucketKeyPreflistItem'] = _RPBBUCKETKEYPREFLISTITEM +DESCRIPTOR.message_types_by_name['RpbCoverageReq'] = _RPBCOVERAGEREQ +DESCRIPTOR.message_types_by_name['RpbCoverageResp'] = _RPBCOVERAGERESP +DESCRIPTOR.message_types_by_name['RpbCoverageEntry'] = _RPBCOVERAGEENTRY + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetClientIdResp(_message.Message): + DESCRIPTOR = _RPBGETCLIENTIDRESP + + # @@protoc_insertion_point(class_scope:RpbGetClientIdResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSetClientIdReq(_message.Message): + DESCRIPTOR = _RPBSETCLIENTIDREQ + + # 
@@protoc_insertion_point(class_scope:RpbSetClientIdReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetReq(_message.Message): + DESCRIPTOR = _RPBGETREQ + + # @@protoc_insertion_point(class_scope:RpbGetReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetResp(_message.Message): + DESCRIPTOR = _RPBGETRESP + + # @@protoc_insertion_point(class_scope:RpbGetResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbPutReq(_message.Message): + DESCRIPTOR = _RPBPUTREQ + + # @@protoc_insertion_point(class_scope:RpbPutReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbPutResp(_message.Message): + DESCRIPTOR = _RPBPUTRESP + + # @@protoc_insertion_point(class_scope:RpbPutResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbDelReq(_message.Message): + DESCRIPTOR = _RPBDELREQ + + # @@protoc_insertion_point(class_scope:RpbDelReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbListBucketsReq(_message.Message): + DESCRIPTOR = _RPBLISTBUCKETSREQ + + # @@protoc_insertion_point(class_scope:RpbListBucketsReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbListBucketsResp(_message.Message): + DESCRIPTOR = _RPBLISTBUCKETSRESP + + # @@protoc_insertion_point(class_scope:RpbListBucketsResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbListKeysReq(_message.Message): + DESCRIPTOR = _RPBLISTKEYSREQ + + # @@protoc_insertion_point(class_scope:RpbListKeysReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbListKeysResp(_message.Message): + DESCRIPTOR = _RPBLISTKEYSRESP + + # @@protoc_insertion_point(class_scope:RpbListKeysResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbMapRedReq(_message.Message): + DESCRIPTOR = _RPBMAPREDREQ + + # @@protoc_insertion_point(class_scope:RpbMapRedReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class 
RpbMapRedResp(_message.Message): + DESCRIPTOR = _RPBMAPREDRESP + + # @@protoc_insertion_point(class_scope:RpbMapRedResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbIndexReq(_message.Message): + DESCRIPTOR = _RPBINDEXREQ + + # @@protoc_insertion_point(class_scope:RpbIndexReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbIndexResp(_message.Message): + DESCRIPTOR = _RPBINDEXRESP + + # @@protoc_insertion_point(class_scope:RpbIndexResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbIndexBodyResp(_message.Message): + DESCRIPTOR = _RPBINDEXBODYRESP + + # @@protoc_insertion_point(class_scope:RpbIndexBodyResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCSBucketReq(_message.Message): + DESCRIPTOR = _RPBCSBUCKETREQ + + # @@protoc_insertion_point(class_scope:RpbCSBucketReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCSBucketResp(_message.Message): + DESCRIPTOR = _RPBCSBUCKETRESP + + # @@protoc_insertion_point(class_scope:RpbCSBucketResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbIndexObject(_message.Message): + DESCRIPTOR = _RPBINDEXOBJECT + + # @@protoc_insertion_point(class_scope:RpbIndexObject) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbContent(_message.Message): + DESCRIPTOR = _RPBCONTENT + + # @@protoc_insertion_point(class_scope:RpbContent) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbLink(_message.Message): + DESCRIPTOR = _RPBLINK + + # @@protoc_insertion_point(class_scope:RpbLink) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCounterUpdateReq(_message.Message): + DESCRIPTOR = _RPBCOUNTERUPDATEREQ + + # @@protoc_insertion_point(class_scope:RpbCounterUpdateReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCounterUpdateResp(_message.Message): + DESCRIPTOR = _RPBCOUNTERUPDATERESP + + # 
@@protoc_insertion_point(class_scope:RpbCounterUpdateResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCounterGetReq(_message.Message): + DESCRIPTOR = _RPBCOUNTERGETREQ + + # @@protoc_insertion_point(class_scope:RpbCounterGetReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCounterGetResp(_message.Message): + DESCRIPTOR = _RPBCOUNTERGETRESP + + # @@protoc_insertion_point(class_scope:RpbCounterGetResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetBucketKeyPreflistReq(_message.Message): + DESCRIPTOR = _RPBGETBUCKETKEYPREFLISTREQ + + # @@protoc_insertion_point(class_scope:RpbGetBucketKeyPreflistReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetBucketKeyPreflistResp(_message.Message): + DESCRIPTOR = _RPBGETBUCKETKEYPREFLISTRESP + + # @@protoc_insertion_point(class_scope:RpbGetBucketKeyPreflistResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbBucketKeyPreflistItem(_message.Message): + DESCRIPTOR = _RPBBUCKETKEYPREFLISTITEM + + # @@protoc_insertion_point(class_scope:RpbBucketKeyPreflistItem) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCoverageReq(_message.Message): + DESCRIPTOR = _RPBCOVERAGEREQ + + # @@protoc_insertion_point(class_scope:RpbCoverageReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCoverageResp(_message.Message): + DESCRIPTOR = _RPBCOVERAGERESP + + # @@protoc_insertion_point(class_scope:RpbCoverageResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCoverageEntry(_message.Message): + DESCRIPTOR = _RPBCOVERAGEENTRY + + # @@protoc_insertion_point(class_scope:RpbCoverageEntry) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\010RiakKvPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/pb/riak_pb2.py b/riak/pb/riak_pb2.py new file mode 100644 index 
00000000..72dba122 --- /dev/null +++ b/riak/pb/riak_pb2.py @@ -0,0 +1,807 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak.proto', + package='', + serialized_pb='\n\nriak.proto\"/\n\x0cRpbErrorResp\x12\x0e\n\x06\x65rrmsg\x18\x01 \x02(\x0c\x12\x0f\n\x07\x65rrcode\x18\x02 \x02(\r\"<\n\x14RpbGetServerInfoResp\x12\x0c\n\x04node\x18\x01 \x01(\x0c\x12\x16\n\x0eserver_version\x18\x02 \x01(\x0c\"%\n\x07RpbPair\x12\x0b\n\x03key\x18\x01 \x02(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\"/\n\x0fRpbGetBucketReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\x0c\"2\n\x10RpbGetBucketResp\x12\x1e\n\x05props\x18\x01 \x02(\x0b\x32\x0f.RpbBucketProps\"O\n\x0fRpbSetBucketReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x1e\n\x05props\x18\x02 \x02(\x0b\x32\x0f.RpbBucketProps\x12\x0c\n\x04type\x18\x03 \x01(\x0c\"1\n\x11RpbResetBucketReq\x12\x0e\n\x06\x62ucket\x18\x01 \x02(\x0c\x12\x0c\n\x04type\x18\x02 \x01(\x0c\"#\n\x13RpbGetBucketTypeReq\x12\x0c\n\x04type\x18\x01 
\x02(\x0c\"C\n\x13RpbSetBucketTypeReq\x12\x0c\n\x04type\x18\x01 \x02(\x0c\x12\x1e\n\x05props\x18\x02 \x02(\x0b\x32\x0f.RpbBucketProps\"-\n\tRpbModFun\x12\x0e\n\x06module\x18\x01 \x02(\x0c\x12\x10\n\x08\x66unction\x18\x02 \x02(\x0c\"9\n\rRpbCommitHook\x12\x1a\n\x06modfun\x18\x01 \x01(\x0b\x32\n.RpbModFun\x12\x0c\n\x04name\x18\x02 \x01(\x0c\"\xc7\x05\n\x0eRpbBucketProps\x12\r\n\x05n_val\x18\x01 \x01(\r\x12\x12\n\nallow_mult\x18\x02 \x01(\x08\x12\x17\n\x0flast_write_wins\x18\x03 \x01(\x08\x12!\n\tprecommit\x18\x04 \x03(\x0b\x32\x0e.RpbCommitHook\x12\x1c\n\rhas_precommit\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\"\n\npostcommit\x18\x06 \x03(\x0b\x32\x0e.RpbCommitHook\x12\x1d\n\x0ehas_postcommit\x18\x07 \x01(\x08:\x05\x66\x61lse\x12 \n\x0c\x63hash_keyfun\x18\x08 \x01(\x0b\x32\n.RpbModFun\x12\x1b\n\x07linkfun\x18\t \x01(\x0b\x32\n.RpbModFun\x12\x12\n\nold_vclock\x18\n \x01(\r\x12\x14\n\x0cyoung_vclock\x18\x0b \x01(\r\x12\x12\n\nbig_vclock\x18\x0c \x01(\r\x12\x14\n\x0csmall_vclock\x18\r \x01(\r\x12\n\n\x02pr\x18\x0e \x01(\r\x12\t\n\x01r\x18\x0f \x01(\r\x12\t\n\x01w\x18\x10 \x01(\r\x12\n\n\x02pw\x18\x11 \x01(\r\x12\n\n\x02\x64w\x18\x12 \x01(\r\x12\n\n\x02rw\x18\x13 \x01(\r\x12\x14\n\x0c\x62\x61sic_quorum\x18\x14 \x01(\x08\x12\x13\n\x0bnotfound_ok\x18\x15 \x01(\x08\x12\x0f\n\x07\x62\x61\x63kend\x18\x16 \x01(\x0c\x12\x0e\n\x06search\x18\x17 \x01(\x08\x12)\n\x04repl\x18\x18 \x01(\x0e\x32\x1b.RpbBucketProps.RpbReplMode\x12\x14\n\x0csearch_index\x18\x19 \x01(\x0c\x12\x10\n\x08\x64\x61tatype\x18\x1a \x01(\x0c\x12\x12\n\nconsistent\x18\x1b \x01(\x08\x12\x12\n\nwrite_once\x18\x1c \x01(\x08\x12\x15\n\rhll_precision\x18\x1d \x01(\r\">\n\x0bRpbReplMode\x12\t\n\x05\x46\x41LSE\x10\x00\x12\x0c\n\x08REALTIME\x10\x01\x12\x0c\n\x08\x46ULLSYNC\x10\x02\x12\x08\n\x04TRUE\x10\x03\",\n\nRpbAuthReq\x12\x0c\n\x04user\x18\x01 \x02(\x0c\x12\x10\n\x08password\x18\x02 \x02(\x0c\x42!\n\x17\x63om.basho.riak.protobufB\x06RiakPB') + + + +_RPBBUCKETPROPS_RPBREPLMODE = _descriptor.EnumDescriptor( + 
name='RpbReplMode', + full_name='RpbBucketProps.RpbReplMode', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='FALSE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REALTIME', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FULLSYNC', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TRUE', index=3, number=3, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1259, + serialized_end=1321, +) + + +_RPBERRORRESP = _descriptor.Descriptor( + name='RpbErrorResp', + full_name='RpbErrorResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='errmsg', full_name='RpbErrorResp.errmsg', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='errcode', full_name='RpbErrorResp.errcode', index=1, + number=2, type=13, cpp_type=3, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=14, + serialized_end=61, +) + + +_RPBGETSERVERINFORESP = _descriptor.Descriptor( + name='RpbGetServerInfoResp', + full_name='RpbGetServerInfoResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='node', full_name='RpbGetServerInfoResp.node', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='server_version', full_name='RpbGetServerInfoResp.server_version', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=63, + serialized_end=123, +) + + +_RPBPAIR = _descriptor.Descriptor( + name='RpbPair', + full_name='RpbPair', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='RpbPair.key', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='RpbPair.value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=125, + serialized_end=162, +) + + +_RPBGETBUCKETREQ = _descriptor.Descriptor( + name='RpbGetBucketReq', + full_name='RpbGetBucketReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbGetBucketReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbGetBucketReq.type', 
index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=164, + serialized_end=211, +) + + +_RPBGETBUCKETRESP = _descriptor.Descriptor( + name='RpbGetBucketResp', + full_name='RpbGetBucketResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='props', full_name='RpbGetBucketResp.props', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=213, + serialized_end=263, +) + + +_RPBSETBUCKETREQ = _descriptor.Descriptor( + name='RpbSetBucketReq', + full_name='RpbSetBucketReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbSetBucketReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='props', full_name='RpbSetBucketReq.props', index=1, + number=2, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbSetBucketReq.type', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", 
+ message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=265, + serialized_end=344, +) + + +_RPBRESETBUCKETREQ = _descriptor.Descriptor( + name='RpbResetBucketReq', + full_name='RpbResetBucketReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='bucket', full_name='RpbResetBucketReq.bucket', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='RpbResetBucketReq.type', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=346, + serialized_end=395, +) + + +_RPBGETBUCKETTYPEREQ = _descriptor.Descriptor( + name='RpbGetBucketTypeReq', + full_name='RpbGetBucketTypeReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='RpbGetBucketTypeReq.type', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=397, + serialized_end=432, +) + + +_RPBSETBUCKETTYPEREQ = _descriptor.Descriptor( + name='RpbSetBucketTypeReq', + 
full_name='RpbSetBucketTypeReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='RpbSetBucketTypeReq.type', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='props', full_name='RpbSetBucketTypeReq.props', index=1, + number=2, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=434, + serialized_end=501, +) + + +_RPBMODFUN = _descriptor.Descriptor( + name='RpbModFun', + full_name='RpbModFun', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='module', full_name='RpbModFun.module', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='function', full_name='RpbModFun.function', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=503, + serialized_end=548, +) + + +_RPBCOMMITHOOK = _descriptor.Descriptor( + name='RpbCommitHook', + full_name='RpbCommitHook', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='modfun', full_name='RpbCommitHook.modfun', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='name', full_name='RpbCommitHook.name', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=550, + serialized_end=607, +) + + +_RPBBUCKETPROPS = _descriptor.Descriptor( + name='RpbBucketProps', + full_name='RpbBucketProps', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='n_val', full_name='RpbBucketProps.n_val', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='allow_mult', full_name='RpbBucketProps.allow_mult', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_write_wins', full_name='RpbBucketProps.last_write_wins', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='precommit', full_name='RpbBucketProps.precommit', index=3, + number=4, type=11, cpp_type=10, 
label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='has_precommit', full_name='RpbBucketProps.has_precommit', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='postcommit', full_name='RpbBucketProps.postcommit', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='has_postcommit', full_name='RpbBucketProps.has_postcommit', index=6, + number=7, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='chash_keyfun', full_name='RpbBucketProps.chash_keyfun', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='linkfun', full_name='RpbBucketProps.linkfun', index=8, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='old_vclock', full_name='RpbBucketProps.old_vclock', index=9, + number=10, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='young_vclock', full_name='RpbBucketProps.young_vclock', index=10, + number=11, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='big_vclock', full_name='RpbBucketProps.big_vclock', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='small_vclock', full_name='RpbBucketProps.small_vclock', index=12, + number=13, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pr', full_name='RpbBucketProps.pr', index=13, + number=14, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='r', full_name='RpbBucketProps.r', index=14, + number=15, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='w', full_name='RpbBucketProps.w', index=15, + number=16, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pw', full_name='RpbBucketProps.pw', index=16, + number=17, type=13, cpp_type=3, label=1, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='dw', full_name='RpbBucketProps.dw', index=17, + number=18, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rw', full_name='RpbBucketProps.rw', index=18, + number=19, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='basic_quorum', full_name='RpbBucketProps.basic_quorum', index=19, + number=20, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='notfound_ok', full_name='RpbBucketProps.notfound_ok', index=20, + number=21, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='backend', full_name='RpbBucketProps.backend', index=21, + number=22, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='search', full_name='RpbBucketProps.search', index=22, + number=23, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='repl', full_name='RpbBucketProps.repl', index=23, + number=24, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='search_index', full_name='RpbBucketProps.search_index', index=24, + number=25, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='datatype', full_name='RpbBucketProps.datatype', index=25, + number=26, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='consistent', full_name='RpbBucketProps.consistent', index=26, + number=27, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='write_once', full_name='RpbBucketProps.write_once', index=27, + number=28, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='hll_precision', full_name='RpbBucketProps.hll_precision', index=28, + number=29, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _RPBBUCKETPROPS_RPBREPLMODE, + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=610, + 
serialized_end=1321, +) + + +_RPBAUTHREQ = _descriptor.Descriptor( + name='RpbAuthReq', + full_name='RpbAuthReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='user', full_name='RpbAuthReq.user', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='password', full_name='RpbAuthReq.password', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1323, + serialized_end=1367, +) + +_RPBGETBUCKETRESP.fields_by_name['props'].message_type = _RPBBUCKETPROPS +_RPBSETBUCKETREQ.fields_by_name['props'].message_type = _RPBBUCKETPROPS +_RPBSETBUCKETTYPEREQ.fields_by_name['props'].message_type = _RPBBUCKETPROPS +_RPBCOMMITHOOK.fields_by_name['modfun'].message_type = _RPBMODFUN +_RPBBUCKETPROPS.fields_by_name['precommit'].message_type = _RPBCOMMITHOOK +_RPBBUCKETPROPS.fields_by_name['postcommit'].message_type = _RPBCOMMITHOOK +_RPBBUCKETPROPS.fields_by_name['chash_keyfun'].message_type = _RPBMODFUN +_RPBBUCKETPROPS.fields_by_name['linkfun'].message_type = _RPBMODFUN +_RPBBUCKETPROPS.fields_by_name['repl'].enum_type = _RPBBUCKETPROPS_RPBREPLMODE +_RPBBUCKETPROPS_RPBREPLMODE.containing_type = _RPBBUCKETPROPS; +DESCRIPTOR.message_types_by_name['RpbErrorResp'] = _RPBERRORRESP +DESCRIPTOR.message_types_by_name['RpbGetServerInfoResp'] = _RPBGETSERVERINFORESP +DESCRIPTOR.message_types_by_name['RpbPair'] = _RPBPAIR +DESCRIPTOR.message_types_by_name['RpbGetBucketReq'] = _RPBGETBUCKETREQ 
+DESCRIPTOR.message_types_by_name['RpbGetBucketResp'] = _RPBGETBUCKETRESP +DESCRIPTOR.message_types_by_name['RpbSetBucketReq'] = _RPBSETBUCKETREQ +DESCRIPTOR.message_types_by_name['RpbResetBucketReq'] = _RPBRESETBUCKETREQ +DESCRIPTOR.message_types_by_name['RpbGetBucketTypeReq'] = _RPBGETBUCKETTYPEREQ +DESCRIPTOR.message_types_by_name['RpbSetBucketTypeReq'] = _RPBSETBUCKETTYPEREQ +DESCRIPTOR.message_types_by_name['RpbModFun'] = _RPBMODFUN +DESCRIPTOR.message_types_by_name['RpbCommitHook'] = _RPBCOMMITHOOK +DESCRIPTOR.message_types_by_name['RpbBucketProps'] = _RPBBUCKETPROPS +DESCRIPTOR.message_types_by_name['RpbAuthReq'] = _RPBAUTHREQ + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbErrorResp(_message.Message): + DESCRIPTOR = _RPBERRORRESP + + # @@protoc_insertion_point(class_scope:RpbErrorResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetServerInfoResp(_message.Message): + DESCRIPTOR = _RPBGETSERVERINFORESP + + # @@protoc_insertion_point(class_scope:RpbGetServerInfoResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbPair(_message.Message): + DESCRIPTOR = _RPBPAIR + + # @@protoc_insertion_point(class_scope:RpbPair) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetBucketReq(_message.Message): + DESCRIPTOR = _RPBGETBUCKETREQ + + # @@protoc_insertion_point(class_scope:RpbGetBucketReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetBucketResp(_message.Message): + DESCRIPTOR = _RPBGETBUCKETRESP + + # @@protoc_insertion_point(class_scope:RpbGetBucketResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSetBucketReq(_message.Message): + DESCRIPTOR = _RPBSETBUCKETREQ + + # @@protoc_insertion_point(class_scope:RpbSetBucketReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbResetBucketReq(_message.Message): + DESCRIPTOR = _RPBRESETBUCKETREQ + + # @@protoc_insertion_point(class_scope:RpbResetBucketReq) + 
+@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbGetBucketTypeReq(_message.Message): + DESCRIPTOR = _RPBGETBUCKETTYPEREQ + + # @@protoc_insertion_point(class_scope:RpbGetBucketTypeReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSetBucketTypeReq(_message.Message): + DESCRIPTOR = _RPBSETBUCKETTYPEREQ + + # @@protoc_insertion_point(class_scope:RpbSetBucketTypeReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbModFun(_message.Message): + DESCRIPTOR = _RPBMODFUN + + # @@protoc_insertion_point(class_scope:RpbModFun) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbCommitHook(_message.Message): + DESCRIPTOR = _RPBCOMMITHOOK + + # @@protoc_insertion_point(class_scope:RpbCommitHook) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbBucketProps(_message.Message): + DESCRIPTOR = _RPBBUCKETPROPS + + # @@protoc_insertion_point(class_scope:RpbBucketProps) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbAuthReq(_message.Message): + DESCRIPTOR = _RPBAUTHREQ + + # @@protoc_insertion_point(class_scope:RpbAuthReq) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\006RiakPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/pb/riak_search_pb2.py b/riak/pb/riak_search_pb2.py new file mode 100644 index 00000000..b20adbfc --- /dev/null +++ b/riak/pb/riak_search_pb2.py @@ -0,0 +1,224 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak_search.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + +import riak.pb.riak_pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak_search.proto', + package='', + serialized_pb='\n\x11riak_search.proto\x1a\nriak.proto\"(\n\x0cRpbSearchDoc\x12\x18\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x08.RpbPair\"\x9d\x01\n\x11RpbSearchQueryReq\x12\t\n\x01q\x18\x01 \x02(\x0c\x12\r\n\x05index\x18\x02 \x02(\x0c\x12\x0c\n\x04rows\x18\x03 \x01(\r\x12\r\n\x05start\x18\x04 \x01(\r\x12\x0c\n\x04sort\x18\x05 \x01(\x0c\x12\x0e\n\x06\x66ilter\x18\x06 \x01(\x0c\x12\n\n\x02\x64\x66\x18\x07 \x01(\x0c\x12\n\n\x02op\x18\x08 \x01(\x0c\x12\n\n\x02\x66l\x18\t \x03(\x0c\x12\x0f\n\x07presort\x18\n \x01(\x0c\"W\n\x12RpbSearchQueryResp\x12\x1b\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\r.RpbSearchDoc\x12\x11\n\tmax_score\x18\x02 \x01(\x02\x12\x11\n\tnum_found\x18\x03 \x01(\rB\'\n\x17\x63om.basho.riak.protobufB\x0cRiakSearchPB') + + + + +_RPBSEARCHDOC = _descriptor.Descriptor( + name='RpbSearchDoc', + full_name='RpbSearchDoc', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fields', full_name='RpbSearchDoc.fields', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=33, + serialized_end=73, +) + + +_RPBSEARCHQUERYREQ = _descriptor.Descriptor( + name='RpbSearchQueryReq', + full_name='RpbSearchQueryReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='q', full_name='RpbSearchQueryReq.q', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='index', full_name='RpbSearchQueryReq.index', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='RpbSearchQueryReq.rows', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start', full_name='RpbSearchQueryReq.start', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sort', full_name='RpbSearchQueryReq.sort', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='filter', 
full_name='RpbSearchQueryReq.filter', index=5, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='df', full_name='RpbSearchQueryReq.df', index=6, + number=7, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='op', full_name='RpbSearchQueryReq.op', index=7, + number=8, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='fl', full_name='RpbSearchQueryReq.fl', index=8, + number=9, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='presort', full_name='RpbSearchQueryReq.presort', index=9, + number=10, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=76, + serialized_end=233, +) + + +_RPBSEARCHQUERYRESP = _descriptor.Descriptor( + name='RpbSearchQueryResp', + full_name='RpbSearchQueryResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='docs', full_name='RpbSearchQueryResp.docs', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_score', full_name='RpbSearchQueryResp.max_score', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='num_found', full_name='RpbSearchQueryResp.num_found', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=235, + serialized_end=322, +) + +_RPBSEARCHDOC.fields_by_name['fields'].message_type = riak.pb.riak_pb2._RPBPAIR +_RPBSEARCHQUERYRESP.fields_by_name['docs'].message_type = _RPBSEARCHDOC +DESCRIPTOR.message_types_by_name['RpbSearchDoc'] = _RPBSEARCHDOC +DESCRIPTOR.message_types_by_name['RpbSearchQueryReq'] = _RPBSEARCHQUERYREQ +DESCRIPTOR.message_types_by_name['RpbSearchQueryResp'] = _RPBSEARCHQUERYRESP + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSearchDoc(_message.Message): + DESCRIPTOR = _RPBSEARCHDOC + + # @@protoc_insertion_point(class_scope:RpbSearchDoc) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSearchQueryReq(_message.Message): + DESCRIPTOR = _RPBSEARCHQUERYREQ + + # @@protoc_insertion_point(class_scope:RpbSearchQueryReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbSearchQueryResp(_message.Message): + DESCRIPTOR = _RPBSEARCHQUERYRESP + + # @@protoc_insertion_point(class_scope:RpbSearchQueryResp) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
'\n\027com.basho.riak.protobufB\014RiakSearchPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/pb/riak_ts_pb2.py b/riak/pb/riak_ts_pb2.py new file mode 100644 index 00000000..5033db67 --- /dev/null +++ b/riak/pb/riak_ts_pb2.py @@ -0,0 +1,934 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak_ts.proto + +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + +import riak.pb.riak_pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak_ts.proto', + package='', + serialized_pb='\n\rriak_ts.proto\x1a\nriak.proto\"[\n\nTsQueryReq\x12\x1f\n\x05query\x18\x01 \x01(\x0b\x32\x10.TsInterpolation\x12\x15\n\x06stream\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x15\n\rcover_context\x18\x03 \x01(\x0c\"^\n\x0bTsQueryResp\x12%\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\x14.TsColumnDescription\x12\x14\n\x04rows\x18\x02 \x03(\x0b\x32\x06.TsRow\x12\x12\n\x04\x64one\x18\x03 \x01(\x08:\x04true\"@\n\x08TsGetReq\x12\r\n\x05table\x18\x01 \x02(\x0c\x12\x14\n\x03key\x18\x02 \x03(\x0b\x32\x07.TsCell\x12\x0f\n\x07timeout\x18\x03 \x01(\r\"H\n\tTsGetResp\x12%\n\x07\x63olumns\x18\x01 
\x03(\x0b\x32\x14.TsColumnDescription\x12\x14\n\x04rows\x18\x02 \x03(\x0b\x32\x06.TsRow\"V\n\x08TsPutReq\x12\r\n\x05table\x18\x01 \x02(\x0c\x12%\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x14.TsColumnDescription\x12\x14\n\x04rows\x18\x03 \x03(\x0b\x32\x06.TsRow\"\x0b\n\tTsPutResp\"P\n\x08TsDelReq\x12\r\n\x05table\x18\x01 \x02(\x0c\x12\x14\n\x03key\x18\x02 \x03(\x0b\x32\x07.TsCell\x12\x0e\n\x06vclock\x18\x03 \x01(\x0c\x12\x0f\n\x07timeout\x18\x04 \x01(\r\"\x0b\n\tTsDelResp\"A\n\x0fTsInterpolation\x12\x0c\n\x04\x62\x61se\x18\x01 \x02(\x0c\x12 \n\x0einterpolations\x18\x02 \x03(\x0b\x32\x08.RpbPair\"@\n\x13TsColumnDescription\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12\x1b\n\x04type\x18\x02 \x02(\x0e\x32\r.TsColumnType\"\x1f\n\x05TsRow\x12\x16\n\x05\x63\x65lls\x18\x01 \x03(\x0b\x32\x07.TsCell\"{\n\x06TsCell\x12\x15\n\rvarchar_value\x18\x01 \x01(\x0c\x12\x14\n\x0csint64_value\x18\x02 \x01(\x12\x12\x17\n\x0ftimestamp_value\x18\x03 \x01(\x12\x12\x15\n\rboolean_value\x18\x04 \x01(\x08\x12\x14\n\x0c\x64ouble_value\x18\x05 \x01(\x01\"/\n\rTsListKeysReq\x12\r\n\x05table\x18\x01 \x02(\x0c\x12\x0f\n\x07timeout\x18\x02 \x01(\r\"4\n\x0eTsListKeysResp\x12\x14\n\x04keys\x18\x01 \x03(\x0b\x32\x06.TsRow\x12\x0c\n\x04\x64one\x18\x02 \x01(\x08\"q\n\rTsCoverageReq\x12\x1f\n\x05query\x18\x01 \x01(\x0b\x32\x10.TsInterpolation\x12\r\n\x05table\x18\x02 \x02(\x0c\x12\x15\n\rreplace_cover\x18\x03 \x01(\x0c\x12\x19\n\x11unavailable_cover\x18\x04 \x03(\x0c\"3\n\x0eTsCoverageResp\x12!\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x10.TsCoverageEntry\"[\n\x0fTsCoverageEntry\x12\n\n\x02ip\x18\x01 \x02(\x0c\x12\x0c\n\x04port\x18\x02 \x02(\r\x12\x15\n\rcover_context\x18\x03 \x02(\x0c\x12\x17\n\x05range\x18\x04 \x01(\x0b\x32\x08.TsRange\"\x93\x01\n\x07TsRange\x12\x12\n\nfield_name\x18\x01 \x02(\x0c\x12\x13\n\x0blower_bound\x18\x02 \x02(\x12\x12\x1d\n\x15lower_bound_inclusive\x18\x03 \x02(\x08\x12\x13\n\x0bupper_bound\x18\x04 \x02(\x12\x12\x1d\n\x15upper_bound_inclusive\x18\x05 
\x02(\x08\x12\x0c\n\x04\x64\x65sc\x18\x06 \x02(\x0c*Y\n\x0cTsColumnType\x12\x0b\n\x07VARCHAR\x10\x00\x12\n\n\x06SINT64\x10\x01\x12\n\n\x06\x44OUBLE\x10\x02\x12\r\n\tTIMESTAMP\x10\x03\x12\x0b\n\x07\x42OOLEAN\x10\x04\x12\x08\n\x04\x42LOB\x10\x05\x42#\n\x17\x63om.basho.riak.protobufB\x08RiakTsPB') + +_TSCOLUMNTYPE = _descriptor.EnumDescriptor( + name='TsColumnType', + full_name='TsColumnType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='VARCHAR', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SINT64', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DOUBLE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='TIMESTAMP', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BOOLEAN', index=4, number=4, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BLOB', index=5, number=5, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1359, + serialized_end=1448, +) + +TsColumnType = enum_type_wrapper.EnumTypeWrapper(_TSCOLUMNTYPE) +VARCHAR = 0 +SINT64 = 1 +DOUBLE = 2 +TIMESTAMP = 3 +BOOLEAN = 4 +BLOB = 5 + + + +_TSQUERYREQ = _descriptor.Descriptor( + name='TsQueryReq', + full_name='TsQueryReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='query', full_name='TsQueryReq.query', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='stream', full_name='TsQueryReq.stream', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover_context', full_name='TsQueryReq.cover_context', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=29, + serialized_end=120, +) + + +_TSQUERYRESP = _descriptor.Descriptor( + name='TsQueryResp', + full_name='TsQueryResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='columns', full_name='TsQueryResp.columns', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='TsQueryResp.rows', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='TsQueryResp.done', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=122, + serialized_end=216, +) + + +_TSGETREQ = _descriptor.Descriptor( + name='TsGetReq', + full_name='TsGetReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table', full_name='TsGetReq.table', 
index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='TsGetReq.key', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='TsGetReq.timeout', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=218, + serialized_end=282, +) + + +_TSGETRESP = _descriptor.Descriptor( + name='TsGetResp', + full_name='TsGetResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='columns', full_name='TsGetResp.columns', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='TsGetResp.rows', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=284, + serialized_end=356, +) + + +_TSPUTREQ = _descriptor.Descriptor( + name='TsPutReq', + full_name='TsPutReq', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table', full_name='TsPutReq.table', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='columns', full_name='TsPutReq.columns', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='TsPutReq.rows', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=358, + serialized_end=444, +) + + +_TSPUTRESP = _descriptor.Descriptor( + name='TsPutResp', + full_name='TsPutResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=446, + serialized_end=457, +) + + +_TSDELREQ = _descriptor.Descriptor( + name='TsDelReq', + full_name='TsDelReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table', full_name='TsDelReq.table', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='key', full_name='TsDelReq.key', index=1, + number=2, type=11, cpp_type=10, 
label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='vclock', full_name='TsDelReq.vclock', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='TsDelReq.timeout', index=3, + number=4, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=459, + serialized_end=539, +) + + +_TSDELRESP = _descriptor.Descriptor( + name='TsDelResp', + full_name='TsDelResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=541, + serialized_end=552, +) + + +_TSINTERPOLATION = _descriptor.Descriptor( + name='TsInterpolation', + full_name='TsInterpolation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='base', full_name='TsInterpolation.base', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interpolations', full_name='TsInterpolation.interpolations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=554, + serialized_end=619, +) + + +_TSCOLUMNDESCRIPTION = _descriptor.Descriptor( + name='TsColumnDescription', + full_name='TsColumnDescription', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='TsColumnDescription.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='type', full_name='TsColumnDescription.type', index=1, + number=2, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=621, + serialized_end=685, +) + + +_TSROW = _descriptor.Descriptor( + name='TsRow', + full_name='TsRow', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cells', full_name='TsRow.cells', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=687, + serialized_end=718, +) + + +_TSCELL = _descriptor.Descriptor( + name='TsCell', + full_name='TsCell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name='varchar_value', full_name='TsCell.varchar_value', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sint64_value', full_name='TsCell.sint64_value', index=1, + number=2, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_value', full_name='TsCell.timestamp_value', index=2, + number=3, type=18, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='boolean_value', full_name='TsCell.boolean_value', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='double_value', full_name='TsCell.double_value', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=720, + serialized_end=843, +) + + +_TSLISTKEYSREQ = _descriptor.Descriptor( + name='TsListKeysReq', + full_name='TsListKeysReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table', full_name='TsListKeysReq.table', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, 
default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='TsListKeysReq.timeout', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=845, + serialized_end=892, +) + + +_TSLISTKEYSRESP = _descriptor.Descriptor( + name='TsListKeysResp', + full_name='TsListKeysResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='keys', full_name='TsListKeysResp.keys', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='done', full_name='TsListKeysResp.done', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=894, + serialized_end=946, +) + + +_TSCOVERAGEREQ = _descriptor.Descriptor( + name='TsCoverageReq', + full_name='TsCoverageReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='query', full_name='TsCoverageReq.query', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table', full_name='TsCoverageReq.table', index=1, + number=2, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='replace_cover', full_name='TsCoverageReq.replace_cover', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='unavailable_cover', full_name='TsCoverageReq.unavailable_cover', index=3, + number=4, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=948, + serialized_end=1061, +) + + +_TSCOVERAGERESP = _descriptor.Descriptor( + name='TsCoverageResp', + full_name='TsCoverageResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='TsCoverageResp.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1063, + serialized_end=1114, +) + + +_TSCOVERAGEENTRY = _descriptor.Descriptor( + name='TsCoverageEntry', + full_name='TsCoverageEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='ip', full_name='TsCoverageEntry.ip', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='port', full_name='TsCoverageEntry.port', index=1, + number=2, type=13, cpp_type=3, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cover_context', full_name='TsCoverageEntry.cover_context', index=2, + number=3, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='range', full_name='TsCoverageEntry.range', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1116, + serialized_end=1207, +) + + +_TSRANGE = _descriptor.Descriptor( + name='TsRange', + full_name='TsRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='field_name', full_name='TsRange.field_name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lower_bound', full_name='TsRange.lower_bound', index=1, + number=2, type=18, cpp_type=2, label=2, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='lower_bound_inclusive', full_name='TsRange.lower_bound_inclusive', index=2, + number=3, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upper_bound', full_name='TsRange.upper_bound', index=3, + number=4, type=18, cpp_type=2, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='upper_bound_inclusive', full_name='TsRange.upper_bound_inclusive', index=4, + number=5, type=8, cpp_type=7, label=2, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='desc', full_name='TsRange.desc', index=5, + number=6, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=1210, + serialized_end=1357, +) + +_TSQUERYREQ.fields_by_name['query'].message_type = _TSINTERPOLATION +_TSQUERYRESP.fields_by_name['columns'].message_type = _TSCOLUMNDESCRIPTION +_TSQUERYRESP.fields_by_name['rows'].message_type = _TSROW +_TSGETREQ.fields_by_name['key'].message_type = _TSCELL +_TSGETRESP.fields_by_name['columns'].message_type = _TSCOLUMNDESCRIPTION +_TSGETRESP.fields_by_name['rows'].message_type = _TSROW +_TSPUTREQ.fields_by_name['columns'].message_type = 
_TSCOLUMNDESCRIPTION +_TSPUTREQ.fields_by_name['rows'].message_type = _TSROW +_TSDELREQ.fields_by_name['key'].message_type = _TSCELL +_TSINTERPOLATION.fields_by_name['interpolations'].message_type = riak.pb.riak_pb2._RPBPAIR +_TSCOLUMNDESCRIPTION.fields_by_name['type'].enum_type = _TSCOLUMNTYPE +_TSROW.fields_by_name['cells'].message_type = _TSCELL +_TSLISTKEYSRESP.fields_by_name['keys'].message_type = _TSROW +_TSCOVERAGEREQ.fields_by_name['query'].message_type = _TSINTERPOLATION +_TSCOVERAGERESP.fields_by_name['entries'].message_type = _TSCOVERAGEENTRY +_TSCOVERAGEENTRY.fields_by_name['range'].message_type = _TSRANGE +DESCRIPTOR.message_types_by_name['TsQueryReq'] = _TSQUERYREQ +DESCRIPTOR.message_types_by_name['TsQueryResp'] = _TSQUERYRESP +DESCRIPTOR.message_types_by_name['TsGetReq'] = _TSGETREQ +DESCRIPTOR.message_types_by_name['TsGetResp'] = _TSGETRESP +DESCRIPTOR.message_types_by_name['TsPutReq'] = _TSPUTREQ +DESCRIPTOR.message_types_by_name['TsPutResp'] = _TSPUTRESP +DESCRIPTOR.message_types_by_name['TsDelReq'] = _TSDELREQ +DESCRIPTOR.message_types_by_name['TsDelResp'] = _TSDELRESP +DESCRIPTOR.message_types_by_name['TsInterpolation'] = _TSINTERPOLATION +DESCRIPTOR.message_types_by_name['TsColumnDescription'] = _TSCOLUMNDESCRIPTION +DESCRIPTOR.message_types_by_name['TsRow'] = _TSROW +DESCRIPTOR.message_types_by_name['TsCell'] = _TSCELL +DESCRIPTOR.message_types_by_name['TsListKeysReq'] = _TSLISTKEYSREQ +DESCRIPTOR.message_types_by_name['TsListKeysResp'] = _TSLISTKEYSRESP +DESCRIPTOR.message_types_by_name['TsCoverageReq'] = _TSCOVERAGEREQ +DESCRIPTOR.message_types_by_name['TsCoverageResp'] = _TSCOVERAGERESP +DESCRIPTOR.message_types_by_name['TsCoverageEntry'] = _TSCOVERAGEENTRY +DESCRIPTOR.message_types_by_name['TsRange'] = _TSRANGE + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsQueryReq(_message.Message): + DESCRIPTOR = _TSQUERYREQ + + # @@protoc_insertion_point(class_scope:TsQueryReq) + 
+@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsQueryResp(_message.Message): + DESCRIPTOR = _TSQUERYRESP + + # @@protoc_insertion_point(class_scope:TsQueryResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsGetReq(_message.Message): + DESCRIPTOR = _TSGETREQ + + # @@protoc_insertion_point(class_scope:TsGetReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsGetResp(_message.Message): + DESCRIPTOR = _TSGETRESP + + # @@protoc_insertion_point(class_scope:TsGetResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsPutReq(_message.Message): + DESCRIPTOR = _TSPUTREQ + + # @@protoc_insertion_point(class_scope:TsPutReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsPutResp(_message.Message): + DESCRIPTOR = _TSPUTRESP + + # @@protoc_insertion_point(class_scope:TsPutResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsDelReq(_message.Message): + DESCRIPTOR = _TSDELREQ + + # @@protoc_insertion_point(class_scope:TsDelReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsDelResp(_message.Message): + DESCRIPTOR = _TSDELRESP + + # @@protoc_insertion_point(class_scope:TsDelResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsInterpolation(_message.Message): + DESCRIPTOR = _TSINTERPOLATION + + # @@protoc_insertion_point(class_scope:TsInterpolation) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsColumnDescription(_message.Message): + DESCRIPTOR = _TSCOLUMNDESCRIPTION + + # @@protoc_insertion_point(class_scope:TsColumnDescription) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsRow(_message.Message): + DESCRIPTOR = _TSROW + + # @@protoc_insertion_point(class_scope:TsRow) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsCell(_message.Message): + DESCRIPTOR = _TSCELL + + # @@protoc_insertion_point(class_scope:TsCell) + 
+@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsListKeysReq(_message.Message): + DESCRIPTOR = _TSLISTKEYSREQ + + # @@protoc_insertion_point(class_scope:TsListKeysReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsListKeysResp(_message.Message): + DESCRIPTOR = _TSLISTKEYSRESP + + # @@protoc_insertion_point(class_scope:TsListKeysResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsCoverageReq(_message.Message): + DESCRIPTOR = _TSCOVERAGEREQ + + # @@protoc_insertion_point(class_scope:TsCoverageReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsCoverageResp(_message.Message): + DESCRIPTOR = _TSCOVERAGERESP + + # @@protoc_insertion_point(class_scope:TsCoverageResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsCoverageEntry(_message.Message): + DESCRIPTOR = _TSCOVERAGEENTRY + + # @@protoc_insertion_point(class_scope:TsCoverageEntry) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class TsRange(_message.Message): + DESCRIPTOR = _TSRANGE + + # @@protoc_insertion_point(class_scope:TsRange) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\010RiakTsPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/pb/riak_yokozuna_pb2.py b/riak/pb/riak_yokozuna_pb2.py new file mode 100644 index 00000000..6cc20395 --- /dev/null +++ b/riak/pb/riak_yokozuna_pb2.py @@ -0,0 +1,386 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import * +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: riak_yokozuna.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='riak_yokozuna.proto', + package='', + serialized_pb='\n\x13riak_yokozuna.proto\"?\n\x10RpbYokozunaIndex\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12\x0e\n\x06schema\x18\x02 \x01(\x0c\x12\r\n\x05n_val\x18\x03 \x01(\r\"&\n\x16RpbYokozunaIndexGetReq\x12\x0c\n\x04name\x18\x01 \x01(\x0c\";\n\x17RpbYokozunaIndexGetResp\x12 \n\x05index\x18\x01 \x03(\x0b\x32\x11.RpbYokozunaIndex\"K\n\x16RpbYokozunaIndexPutReq\x12 \n\x05index\x18\x01 \x02(\x0b\x32\x11.RpbYokozunaIndex\x12\x0f\n\x07timeout\x18\x02 \x01(\r\")\n\x19RpbYokozunaIndexDeleteReq\x12\x0c\n\x04name\x18\x01 \x02(\x0c\"2\n\x11RpbYokozunaSchema\x12\x0c\n\x04name\x18\x01 \x02(\x0c\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"=\n\x17RpbYokozunaSchemaPutReq\x12\"\n\x06schema\x18\x01 \x02(\x0b\x32\x12.RpbYokozunaSchema\"\'\n\x17RpbYokozunaSchemaGetReq\x12\x0c\n\x04name\x18\x01 \x02(\x0c\">\n\x18RpbYokozunaSchemaGetResp\x12\"\n\x06schema\x18\x01 \x02(\x0b\x32\x12.RpbYokozunaSchemaB)\n\x17\x63om.basho.riak.protobufB\x0eRiakYokozunaPB') + + + + +_RPBYOKOZUNAINDEX = _descriptor.Descriptor( + name='RpbYokozunaIndex', + full_name='RpbYokozunaIndex', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='name', full_name='RpbYokozunaIndex.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='schema', full_name='RpbYokozunaIndex.schema', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='n_val', full_name='RpbYokozunaIndex.n_val', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=23, + serialized_end=86, +) + + +_RPBYOKOZUNAINDEXGETREQ = _descriptor.Descriptor( + name='RpbYokozunaIndexGetReq', + full_name='RpbYokozunaIndexGetReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='RpbYokozunaIndexGetReq.name', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=88, + serialized_end=126, +) + + +_RPBYOKOZUNAINDEXGETRESP = _descriptor.Descriptor( + name='RpbYokozunaIndexGetResp', + full_name='RpbYokozunaIndexGetResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', 
full_name='RpbYokozunaIndexGetResp.index', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=128, + serialized_end=187, +) + + +_RPBYOKOZUNAINDEXPUTREQ = _descriptor.Descriptor( + name='RpbYokozunaIndexPutReq', + full_name='RpbYokozunaIndexPutReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='RpbYokozunaIndexPutReq.index', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timeout', full_name='RpbYokozunaIndexPutReq.timeout', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=189, + serialized_end=264, +) + + +_RPBYOKOZUNAINDEXDELETEREQ = _descriptor.Descriptor( + name='RpbYokozunaIndexDeleteReq', + full_name='RpbYokozunaIndexDeleteReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='RpbYokozunaIndexDeleteReq.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], 
+ options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=266, + serialized_end=307, +) + + +_RPBYOKOZUNASCHEMA = _descriptor.Descriptor( + name='RpbYokozunaSchema', + full_name='RpbYokozunaSchema', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='RpbYokozunaSchema.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='content', full_name='RpbYokozunaSchema.content', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=309, + serialized_end=359, +) + + +_RPBYOKOZUNASCHEMAPUTREQ = _descriptor.Descriptor( + name='RpbYokozunaSchemaPutReq', + full_name='RpbYokozunaSchemaPutReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='schema', full_name='RpbYokozunaSchemaPutReq.schema', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=361, + serialized_end=422, +) + + +_RPBYOKOZUNASCHEMAGETREQ = _descriptor.Descriptor( + name='RpbYokozunaSchemaGetReq', + full_name='RpbYokozunaSchemaGetReq', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name='name', full_name='RpbYokozunaSchemaGetReq.name', index=0, + number=1, type=12, cpp_type=9, label=2, + has_default_value=False, default_value="", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=424, + serialized_end=463, +) + + +_RPBYOKOZUNASCHEMAGETRESP = _descriptor.Descriptor( + name='RpbYokozunaSchemaGetResp', + full_name='RpbYokozunaSchemaGetResp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='schema', full_name='RpbYokozunaSchemaGetResp.schema', index=0, + number=1, type=11, cpp_type=10, label=2, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + extension_ranges=[], + serialized_start=465, + serialized_end=527, +) + +_RPBYOKOZUNAINDEXGETRESP.fields_by_name['index'].message_type = _RPBYOKOZUNAINDEX +_RPBYOKOZUNAINDEXPUTREQ.fields_by_name['index'].message_type = _RPBYOKOZUNAINDEX +_RPBYOKOZUNASCHEMAPUTREQ.fields_by_name['schema'].message_type = _RPBYOKOZUNASCHEMA +_RPBYOKOZUNASCHEMAGETRESP.fields_by_name['schema'].message_type = _RPBYOKOZUNASCHEMA +DESCRIPTOR.message_types_by_name['RpbYokozunaIndex'] = _RPBYOKOZUNAINDEX +DESCRIPTOR.message_types_by_name['RpbYokozunaIndexGetReq'] = _RPBYOKOZUNAINDEXGETREQ +DESCRIPTOR.message_types_by_name['RpbYokozunaIndexGetResp'] = _RPBYOKOZUNAINDEXGETRESP +DESCRIPTOR.message_types_by_name['RpbYokozunaIndexPutReq'] = _RPBYOKOZUNAINDEXPUTREQ +DESCRIPTOR.message_types_by_name['RpbYokozunaIndexDeleteReq'] = _RPBYOKOZUNAINDEXDELETEREQ +DESCRIPTOR.message_types_by_name['RpbYokozunaSchema'] = _RPBYOKOZUNASCHEMA 
+DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaPutReq'] = _RPBYOKOZUNASCHEMAPUTREQ +DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaGetReq'] = _RPBYOKOZUNASCHEMAGETREQ +DESCRIPTOR.message_types_by_name['RpbYokozunaSchemaGetResp'] = _RPBYOKOZUNASCHEMAGETRESP + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaIndex(_message.Message): + DESCRIPTOR = _RPBYOKOZUNAINDEX + + # @@protoc_insertion_point(class_scope:RpbYokozunaIndex) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaIndexGetReq(_message.Message): + DESCRIPTOR = _RPBYOKOZUNAINDEXGETREQ + + # @@protoc_insertion_point(class_scope:RpbYokozunaIndexGetReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaIndexGetResp(_message.Message): + DESCRIPTOR = _RPBYOKOZUNAINDEXGETRESP + + # @@protoc_insertion_point(class_scope:RpbYokozunaIndexGetResp) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaIndexPutReq(_message.Message): + DESCRIPTOR = _RPBYOKOZUNAINDEXPUTREQ + + # @@protoc_insertion_point(class_scope:RpbYokozunaIndexPutReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaIndexDeleteReq(_message.Message): + DESCRIPTOR = _RPBYOKOZUNAINDEXDELETEREQ + + # @@protoc_insertion_point(class_scope:RpbYokozunaIndexDeleteReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaSchema(_message.Message): + DESCRIPTOR = _RPBYOKOZUNASCHEMA + + # @@protoc_insertion_point(class_scope:RpbYokozunaSchema) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaSchemaPutReq(_message.Message): + DESCRIPTOR = _RPBYOKOZUNASCHEMAPUTREQ + + # @@protoc_insertion_point(class_scope:RpbYokozunaSchemaPutReq) + +@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaSchemaGetReq(_message.Message): + DESCRIPTOR = _RPBYOKOZUNASCHEMAGETREQ + + # @@protoc_insertion_point(class_scope:RpbYokozunaSchemaGetReq) + 
+@add_metaclass(_reflection.GeneratedProtocolMessageType) +class RpbYokozunaSchemaGetResp(_message.Message): + DESCRIPTOR = _RPBYOKOZUNASCHEMAGETRESP + + # @@protoc_insertion_point(class_scope:RpbYokozunaSchemaGetResp) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n\027com.basho.riak.protobufB\016RiakYokozunaPB') +# @@protoc_insertion_point(module_scope) diff --git a/riak/resolver.py b/riak/resolver.py index c54779ca..6e245ff3 100644 --- a/riak/resolver.py +++ b/riak/resolver.py @@ -1,20 +1,16 @@ -""" -Copyright 2013 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def default_resolver(riak_object): @@ -40,5 +36,5 @@ def last_written_resolver(riak_object): :param riak_object: an object-in-conflict that will be resolved :type riak_object: :class:`RiakObject ` """ - lm = lambda x: x.last_modified - riak_object.siblings = [max(riak_object.siblings, key=lm), ] + riak_object.siblings = [max(riak_object.siblings, + key=lambda x: x.last_modified), ] diff --git a/riak/riak_error.py b/riak/riak_error.py new file mode 100644 index 00000000..4fe0ce05 --- /dev/null +++ b/riak/riak_error.py @@ -0,0 +1,49 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class RiakError(Exception): + """ + Base class for exceptions generated in the Riak API. + """ + def __init__(self, *args, **kwargs): + super(RiakError, self).__init__(*args, **kwargs) + if len(args) > 0: + self.value = args[0] + else: + self.value = 'unknown' + + def __str__(self): + return repr(self.value) + + +class ConflictError(RiakError): + """ + Raised when an operation is attempted on a + :class:`~riak.riak_object.RiakObject` that has more than one + sibling. + """ + def __init__(self, message='Object in conflict'): + super(ConflictError, self).__init__(message) + + +class ListError(RiakError): + """ + Raised when a list operation is attempted and + riak.disable_list_exceptions is false. 
+ """ + def __init__(self, message='Bucket and key list operations ' + 'are expensive and should not be ' + 'used in production.'): + super(ListError, self).__init__(message) diff --git a/riak/riak_object.py b/riak/riak_object.py index 800822d9..ab9650ca 100644 --- a/riak/riak_object.py +++ b/riak/riak_object.py @@ -1,27 +1,22 @@ -""" -Copyright 2012-2013 Basho Technologies -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from riak import ConflictError from riak.content import RiakContent import base64 from six import string_types, PY2 +from riak.mapreduce import RiakMapReduce def content_property(name, doc=None): @@ -288,7 +283,7 @@ def store(self, w=None, dw=None, pw=None, return_body=True, return self def reload(self, r=None, pr=None, timeout=None, basic_quorum=None, - notfound_ok=None): + notfound_ok=None, head_only=False): """ Reload the object from Riak. When this operation completes, the object could contain new metadata and a new value, if the object @@ -312,10 +307,13 @@ def reload(self, r=None, pr=None, timeout=None, basic_quorum=None, :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool + :param head_only: whether to fetch without value, so only metadata + (only available on PB transport) + :type head_only: bool :rtype: :class:`RiakObject` """ - self.client.get(self, r=r, pr=pr, timeout=timeout) + self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only) return self def delete(self, r=None, w=None, dw=None, pr=None, pw=None, @@ -410,5 +408,3 @@ def reduce(self, *args): mr = RiakMapReduce(self.client) mr.add(self.bucket.name, self.key) return mr.reduce(*args) - -from riak.mapreduce import RiakMapReduce diff --git a/riak/security.py b/riak/security.py index 7da79ea7..d048f008 100644 --- a/riak/security.py +++ b/riak/security.py @@ -1,56 +1,43 @@ -""" -Copyright 2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. 
See the License for the -specific language governing permissions and limitations -under the License. -""" - +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ssl import warnings -from six import PY2 from riak import RiakError from riak.util import str_to_long -OPENSSL_VERSION_101G = 268439679 -if PY2: +if hasattr(ssl, 'SSLContext'): + # For Python >= 2.7.9 and Python 3.x + USE_STDLIB_SSL = True +else: + # For Python 2.6 and <= 2.7.8 + USE_STDLIB_SSL = False + +if not USE_STDLIB_SSL: import OpenSSL.SSL from OpenSSL import crypto - sslver = OpenSSL.SSL.OPENSSL_VERSION_NUMBER - # Be sure to use at least OpenSSL 1.0.1g - if (sslver < OPENSSL_VERSION_101G) or \ - not hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): - verstring = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION) - msg = "Found {0} version, but expected at least OpenSSL 1.0.1g. 
" \ - "Security may not support TLS 1.2.".format(verstring) - warnings.warn(msg, UserWarning) - if hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): - DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_2_METHOD - elif hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): - DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_1_METHOD - elif hasattr(OpenSSL.SSL, 'TLSv1_METHOD'): - DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_METHOD - else: - DEFAULT_TLS_VERSION = OpenSSL.SSL.SSLv23_METHOD -else: - import ssl +OPENSSL_VERSION_101G = 268439679 +if hasattr(ssl, 'OPENSSL_VERSION_NUMBER'): + # For Python 2.7 and Python 3.x sslver = ssl.OPENSSL_VERSION_NUMBER # Be sure to use at least OpenSSL 1.0.1g - if sslver < OPENSSL_VERSION_101G or \ - not hasattr(ssl, 'PROTOCOL_TLSv1_2'): + tls_12 = hasattr(ssl, 'PROTOCOL_TLSv1_2') + if sslver < OPENSSL_VERSION_101G or not tls_12: verstring = ssl.OPENSSL_VERSION - msg = "Found {0} version, but expected at least OpenSSL 1.0.1g. " \ - "Security may not support TLS 1.2.".format(verstring) + msg = "{0} (>= 1.0.1g required), TLS 1.2 support: {1}" \ + .format(verstring, tls_12) warnings.warn(msg, UserWarning) if hasattr(ssl, 'PROTOCOL_TLSv1_2'): DEFAULT_TLS_VERSION = ssl.PROTOCOL_TLSv1_2 @@ -61,6 +48,25 @@ else: DEFAULT_TLS_VERSION = ssl.PROTOCOL_SSLv23 +else: + # For Python 2.6 + sslver = OpenSSL.SSL.OPENSSL_VERSION_NUMBER + # Be sure to use at least OpenSSL 1.0.1g + tls_12 = hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD') + if (sslver < OPENSSL_VERSION_101G) or tls_12: + verstring = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION) + msg = "{0} (>= 1.0.1g required), TLS 1.2 support: {1}" \ + .format(verstring, tls_12) + warnings.warn(msg, UserWarning) + if hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'): + DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_2_METHOD + elif hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'): + DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_1_METHOD + elif hasattr(OpenSSL.SSL, 'TLSv1_METHOD'): + DEFAULT_TLS_VERSION = OpenSSL.SSL.TLSv1_METHOD + else: + DEFAULT_TLS_VERSION = OpenSSL.SSL.SSLv23_METHOD + 
class SecurityError(RiakError): """ @@ -197,7 +203,7 @@ def ssl_version(self): """ return self._ssl_version - if PY2: + if not USE_STDLIB_SSL: @property def pkey(self): """ @@ -244,7 +250,7 @@ def _cached_cert(self, key, loader): if not isinstance(key_file, list): key_file = [key_file] for filename in key_file: - with open(filename, 'r') as f: + with open(filename, 'rb') as f: cert_list.append(loader(OpenSSL.SSL.FILETYPE_PEM, f.read())) # If it is not a list, just store the first element @@ -266,20 +272,20 @@ def _has_credential(self, key): return (getattr(self, internal_key) is not None) or \ (getattr(self, internal_key + "_file") is not None) - def _check_revoked_cert(self, ssl_socket): - """ - Checks whether the server certificate on the passed socket has been - revoked by checking the CRL. + def _check_revoked_cert(self, ssl_socket): + """ + Checks whether the server certificate on the passed socket has been + revoked by checking the CRL. - :param ssl_socket: the SSL/TLS socket - :rtype: bool - :raises SecurityError: when the certificate has been revoked - """ - if not self._has_credential('crl'): - return True - - servcert = ssl_socket.get_peer_certificate() - servserial = servcert.get_serial_number() - for rev in self.crl.get_revoked(): - if servserial == str_to_long(rev.get_serial(), 16): - raise SecurityError("Server certificate has been revoked") + :param ssl_socket: the SSL/TLS socket + :rtype: bool + :raises SecurityError: when the certificate has been revoked + """ + if not self._has_credential('crl'): + return True + + servcert = ssl_socket.get_peer_certificate() + servserial = servcert.get_serial_number() + for rev in self.crl.get_revoked(): + if servserial == str_to_long(rev.get_serial(), 16): + raise SecurityError("Server certificate has been revoked") diff --git a/riak/table.py b/riak/table.py new file mode 100644 index 00000000..d4006503 --- /dev/null +++ b/riak/table.py @@ -0,0 +1,110 @@ +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from six import string_types, PY2 + + +class Table(object): + """ + The ``Table`` object allows you to access properties on a Riak + timeseries table and query timeseries data. + """ + def __init__(self, client, name): + """ + Returns a new ``Table`` instance. + + :param client: A :class:`RiakClient ` + instance + :type client: :class:`RiakClient ` + :param name: The table's name + :type name: string + """ + if not isinstance(name, string_types): + raise TypeError('Table name must be a string') + + if PY2: + try: + name = name.encode('ascii') + except UnicodeError: + raise TypeError('Unicode table names are not supported.') + + self._client = client + self.name = name + + def __str__(self): + return self.name + + def __repr__(self): + return self.name + + def new(self, rows, columns=None): + """ + A shortcut for manually instantiating a new + :class:`~riak.ts_object.TsObject` + + :param rows: An list of lists with timeseries data + :type rows: list + :param columns: An list of Column names and types. Optional. + :type columns: list + :rtype: :class:`~riak.ts_object.TsObject` + """ + from riak.ts_object import TsObject + + return TsObject(self._client, self, rows, columns) + + def describe(self): + """ + Retrieves a timeseries table's description. + + :rtype: :class:`TsObject ` + """ + return self._client.ts_describe(self) + + def get(self, key): + """ + Gets a value from a timeseries table. 
+ + :param key: The timeseries value's key. + :type key: list + :rtype: :class:`TsObject ` + """ + return self._client.ts_get(self, key) + + def delete(self, key): + """ + Deletes a value from a timeseries table. + + :param key: The timeseries value's key. + :type key: list or dict + :rtype: boolean + """ + return self._client.ts_delete(self, key) + + def query(self, query, interpolations=None): + """ + Queries a timeseries table. + + :param query: The timeseries query. + :type query: string + :rtype: :class:`TsObject ` + """ + return self._client.ts_query(self, query, interpolations) + + def stream_keys(self, timeout=None): + """ + Streams keys from a timeseries table. + + :rtype: list + """ + return self._client.ts_stream_keys(self, timeout) diff --git a/riak/test_server.py b/riak/test_server.py index da727fec..99d68742 100644 --- a/riak/test_server.py +++ b/riak/test_server.py @@ -1,3 +1,17 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from __future__ import print_function import os.path import threading @@ -31,8 +45,8 @@ def __repr__(self): def __eq__(self, other): return self.str == other - def __cmp__(self, other): - return cmp(self.str, other) + def __lt__(self, other): + return self.str < other def erlang_config(hash, depth=1): diff --git a/riak/tests/__init__.py b/riak/tests/__init__.py index c4d64bf1..be547be6 100644 --- a/riak/tests/__init__.py +++ b/riak/tests/__init__.py @@ -1,4 +1,22 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging import os +import socket +import sys + from riak.test_server import TestServer from riak.security import SecurityCreds @@ -12,13 +30,30 @@ test_server.start() try: - __import__('riak_pb') + __import__('riak.pb') HAVE_PROTO = True except ImportError: HAVE_PROTO = False + +def hostname_resolves(hostname): + try: + socket.gethostbyname(hostname) + return 1 + except socket.error: + return 0 + + +distutils_debug = os.environ.get('DISTUTILS_DEBUG', '0') +if distutils_debug == '1': + logger = logging.getLogger() + logger.level = logging.DEBUG + logger.addHandler(logging.StreamHandler(sys.stdout)) + HOST = os.environ.get('RIAK_TEST_HOST', '127.0.0.1') +PROTOCOL = os.environ.get('RIAK_TEST_PROTOCOL', 'pbc') + PB_HOST = os.environ.get('RIAK_TEST_PB_HOST', HOST) PB_PORT = int(os.environ.get('RIAK_TEST_PB_PORT', '8087')) @@ -30,39 +65,57 @@ DUMMY_HTTP_PORT = int(os.environ.get('DUMMY_HTTP_PORT', '1023')) DUMMY_PB_PORT = int(os.environ.get('DUMMY_PB_PORT', '1022')) - -SKIP_SEARCH = int(os.environ.get('SKIP_SEARCH', '1')) +RUN_BTYPES = int(os.environ.get('RUN_BTYPES', '0')) +RUN_DATATYPES = int(os.environ.get('RUN_DATATYPES', '0')) +RUN_CLIENT = int(os.environ.get('RUN_CLIENT', '0')) +RUN_INDEXES = int(os.environ.get('RUN_INDEXES', '0')) +RUN_KV = int(os.environ.get('RUN_KV', '0')) +RUN_MAPREDUCE = int(os.environ.get('RUN_MAPREDUCE', '0')) +RUN_POOL = int(os.environ.get('RUN_POOL', '0')) +RUN_RESOLVE = int(os.environ.get('RUN_RESOLVE', '0')) +RUN_SEARCH = int(os.environ.get('RUN_SEARCH', '0')) +RUN_TIMESERIES = int(os.environ.get('RUN_TIMESERIES', '0')) RUN_YZ = int(os.environ.get('RUN_YZ', '0')) -SKIP_INDEXES = int(os.environ.get('SKIP_INDEXES', '1')) - -SKIP_POOL = os.environ.get('SKIP_POOL') -SKIP_RESOLVE = int(os.environ.get('SKIP_RESOLVE', '0')) -SKIP_BTYPES = int(os.environ.get('SKIP_BTYPES', '0')) +if PROTOCOL != 'pbc': + RUN_TIMESERIES = 0 RUN_SECURITY = int(os.environ.get('RUN_SECURITY', '0')) -SECURITY_USER = os.environ.get('RIAK_TEST_SECURITY_USER', 
'testuser') -SECURITY_PASSWD = os.environ.get('RIAK_TEST_SECURITY_PASSWD', 'testpassword') +if RUN_SECURITY: + h = 'riak-test' + if hostname_resolves(h): + HOST = PB_HOST = HTTP_HOST = h + else: + raise AssertionError( + 'RUN_SECURITY requires that the host name' + + ' "riak-test" resolves to the IP address of a Riak node' + + ' with security enabled.') + +SECURITY_USER = os.environ.get('RIAK_TEST_SECURITY_USER', 'riakpass') +SECURITY_PASSWD = os.environ.get('RIAK_TEST_SECURITY_PASSWD', 'Test1234') + SECURITY_CACERT = os.environ.get('RIAK_TEST_SECURITY_CACERT', - 'riak/tests/resources/ca.crt') + 'tools/test-ca/certs/cacert.pem') SECURITY_REVOKED = os.environ.get('RIAK_TEST_SECURITY_REVOKED', - 'riak/tests/resources/server.crl') + 'tools/test-ca/crl/crl.pem') SECURITY_BAD_CERT = os.environ.get('RIAK_TEST_SECURITY_BAD_CERT', - 'riak/tests/resources/bad_ca.crt') + 'tools/test-ca/certs/badcert.pem') # Certificate-based Authentication only supported by PBC -# N.B., username and password must both still be supplied -SECURITY_KEY = os.environ.get('RIAK_TEST_SECURITY_KEY', - 'riak/tests/resources/client.key') +SECURITY_KEY = os.environ.get( + 'RIAK_TEST_SECURITY_KEY', + 'tools/test-ca/private/riakuser-client-cert-key.pem') SECURITY_CERT = os.environ.get('RIAK_TEST_SECURITY_CERT', - 'riak/tests/resources/client.crt') + 'tools/test-ca/certs/riakuser-client-cert.pem') SECURITY_CERT_USER = os.environ.get('RIAK_TEST_SECURITY_CERT_USER', - 'certuser') -SECURITY_CERT_PASSWD = os.environ.get('RIAK_TEST_SECURITY_CERT_PASSWD', - 'certpass') + 'riakuser') + +SECURITY_CIPHERS = 'DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:' + \ + 'DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:AES128-SHA256:' + \ + 'AES128-SHA:AES256-SHA256:AES256-SHA:RC4-SHA' SECURITY_CREDS = None if RUN_SECURITY: SECURITY_CREDS = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, - cacert_file=SECURITY_CACERT) -SKIP_DATATYPES = int(os.environ.get('SKIP_DATATYPES', '0')) + cacert_file=SECURITY_CACERT, + 
ciphers=SECURITY_CIPHERS) diff --git a/riak/tests/base.py b/riak/tests/base.py new file mode 100644 index 00000000..9aaf4e69 --- /dev/null +++ b/riak/tests/base.py @@ -0,0 +1,81 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +import logging +import random +import riak + +from riak.client import RiakClient +from riak.tests import HOST, PROTOCOL, PB_PORT, HTTP_PORT, SECURITY_CREDS + + +class IntegrationTestBase(object): + host = None + pb_port = None + http_port = None + credentials = None + + @staticmethod + def randint(): + return random.randint(1, 999999) + + @staticmethod + def randname(length=12): + out = '' + for i in range(length): + out += chr(random.randint(ord('a'), ord('z'))) + return out + + @classmethod + def create_client(cls, host=None, http_port=None, pb_port=None, + protocol=None, credentials=None, **kwargs): + host = host or HOST + http_port = http_port or HTTP_PORT + pb_port = pb_port or PB_PORT + + if protocol is None: + if hasattr(cls, 'protocol') and (cls.protocol is not None): + protocol = cls.protocol + else: + protocol = PROTOCOL + + cls.protocol = protocol + + credentials = credentials or SECURITY_CREDS + + if hasattr(cls, 'client_options'): + kwargs.update(cls.client_options) + + logger = logging.getLogger() + logger.debug("RiakClient(protocol='%s', host='%s', pb_port='%d', " + "http_port='%d', credentials='%s', kwargs='%s')", + protocol, host, pb_port, http_port, 
credentials, kwargs) + + return RiakClient(protocol=protocol, + host=host, + http_port=http_port, + credentials=credentials, + pb_port=pb_port, + **kwargs) + + def setUp(self): + riak.disable_list_exceptions = True + self.bucket_name = self.randname() + self.key_name = self.randname() + self.client = self.create_client() + + def tearDown(self): + riak.disable_list_exceptions = False + self.client.close() diff --git a/riak/tests/test_six.py b/riak/tests/comparison.py similarity index 84% rename from riak/tests/test_six.py rename to riak/tests/comparison.py index c83f2b1e..aa1d21cc 100644 --- a/riak/tests/test_six.py +++ b/riak/tests/comparison.py @@ -1,20 +1,18 @@ -""" -Copyright 2014 Basho Technologies, Inc. +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
-""" +# -*- coding: utf-8 -*- from six import PY2, PY3 import collections import warnings @@ -129,8 +127,8 @@ def assertItemsEqual(self, expected_seq, actual_seq, msg=None): diffMsg = '\n'.join(lines) standardMsg = self._truncateMessage(standardMsg, diffMsg) - def assert_raises_regex(self, exception, regexp, msg=None): + def assert_raises_regex(self, exception, regexp): if PY2: - return self.assertRaisesRegexp(exception, regexp, msg) + return self.assertRaisesRegexp(exception, regexp) else: - return self.assertRaisesRegex(exception, regexp, msg) + return self.assertRaisesRegex(exception, regexp) diff --git a/riak/tests/pool-grinder.py b/riak/tests/pool-grinder.py index 09bef278..19cb71d7 100755 --- a/riak/tests/pool-grinder.py +++ b/riak/tests/pool-grinder.py @@ -1,17 +1,30 @@ #!/usr/bin/env python +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import print_function from six import PY2 -if PY2: - from Queue import Queue -else: - from queue import Queue from threading import Thread import sys -sys.path.append("../transports/") from pool import Pool from random import SystemRandom from time import sleep +if PY2: + from Queue import Queue +else: + from queue import Queue +sys.path.append("../transports/") class SimplePool(Pool): @@ -71,6 +84,7 @@ def _run(): else: return True + ret = True count = 0 while ret: diff --git a/riak/tests/resources/Makefile b/riak/tests/resources/Makefile deleted file mode 100644 index 9a726a91..00000000 --- a/riak/tests/resources/Makefile +++ /dev/null @@ -1,138 +0,0 @@ -# -# Copyright 2014 Basho Technologies, Inc. -# -# This file is provided to you under the Apache License, -# Version 2.0 (the "License"); you may not use this file -# except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -#under the License. 
- -CNTRY=US -STATE=Washington -CITY=Seattle -ORG=Basho -UNIT=Riak Python Client -EMAIL=clients@basho.com -PASSWD=#testpasswd -COMPANY=Basho Technologies -DAYS=3650 - -SSL=openssl -SSL_CONF=openssl.cnf - -CA_DIR=ca -CA_KEY=${CA_DIR}/ca.key -# Certification Signing Request -CA_CSR=${CA_DIR}/ca.csr -CA_CERT=${CA_DIR}/ca.crt -# Common Name -CA_CN=CA -CA_STRING="${CNTRY}\n${STATE}\n${CITY}\n${ORG}\n${UNIT}\n${CA_CN}\n${EMAIL}\n${PASSWD}\n${COMPANY}\n" - -SERVER_DIR=server -SERVER_KEY=${SERVER_DIR}/server.key -# Certification Signing Request -SERVER_CSR=${SERVER_DIR}/server.csr -SERVER_CERT=${SERVER_DIR}/server.crt -# Certificate Revocation List -SERVER_CRL=${SERVER_DIR}/server.crl -# Common Name (must match nodename) -SERVER_CN=riak@127.0.0.1 -SERVER_STRING="${CNTRY}\n${STATE}\n${CITY}\n${ORG}\n${UNIT}\n${SERVER_CN}\n${EMAIL}\n${PASSWD}\n${COMPANY}\n" - -CLIENT_DIR=client -CLIENT_KEY=${CLIENT_DIR}/client.key -CLIENT_CSR=${CLIENT_DIR}/client.csr -CLIENT_CERT=${CLIENT_DIR}/client.crt -# Common Name (must name Riak username) -CLIENT_CN=certuser -CLIENT_STRING="${CNTRY}\n${STATE}\n${CITY}\n${ORG}\n${UNIT}\n${CLIENT_CN}\n${EMAIL}\n${PASSWD}\n${COMPANY}\n" - -RANDOM=od -vAn -N3 -tu4 < /dev/urandom| awk '{print $1;}' -SERIAL=serial -CRL=crlnumber -NEWCERT_DIR=newcerts -INDEX=index.txt -INDEX_ATTR=index.txt.attr - -all: ${CA_CERT} ${SERVER_CERT} ${CLIENT_CERT} ${SERVER_CRL} - -install: - cp ${CA_CERT} . - cp ${SERVER_CERT} . - cp ${SERVER_KEY} . - cp ${SERVER_CRL} . - cp ${CLIENT_KEY} . - cp ${CLIENT_CERT} . 
- -# Certificate Serial Number -${SERIAL}: - printf "%06x" `${RANDOM}` > $@ - -# Certificate Revocation List Number -${CRL}: - printf "%06x" `${RANDOM}` > $@ - -${CA_KEY}: ${CA_DIR} ${SERIAL} ${CRL} ${INDEX} ${INDEX_ATTR} ${NEWCERT_DIR} - ${SSL} genrsa -out $@ 2048 - -${CA_CSR}: ${CA_KEY} - printf ${CA_STRING} | ${SSL} req -config ${SSL_CONF} -new -key $< -out $@ - -${CA_CERT}: ${CA_CSR} - ${SSL} x509 -req -days ${DAYS} -in $< -out $@ -signkey ${CA_KEY} - -${SERVER_KEY}: ${SERVER_DIR} ${SERIAL} ${CRL} - ${SSL} genrsa -out $@ 2048 - -${SERVER_CSR}: ${SERVER_KEY} - printf ${SERVER_STRING} | ${SSL} req -config ${SSL_CONF} -new -key $< -out $@ - -${SERVER_CERT}: ${SERVER_CSR} - yes | OPENSSL_CONF=${SSL_CONF} ${SSL} ca -days ${DAYS} -in $< -cert ${CA_CERT} -out $@ - -${SERVER_CRL}: ${CRL} ${SERVER_CERT} ${CA_KEY} ${CA_CERT} - rm -f ${INDEX} - touch ${INDEX} - OPENSSL_CONF=${SSL_CONF} ${SSL} ca -gencrl -keyfile ${CA_KEY} -cert ${CA_CERT} -out $@ - OPENSSL_CONF=${SSL_CONF} ${SSL} ca -revoke ${SERVER_CERT} -keyfile ${CA_KEY} -cert ${CA_CERT} - OPENSSL_CONF=${SSL_CONF} ${SSL} ca -gencrl -keyfile ${CA_KEY} -cert ${CA_CERT} -out $@ - -${CLIENT_KEY}: ${CLIENT_DIR} ${SERIAL} ${CRL} - ${SSL} genrsa -out $@ 2048 - -${CLIENT_CSR}: ${CLIENT_KEY} - printf ${CLIENT_STRING} | ${SSL} req -config ${SSL_CONF} -new -key $< -out $@ - -${CLIENT_CERT}: ${CLIENT_CSR} - yes | OPENSSL_CONF=${SSL_CONF} ${SSL} ca -days ${DAYS} -in $< -cert ${CA_CERT} -key ${CA_KEY} -out $@ - -clean: - rm -rf ${CA_DIR} ${SERVER_DIR} ${CLIENT_DIR} ${NEWCERT_DIR} ${SERIAL}* ${CRL}* ${INDEX}* - -${CA_DIR}: - mkdir -p $@ - -${SERVER_DIR}: - mkdir -p $@ - -${CLIENT_DIR}: - mkdir -p $@ - -${NEWCERT_DIR}: - mkdir -p $@ - -${INDEX}: - touch ${INDEX} - -${INDEX_ATTR}: - touch ${INDEX_ATTR} - diff --git a/riak/tests/resources/README.md b/riak/tests/resources/README.md deleted file mode 100644 index 2bc4a6dc..00000000 --- a/riak/tests/resources/README.md +++ /dev/null @@ -1,16 +0,0 @@ -**DO NOT USE THESE IN PRODUCTION** - 
-This directory has certificates and a key for testing Riak authentication. - -* server.key - a private key for a Riak server (PEM format) -* server.crt - the certificate for server.key (PEM format) -* server.crl - certificate revocation list -* ca.crt - a certificate for the CA that issued server.crt (PEM format) -* empty_ca.crt - a certificate for a CA that has and cannot ever issue a - certificate (I deleted its private key) -* client.crt - certificate for client authenication (PEM format) - -**DO NOT USE THESE IN PRODUCTION** - -Generation of values inspired by https://github.com/basho-labs/riak-ruby-ca - diff --git a/riak/tests/resources/bad_ca.crt b/riak/tests/resources/bad_ca.crt deleted file mode 100644 index 265001d9..00000000 --- a/riak/tests/resources/bad_ca.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -IIDfjCCAuegAwIBAgIJAO0pDelK8iopMA0GCSqGSIb3DQEBBQUAMIGHMQswCQYD -VQQGEwJVUzEQMA4GA1UECBMHRmxvcmlkYTEOMAwGA1UEBxMFTWlhbWkxGzAZBgNV -BAoTEkJhc2hvIFRlY2hub2xvZ2llczEZMBcGA1UECxMQUmlhayBSdWJ5IENsaWVu -dDEeMBwGCSqGSIb3DQEJARYPYnJ5Y2VAYmFzaG8uY29tMB4XDTE0MDIwNDIyNTAw -NFoXDTI0MDIwMjIyNTAwNFowgYcxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdGbG9y -aWRhMQ4wDAYDVQQHEwVNaWFtaTEbMBkGA1UEChMSQmFzaG8gVGVjaG5vbG9naWVz -MRkwFwYDVQQLExBSaWFrIFJ1YnkgQ2xpZW50MR4wHAYJKoZIhvcNAQkBFg9icnlj -ZUBiYXNoby5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAN4yelOGl+MW -FY7Pf9vZjNHDuVQfBkiY9myriNpr3YGGou0xEIJvikkhl4eQAzDsw52qTsESlfwK -+uFmCBvhPBgeWYRd2LnAvRSrD4c7fDp+2eVUL3EKDHKdVNwnobvMiN2GQRZT2E+J -gBX3Wx3VGDtI0+M1Q9QPI7J1iewE0rB/AgMBAAGjge8wgewwHQYDVR0OBBYEFAkx -E0bwW0jX8FhWFW9XMhzGkMkhMIG8BgNVHSMEgbQwgbGAFAkxE0bwW0jX8FhWFW9X -MhzGkMkhoYGNpIGKMIGHMQswCQYDVQQGEwJVUzEQMA4GA1UECBMHRmxvcmlkYTEO -MAwGA1UEBxMFTWlhbWkxGzAZBgNVBAoTEkJhc2hvIFRlY2hub2xvZ2llczEZMBcG -A1UECxMQUmlhayBSdWJ5IENsaWVudDEeMBwGCSqGSIb3DQEJARYPYnJ5Y2VAYmFz -aG8uY29tggkA7SkN6UryKikwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOB -gQCPgOgPnv33+/LrfSSDh/6OdtYAGdrxMkCsuPdwmyZlUl9W7gxFjX7EPxYycUgO 
-HNGuI10vOipgXrsJZUtQFi9OZ8+2m2Y4JHZR1xqSoHmXL/LoZYggY0BcwfjpSujL -pMhBUfzTLlULaaaBEGCVwxTabP+qzRma/d1FjkMUzbHrmQ== ------END CERTIFICATE----- diff --git a/riak/tests/resources/ca.crt b/riak/tests/resources/ca.crt deleted file mode 100644 index cefc7396..00000000 --- a/riak/tests/resources/ca.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDnjCCAoYCCQDb6VQV9V3A/zANBgkqhkiG9w0BAQUFADCBkDELMAkGA1UEBhMC -VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAMBgNV -BAoMBUJhc2hvMRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNVBAMM -AkNBMSAwHgYJKoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbTAeFw0xNDA5MjQy -MjU1MjVaFw0yNDA5MjEyMjU1MjVaMIGQMQswCQYDVQQGEwJVUzETMBEGA1UECAwK -V2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEOMAwGA1UECgwFQmFzaG8xGzAZ -BgNVBAsMElJpYWsgUHl0aG9uIENsaWVudDELMAkGA1UEAwwCQ0ExIDAeBgkqhkiG -9w0BCQEWEWNsaWVudHNAYmFzaG8uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA1NQZ/aMes16da5Qs1czlmn9kXunhrOVwhIljXijCTBNAZU7gkqLx -XcbNGIfPRfx5IWNSEcn8ZEAjMe7i29zuMdjvtZVhjOw5u0PJ1TGFqpmLRsMYnMqS -HRXNBQq/t+RsriunqxkHYFSfaNEepckosuQF+ao6eIfjbCnAo/0YMM4DY4zfFlZc -XWdVscQxq3piNJEt7Ob/p8TrBZM9bdkks+Sk/l1ZabYmbRo+AtCmzdvcsqI3uqAA -rm7rKkcuS+A/0z0g/vhJILFcVl+RDexTmVifM8iQE4buUi4CJMqy6fwmDBSlt4MB -8DW8MWNZ/RVGoC9hAhaAq3D9t1rudTpqnQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB -AQAc9Dgbq8Ca/6I2u7uN9hVk7hhgLTmOXWokhfY4tnpVNu4M1TB9dXSbvaIAQd6g -40GQ0W3nZaN1x1LdxgG4El+WxO12rhTjQEEge7mDQMcFjCXIJvrbqDyZ/J2tLG7k -Z9ZqigtTt1VpDE8OjqI/K50R2YU5/CwBDwa33QB6t6GWjL/72vrNoKkQhzd0olkk -xJjoBde7FSfXBuef3a2IMcUyU7ukm2DRvLUslG332ow3oQoL7na8fdsGQ9bDP+HI -lbq0xLvqQbgmbdwwxfa0r9nhqArsSG4q+k3kCpQcy1E2k1NdTn9yNDiMtcWpC+G0 -7eKc5VzGTi8NwadJLtpYDElq ------END CERTIFICATE----- diff --git a/riak/tests/resources/client.crt b/riak/tests/resources/client.crt deleted file mode 100644 index ef81a369..00000000 --- a/riak/tests/resources/client.crt +++ /dev/null @@ -1,95 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 11568451 (0xb08543) - Signature Algorithm: sha1WithRSAEncryption - Issuer: 
C=US, ST=Washington, L=Seattle, O=Basho, OU=Riak Python Client, CN=CA/emailAddress=clients@basho.com - Validity - Not Before: Sep 24 22:55:25 2014 GMT - Not After : Sep 21 22:55:25 2024 GMT - Subject: C=US, ST=Washington, O=Basho, OU=Riak Python Client, CN=certuser/emailAddress=clients@basho.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:c7:e0:44:3a:eb:60:8a:86:70:66:0d:90:f6:29: - a9:85:17:21:3c:d0:7b:62:24:7d:a5:b6:d8:95:b1: - ef:8e:03:24:51:89:5a:34:8a:7a:e4:23:48:34:ce: - e9:f7:8c:d5:41:f7:fe:e5:7e:15:71:3f:6e:d3:07: - 10:cd:60:92:39:24:d4:89:b4:74:34:4b:0d:28:47: - c1:ff:72:d0:e6:e4:a8:c5:95:eb:60:b8:f1:af:d0: - e4:3a:8c:5f:5d:d7:e4:20:85:11:cc:b0:fc:05:95: - df:d1:0b:1f:b2:4a:9d:21:40:28:2d:c6:a7:37:ee: - 3c:f1:f9:c1:ee:7b:bd:ec:74:e4:9a:4f:d5:db:fe: - 91:e7:9f:95:1f:19:a1:c7:d3:3e:18:4b:d2:58:5b: - 26:80:f2:7e:1d:94:4e:c6:b3:4a:ae:b2:ea:50:b0: - eb:3d:c5:76:f1:18:ba:73:cf:87:ec:f4:bd:dc:4a: - 59:1d:c7:bc:79:88:c6:e8:2c:89:09:8c:1b:4b:93: - 8f:23:f5:2d:40:f8:70:66:0c:3d:c5:e7:99:cb:58: - f6:46:b4:60:bf:b7:02:f4:1b:04:30:ca:aa:30:6a: - a8:b0:ad:ec:ad:40:d2:fb:78:b2:51:2a:d3:40:4a: - 60:bb:24:40:6f:21:49:58:fa:56:b7:e9:5e:9f:b9: - 51:bd - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Cert Type: - SSL Client, SSL Server - X509v3 Key Usage: - Digital Signature, Non Repudiation, Key Encipherment - Netscape Comment: - Riak Python Client Testing Certificate - X509v3 Subject Key Identifier: - 7C:AD:B9:A3:4C:9B:59:3E:2F:F3:4B:07:64:92:2D:1C:28:99:AC:A2 - X509v3 Authority Key Identifier: - DirName:/C=US/ST=Washington/L=Seattle/O=Basho/OU=Riak Python Client/CN=CA/emailAddress=clients@basho.com - serial:DB:E9:54:15:F5:5D:C0:FF - - X509v3 Extended Key Usage: - TLS Web Server Authentication, TLS Web Client Authentication - Signature Algorithm: sha1WithRSAEncryption - ad:f7:51:6a:c5:1b:eb:93:81:a3:b2:de:3e:a6:15:9d:4f:e7: - 
f9:37:19:f7:0d:fc:e5:7e:02:11:92:be:da:e4:c3:78:ed:90: - a9:a7:57:f0:08:72:a5:90:cc:5e:27:9b:8d:ad:9f:38:95:26: - d3:79:c8:03:0b:7e:40:dd:a8:0b:13:98:2f:6c:52:01:a9:b9: - eb:fe:0c:19:2e:36:82:b0:fc:a5:46:88:64:fa:8d:d3:73:b4: - be:9c:f1:74:a8:a0:28:2a:81:9d:cd:62:8a:e0:12:5f:c3:c4: - 0b:d0:15:f6:02:0e:41:da:50:f2:c8:70:91:24:71:e9:89:e7: - ac:47:73:05:97:7c:3f:4e:24:22:05:06:29:1e:08:b3:49:97: - 3b:11:4f:56:ba:83:c1:0f:8d:20:ed:80:9a:0d:6d:53:ee:63: - bf:3a:24:e3:62:9c:eb:6f:b8:af:01:0d:89:63:47:b3:fc:f7: - 30:f6:3f:96:ed:2e:52:bc:75:c9:27:82:70:b6:e6:d2:f4:0c: - aa:fc:39:c7:54:97:44:98:3f:5f:e5:27:2d:33:d9:74:98:e0: - 96:aa:71:b3:5a:27:78:3b:ed:70:93:3a:bd:df:f9:35:78:68: - 70:36:d6:16:61:83:66:8a:f9:96:c3:e0:ca:a3:20:3e:50:1b: - 9a:fd:7c:4c ------BEGIN CERTIFICATE----- -MIIE7DCCA9SgAwIBAgIEALCFQzANBgkqhkiG9w0BAQUFADCBkDELMAkGA1UEBhMC -VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAMBgNV -BAoMBUJhc2hvMRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNVBAMM -AkNBMSAwHgYJKoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbTAeFw0xNDA5MjQy -MjU1MjVaFw0yNDA5MjEyMjU1MjVaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECAwK -V2FzaGluZ3RvbjEOMAwGA1UECgwFQmFzaG8xGzAZBgNVBAsMElJpYWsgUHl0aG9u -IENsaWVudDERMA8GA1UEAwwIY2VydHVzZXIxIDAeBgkqhkiG9w0BCQEWEWNsaWVu -dHNAYmFzaG8uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx+BE -OutgioZwZg2Q9imphRchPNB7YiR9pbbYlbHvjgMkUYlaNIp65CNINM7p94zVQff+ -5X4VcT9u0wcQzWCSOSTUibR0NEsNKEfB/3LQ5uSoxZXrYLjxr9DkOoxfXdfkIIUR -zLD8BZXf0QsfskqdIUAoLcanN+488fnB7nu97HTkmk/V2/6R55+VHxmhx9M+GEvS -WFsmgPJ+HZROxrNKrrLqULDrPcV28Ri6c8+H7PS93EpZHce8eYjG6CyJCYwbS5OP -I/UtQPhwZgw9xeeZy1j2RrRgv7cC9BsEMMqqMGqosK3srUDS+3iyUSrTQEpguyRA -byFJWPpWt+len7lRvQIDAQABo4IBVjCCAVIwCQYDVR0TBAIwADARBglghkgBhvhC -AQEEBAMCBsAwCwYDVR0PBAQDAgXgMDUGCWCGSAGG+EIBDQQoFiZSaWFrIFB5dGhv -biBDbGllbnQgVGVzdGluZyBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUfK25o0ybWT4v -80sHZJItHCiZrKIwga8GA1UdIwSBpzCBpKGBlqSBkzCBkDELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAMBgNVBAoM 
-BUJhc2hvMRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNVBAMMAkNB -MSAwHgYJKoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbYIJANvpVBX1XcD/MB0G -A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjANBgkqhkiG9w0BAQUFAAOCAQEA -rfdRasUb65OBo7LePqYVnU/n+TcZ9w385X4CEZK+2uTDeO2QqadX8AhypZDMXieb -ja2fOJUm03nIAwt+QN2oCxOYL2xSAam56/4MGS42grD8pUaIZPqN03O0vpzxdKig -KCqBnc1iiuASX8PEC9AV9gIOQdpQ8shwkSRx6YnnrEdzBZd8P04kIgUGKR4Is0mX -OxFPVrqDwQ+NIO2Amg1tU+5jvzok42Kc62+4rwENiWNHs/z3MPY/lu0uUrx1ySeC -cLbm0vQMqvw5x1SXRJg/X+UnLTPZdJjglqpxs1oneDvtcJM6vd/5NXhocDbWFmGD -Zor5lsPgyqMgPlAbmv18TA== ------END CERTIFICATE----- diff --git a/riak/tests/resources/client.key b/riak/tests/resources/client.key deleted file mode 100644 index f397c320..00000000 --- a/riak/tests/resources/client.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAx+BEOutgioZwZg2Q9imphRchPNB7YiR9pbbYlbHvjgMkUYla -NIp65CNINM7p94zVQff+5X4VcT9u0wcQzWCSOSTUibR0NEsNKEfB/3LQ5uSoxZXr -YLjxr9DkOoxfXdfkIIURzLD8BZXf0QsfskqdIUAoLcanN+488fnB7nu97HTkmk/V -2/6R55+VHxmhx9M+GEvSWFsmgPJ+HZROxrNKrrLqULDrPcV28Ri6c8+H7PS93EpZ -Hce8eYjG6CyJCYwbS5OPI/UtQPhwZgw9xeeZy1j2RrRgv7cC9BsEMMqqMGqosK3s -rUDS+3iyUSrTQEpguyRAbyFJWPpWt+len7lRvQIDAQABAoIBAARWO2TD4q5eyGWO -ecy9jq4SmGgHZgX1ILzNwNlOqRH9w3X5cvmc35m/ojtzGeSDK8VoqiE0oSZ559+w -UY8DP2j6AZqTTcosrrZnCDCQvzOjVn4jCJ5qpOPZtnoGvbL9kjeaa45zcKR2YgrZ -IPDvaYVlLPoBS3ONOclOUATqrm5I+SWzsl45pLrbKUxDpMYcoezY6ok1q1o79i+7 -QBkBliO2IXquMFF/JJwHua4r1R9KqdPKFTynbCpaZ477flCHEWcfWPgFpOpfT6Go -7rqCiR5ug8CyBjNsHnYx1pVZs8I0c2F7WVUQilEh/PqmQcGoy6L5OOCOeQCVMw1i -QfiGIkECgYEA+4W2tGikhAdNRixlvPQeELmGTQzl5rBT7HwIvyjcv8waYBNg5Roy -MKH03R1C5hGFVw+p3JpWQb4uMsX6SHJUZYzLZiqjjknZQ8/fYGJZLhCCu1w3H7wk -Fr1kZLy8hKcbmr/c3Xd4VUhp9mIHehRRYqfccBgeUkBfPFjRINPoY7kCgYEAy28u -Lm06jq+7xvNA2VllPPI+QCQsxtp24w8aiLqzDP3pVpr8q9JMrzv/MOPHctj0D5WN -8FGjBQipzpK0W/OA0FrpRM/NT7/+DF3nqgBQMqqzRX/z6UKGs6DgP9MPBvLEgeQV -UtOCwF5jr+/+6NlrnftcuVYMZ/a5adbB8a7+KCUCgYEA8XaYw/GBns5zvN56fT/O -bTfuWqH7Q9AbgXhB3WKZKfgDiiCQcOEJNe8FaBDjXIONgtsiswnnrQ2qxEuTz2ES 
-7LqRue9NPRhgX918EMfZ3YM0PjJ1KR4xdzMy4hLe2Gqk8ZcnreU3vIfcUhAiJWzE -BPYpheNhmIz74K9TdTR6cOECgYBchW4tc0QEjOwL6an+r+eZNlsVdN5geg9D6SSa -Nr8kE37CAq8TvgteTx0asR2OoBkv1Ua+m4JW0b/Y0WPxxec5237n6tJniNwT58lq -ycWvpW7vFuhpl/YHUA1tOaJF+Ldik8cW6lc3Aja4V6BJakjFiwJ60CXISq+88Q03 -y+yPeQKBgCFx40pPW6o9thCM96P/K1AtVXAjTUxMZmIcZbch01r0ahwdu/ITBwon -fCrTAYaqDEAs4y85XVWmdWn9bjUwSk3M6wewY86rtOp3f+PmyCxdyyNR4xq/C7nn -W3GZ3hNu55OI0yMWbd3tLjtyUY0x9Mg/Oyz+k7VRQkhNKagyEoDp ------END RSA PRIVATE KEY----- diff --git a/riak/tests/resources/openssl.cnf b/riak/tests/resources/openssl.cnf deleted file mode 100644 index e23b3a7b..00000000 --- a/riak/tests/resources/openssl.cnf +++ /dev/null @@ -1,355 +0,0 @@ -# -# OpenSSL example configuration file. -# This is mostly being used for generation of certificate requests. -# - -# This definition stops the following lines choking if HOME isn't -# defined. -HOME = . -RANDFILE = $ENV::HOME/.rnd - -# Extra OBJECT IDENTIFIER info: -#oid_file = $ENV::HOME/.oid -oid_section = new_oids - -# To use this configuration file with the "-extfile" option of the -# "openssl x509" utility, name here the section containing the -# X.509v3 extensions to use: -# extensions = -# (Alternatively, use a configuration file that has only -# X.509v3 extensions in its main [= default] section.) - -[ new_oids ] - -# We can add new OIDs in here for use by 'ca', 'req' and 'ts'. -# Add a simple OID like this: -# testoid1=1.2.3.4 -# Or use config file substitution like this: -# testoid2=${testoid1}.5.6 - -# Policies used by the TSA examples. -tsa_policy1 = 1.2.3.4.1 -tsa_policy2 = 1.2.3.4.5.6 -tsa_policy3 = 1.2.3.4.5.7 - -#################################################################### -[ ca ] -default_ca = CA_default # The default ca section - -#################################################################### -[ CA_default ] - -dir = . 
# Where everything is kept -certs = $dir/certs # Where the issued certs are kept -crl_dir = $dir/crl # Where the issued crl are kept -database = $dir/index.txt # database index file. -#unique_subject = no # Set to 'no' to allow creation of - # several ctificates with same subject. -new_certs_dir = $dir/newcerts # default place for new certs. - -certificate = $dir/cacert.pem # The CA certificate -serial = $dir/serial # The current serial number -crlnumber = $dir/crlnumber # the current crl number - # must be commented out to leave a V1 CRL -crl = $dir/crl.pem # The current CRL -private_key = $dir/ca/ca.key # The private key -RANDFILE = $dir/private/.rand # private random number file - -x509_extensions = usr_cert # The extentions to add to the cert - -# Comment out the following two lines for the "traditional" -# (and highly broken) format. -name_opt = ca_default # Subject Name options -cert_opt = ca_default # Certificate field options - -# Extension copying option: use with caution. -# copy_extensions = copy - -# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs -# so this is commented out by default to leave a V1 CRL. -# crlnumber must also be commented out to leave a V1 CRL. -# crl_extensions = crl_ext - -default_days = 365 # how long to certify for -default_crl_days= 30 # how long before next CRL -default_md = default # use public key default MD -preserve = no # keep passed DN ordering - -# A few difference way of specifying how similar the request should look -# For type CA, the listed attributes must be the same, and the optional -# and supplied fields are just that :-) -policy = policy_match - -# For the CA policy -[ policy_match ] -countryName = match -stateOrProvinceName = match -organizationName = match -organizationalUnitName = optional -commonName = supplied -emailAddress = optional - -# For the 'anything' policy -# At this point in time, you must list all acceptable 'object' -# types. 
-[ policy_anything ] -countryName = optional -stateOrProvinceName = optional -localityName = optional -organizationName = optional -organizationalUnitName = optional -commonName = supplied -emailAddress = optional - -#################################################################### -[ req ] -default_bits = 2048 -default_keyfile = privkey.pem -distinguished_name = req_distinguished_name -attributes = req_attributes -x509_extensions = v3_ca # The extentions to add to the self signed cert - -# Passwords for private keys if not present they will be prompted for -# input_password = secret -# output_password = secret - -# This sets a mask for permitted string types. There are several options. -# default: PrintableString, T61String, BMPString. -# pkix : PrintableString, BMPString (PKIX recommendation before 2004) -# utf8only: only UTF8Strings (PKIX recommendation after 2004). -# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings). -# MASK:XXXX a literal mask value. -# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings. -string_mask = utf8only - -# req_extensions = v3_req # The extensions to add to a certificate request - -[ req_distinguished_name ] -countryName = Country Name (2 letter code) -countryName_default = US -countryName_min = 2 -countryName_max = 2 - -stateOrProvinceName = State or Province Name (full name) -stateOrProvinceName_default = Washington - -localityName = Locality Name (eg, city) -localityName_default = Seattle - -0.organizationName = Organization Name (eg, company) -0.organizationName_default = Basho - -# we can do this but it is not needed normally :-) -#1.organizationName = Second Organization Name (eg, company) -#1.organizationName_default = Clients - -organizationalUnitName = Organizational Unit Name (eg, section) -organizationalUnitName_default = Riak Python Client - -commonName = Common Name (e.g. 
server FQDN or YOUR name) -commonName_max = 64 - -emailAddress = Email Address -emailAddress_max = 64 -emailAddress_default = clients@basho.com - -# SET-ex3 = SET extension number 3 - -[ req_attributes ] -challengePassword = A challenge password -challengePassword_min = 4 -challengePassword_max = 20 - -unstructuredName = Basho Technologies - -[ usr_cert ] - -# These extensions are added when 'ca' signs a request. - -# This goes against PKIX guidelines but some CAs do it and some software -# requires this to avoid interpreting an end user certificate as a CA. - -basicConstraints=CA:FALSE - -# Here are some examples of the usage of nsCertType. If it is omitted -# the certificate can be used for anything *except* object signing. - -nsCertType = server, client - -# This is OK for an SSL server. -# nsCertType = server - -# For an object signing certificate this would be used. -# nsCertType = objsign - -# For normal client use this is typical -# nsCertType = client, email - -# and for everything including object signing: -# nsCertType = client, email, objsign - -# This is typical in keyUsage for a client certificate. -keyUsage = nonRepudiation, digitalSignature, keyEncipherment - -# This will be displayed in Netscape's comment listbox. -nsComment = "Riak Python Client Testing Certificate" - -# PKIX recommendations harmless if included in all certificates. -subjectKeyIdentifier=hash -authorityKeyIdentifier=keyid,issuer - -# This stuff is for subjectAltName and issuerAltname. -# Import the email address. -# subjectAltName=email:copy -# An alternative to produce certificates that aren't -# deprecated according to PKIX. -# subjectAltName=email:move - -# Copy subject details -# issuerAltName=issuer:copy - -#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem -#nsBaseUrl -#nsRevocationUrl -#nsRenewalUrl -#nsCaPolicyUrl -#nsSslServerName - -# This is required for TSA certificates. 
-# extendedKeyUsage = critical,timeStamping -extendedKeyUsage = serverAuth,clientAuth - -[ v3_req ] - -# Extensions to add to a certificate request - -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment - -[ v3_ca ] - - -# Extensions for a typical CA - - -# PKIX recommendation. - -subjectKeyIdentifier=hash - -authorityKeyIdentifier=keyid:always,issuer - -# This is what PKIX recommends but some broken software chokes on critical -# extensions. -#basicConstraints = critical,CA:true -# So we do this instead. -basicConstraints = CA:true - -# Key usage: this is typical for a CA certificate. However since it will -# prevent it being used as an test self-signed certificate it is best -# left out by default. -# keyUsage = cRLSign, keyCertSign - -# Some might want this also -# nsCertType = sslCA, emailCA - -# Include email address in subject alt name: another PKIX recommendation -# subjectAltName=email:copy -# Copy issuer details -# issuerAltName=issuer:copy - -# DER hex encoding of an extension: beware experts only! -# obj=DER:02:03 -# Where 'obj' is a standard or added object -# You can even override a supported extension: -# basicConstraints= critical, DER:30:03:01:01:FF - -[ crl_ext ] - -# CRL extensions. -# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL. - -# issuerAltName=issuer:copy -authorityKeyIdentifier=keyid:always - -[ proxy_cert_ext ] -# These extensions should be added when creating a proxy certificate - -# This goes against PKIX guidelines but some CAs do it and some software -# requires this to avoid interpreting an end user certificate as a CA. - -basicConstraints=CA:FALSE - -# Here are some examples of the usage of nsCertType. If it is omitted -# the certificate can be used for anything *except* object signing. - -# This is OK for an SSL server. -# nsCertType = server - -# For an object signing certificate this would be used. 
-# nsCertType = objsign - -# For normal client use this is typical -# nsCertType = client, email - -# and for everything including object signing: -# nsCertType = client, email, objsign - -# This is typical in keyUsage for a client certificate. -# keyUsage = nonRepudiation, digitalSignature, keyEncipherment - -# This will be displayed in Netscape's comment listbox. -nsComment = "OpenSSL Generated Certificate" - -# PKIX recommendations harmless if included in all certificates. -subjectKeyIdentifier=hash -authorityKeyIdentifier=keyid,issuer - -# This stuff is for subjectAltName and issuerAltname. -# Import the email address. -# subjectAltName=email:copy -# An alternative to produce certificates that aren't -# deprecated according to PKIX. -# subjectAltName=email:move - -# Copy subject details -# issuerAltName=issuer:copy - -#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem -#nsBaseUrl -#nsRevocationUrl -#nsRenewalUrl -#nsCaPolicyUrl -#nsSslServerName - -# This really needs to be in place for it to be a proxy certificate. -proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo - -#################################################################### -[ tsa ] - -default_tsa = tsa_config1 # the default TSA section - -[ tsa_config1 ] - -# These are used by the TSA reply generation only. 
-dir = ./demoCA # TSA root directory -serial = $dir/tsaserial # The current serial number (mandatory) -crypto_device = builtin # OpenSSL engine to use for signing -signer_cert = $dir/tsacert.pem # The TSA signing certificate - # (optional) -certs = $dir/cacert.pem # Certificate chain to include in reply - # (optional) -signer_key = $dir/private/tsakey.pem # The TSA private key (optional) - -default_policy = tsa_policy1 # Policy if request did not specify it - # (optional) -other_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional) -digests = md5, sha1 # Acceptable message digests (mandatory) -accuracy = secs:1, millisecs:500, microsecs:100 # (optional) -clock_precision_digits = 0 # number of digits after dot. (optional) -ordering = yes # Is ordering defined for timestamps? - # (optional, default: no) -tsa_name = yes # Must the TSA name be included in the reply? - # (optional, default: no) -ess_cert_id_chain = no # Must the ESS cert id chain be included? - # (optional, default: no) diff --git a/riak/tests/resources/server.crl b/riak/tests/resources/server.crl deleted file mode 100644 index 317b7a5c..00000000 --- a/riak/tests/resources/server.crl +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN X509 CRL----- -MIICBjCB7wIBATANBgkqhkiG9w0BAQUFADCBkDELMAkGA1UEBhMCVVMxEzARBgNV -BAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAMBgNVBAoMBUJhc2hv -MRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNVBAMMAkNBMSAwHgYJ -KoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbRcNMTQwOTI0MjI1NTI1WhcNMTQx -MDI0MjI1NTI1WjAXMBUCBACwhUIXDTE0MDkyNDIyNTUyNVqgETAPMA0GA1UdFAQG -AgQA8ocDMA0GCSqGSIb3DQEBBQUAA4IBAQC4ReWlkd6Ld7Unk3WPyUsvh8gUn5PJ -J5usc9KVO3iiLZEK57LGtwVFoUPVrt9F4Mg9+0qp1NTzGmgLzkkTyUGzEbTJce/L -3m5zTJW6x8wOFT2b/dQRoO6WUsSaJ4ZkUX04RZc7NQ8SWozxW6mZSrQrEqXNtjUo -1ifsnNyN5OxPZ/PV8DQN5Rtl87j5ETTUJ62tDucnEwoFHqN4AY0riLqLWmyHHokx -gQtQscv7LmCZHPF8hgwYxzatxAEBR1MhZu1jku/j7Im7EDwxGyfvMbPZOhuVkuGI -y7SFCmvcwIR4APHtB3io93UngiQ64PxBOFQxNh9P2tf2fi0dI8oJvm8F ------END X509 CRL----- diff --git 
a/riak/tests/resources/server.crt b/riak/tests/resources/server.crt deleted file mode 100644 index 052697f2..00000000 --- a/riak/tests/resources/server.crt +++ /dev/null @@ -1,95 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 11568450 (0xb08542) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=Washington, L=Seattle, O=Basho, OU=Riak Python Client, CN=CA/emailAddress=clients@basho.com - Validity - Not Before: Sep 24 22:55:25 2014 GMT - Not After : Sep 21 22:55:25 2024 GMT - Subject: C=US, ST=Washington, O=Basho, OU=Riak Python Client, CN=riak@127.0.0.1/emailAddress=clients@basho.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:eb:38:1c:40:d6:8d:e9:65:4d:d1:8f:1a:c3:1f: - 03:7f:a0:1c:cc:c3:e2:53:fb:b0:27:60:2d:2a:0e: - ad:5c:67:7c:c3:62:f1:79:d5:04:c0:83:b6:5a:41: - f2:a7:8d:f8:4a:50:17:35:c7:6f:75:af:72:e7:44: - 65:99:e3:cb:c7:88:86:66:64:20:ce:6f:f8:14:5d: - 96:dc:19:7a:5c:4e:24:f7:50:df:d7:71:f5:2e:ce: - 73:d4:a2:5e:98:52:0e:66:e3:88:22:d9:8d:88:8e: - ac:96:2d:b1:0c:05:e8:59:30:4c:0e:fa:e7:8d:29: - 7f:b4:93:93:92:9c:8b:07:b9:b1:da:02:c4:d2:41: - 57:df:d1:ab:4c:15:e2:9f:da:65:5d:48:88:fa:51: - 0b:79:b9:3c:99:0a:16:de:66:58:13:cb:98:48:bd: - 2b:bd:d2:56:35:bf:16:c5:42:5d:39:1f:3a:26:8e: - 0c:7f:a4:a0:cb:4b:90:d9:49:a0:1d:52:c9:64:d6: - 10:01:25:ae:15:a5:aa:92:dd:cf:91:92:16:0d:9d: - 95:ec:1f:e6:3c:8c:00:7f:30:c4:e1:f5:87:c9:5e: - 08:a2:2b:8c:63:eb:d0:46:9d:83:66:42:d8:60:ed: - 77:c2:6e:93:ad:89:bc:3d:5b:a5:c9:5f:dd:8f:69: - c5:a9 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Cert Type: - SSL Client, SSL Server - X509v3 Key Usage: - Digital Signature, Non Repudiation, Key Encipherment - Netscape Comment: - Riak Python Client Testing Certificate - X509v3 Subject Key Identifier: - 6C:09:ED:0E:F7:5B:0C:A0:8E:7C:31:2F:78:F6:78:45:8D:69:EF:36 - X509v3 Authority Key Identifier: - 
DirName:/C=US/ST=Washington/L=Seattle/O=Basho/OU=Riak Python Client/CN=CA/emailAddress=clients@basho.com - serial:DB:E9:54:15:F5:5D:C0:FF - - X509v3 Extended Key Usage: - TLS Web Server Authentication, TLS Web Client Authentication - Signature Algorithm: sha1WithRSAEncryption - 85:7a:8e:95:68:8e:e5:4f:be:89:b4:9b:ab:bc:43:b0:4f:7d: - ad:14:e4:a4:3b:c2:a8:b3:42:78:f7:91:78:34:96:1a:93:57: - d5:4f:23:7d:b7:62:cf:0c:cf:59:09:4b:99:93:41:b6:ed:a7: - d9:51:6f:4f:83:c3:93:2f:9d:59:96:c0:63:47:1d:9b:e9:5d: - 2f:aa:4e:7c:bf:9a:5e:12:66:4a:83:df:e2:e3:14:49:ad:96: - 61:9c:55:fa:7e:ed:3a:7d:a2:bf:fd:8d:e6:5f:fb:d8:c0:a2: - c3:32:a7:c7:e6:65:77:d7:94:cf:54:67:e0:b9:86:bc:28:1c: - 19:71:1a:e7:23:42:81:52:50:29:07:10:6f:d3:c0:42:92:ba: - 36:9e:f5:8e:0f:ab:3d:a4:e7:79:16:e5:ec:fd:fd:dc:fd:1f: - 35:87:67:d8:dd:15:68:74:01:b9:cb:57:a0:9b:7d:bd:b6:12: - 8d:7f:b8:5e:c5:f3:fb:4d:74:72:78:59:1d:f2:b6:80:ae:fe: - ee:4d:c6:a6:89:89:ca:ba:48:67:4d:35:e5:9f:bc:cc:ea:ef: - 26:3e:02:37:5f:b3:c6:ba:2e:ac:0a:fd:60:f5:85:2e:fd:81: - 68:78:b9:47:81:c7:f8:0e:6f:0a:08:a6:b8:41:97:ac:db:3b: - 75:aa:24:c8 ------BEGIN CERTIFICATE----- -MIIE8jCCA9qgAwIBAgIEALCFQjANBgkqhkiG9w0BAQUFADCBkDELMAkGA1UEBhMC -VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAMBgNV -BAoMBUJhc2hvMRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNVBAMM -AkNBMSAwHgYJKoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbTAeFw0xNDA5MjQy -MjU1MjVaFw0yNDA5MjEyMjU1MjVaMIGKMQswCQYDVQQGEwJVUzETMBEGA1UECAwK -V2FzaGluZ3RvbjEOMAwGA1UECgwFQmFzaG8xGzAZBgNVBAsMElJpYWsgUHl0aG9u -IENsaWVudDEXMBUGA1UEAwwOcmlha0AxMjcuMC4wLjExIDAeBgkqhkiG9w0BCQEW -EWNsaWVudHNAYmFzaG8uY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEA6zgcQNaN6WVN0Y8awx8Df6AczMPiU/uwJ2AtKg6tXGd8w2LxedUEwIO2WkHy -p434SlAXNcdvda9y50RlmePLx4iGZmQgzm/4FF2W3Bl6XE4k91Df13H1Ls5z1KJe -mFIOZuOIItmNiI6sli2xDAXoWTBMDvrnjSl/tJOTkpyLB7mx2gLE0kFX39GrTBXi -n9plXUiI+lELebk8mQoW3mZYE8uYSL0rvdJWNb8WxUJdOR86Jo4Mf6Sgy0uQ2Umg -HVLJZNYQASWuFaWqkt3PkZIWDZ2V7B/mPIwAfzDE4fWHyV4IoiuMY+vQRp2DZkLY 
-YO13wm6TrYm8PVulyV/dj2nFqQIDAQABo4IBVjCCAVIwCQYDVR0TBAIwADARBglg -hkgBhvhCAQEEBAMCBsAwCwYDVR0PBAQDAgXgMDUGCWCGSAGG+EIBDQQoFiZSaWFr -IFB5dGhvbiBDbGllbnQgVGVzdGluZyBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUbAnt -DvdbDKCOfDEvePZ4RY1p7zYwga8GA1UdIwSBpzCBpKGBlqSBkzCBkDELMAkGA1UE -BhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24xEDAOBgNVBAcMB1NlYXR0bGUxDjAM -BgNVBAoMBUJhc2hvMRswGQYDVQQLDBJSaWFrIFB5dGhvbiBDbGllbnQxCzAJBgNV -BAMMAkNBMSAwHgYJKoZIhvcNAQkBFhFjbGllbnRzQGJhc2hvLmNvbYIJANvpVBX1 -XcD/MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjANBgkqhkiG9w0BAQUF -AAOCAQEAhXqOlWiO5U++ibSbq7xDsE99rRTkpDvCqLNCePeReDSWGpNX1U8jfbdi -zwzPWQlLmZNBtu2n2VFvT4PDky+dWZbAY0cdm+ldL6pOfL+aXhJmSoPf4uMUSa2W -YZxV+n7tOn2iv/2N5l/72MCiwzKnx+Zld9eUz1Rn4LmGvCgcGXEa5yNCgVJQKQcQ -b9PAQpK6Np71jg+rPaTneRbl7P393P0fNYdn2N0VaHQBuctXoJt9vbYSjX+4XsXz -+010cnhZHfK2gK7+7k3GpomJyrpIZ0015Z+8zOrvJj4CN1+zxrourAr9YPWFLv2B -aHi5R4HH+A5vCgimuEGXrNs7daokyA== ------END CERTIFICATE----- diff --git a/riak/tests/resources/server.key b/riak/tests/resources/server.key deleted file mode 100644 index 23531a6d..00000000 --- a/riak/tests/resources/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA6zgcQNaN6WVN0Y8awx8Df6AczMPiU/uwJ2AtKg6tXGd8w2Lx -edUEwIO2WkHyp434SlAXNcdvda9y50RlmePLx4iGZmQgzm/4FF2W3Bl6XE4k91Df -13H1Ls5z1KJemFIOZuOIItmNiI6sli2xDAXoWTBMDvrnjSl/tJOTkpyLB7mx2gLE -0kFX39GrTBXin9plXUiI+lELebk8mQoW3mZYE8uYSL0rvdJWNb8WxUJdOR86Jo4M -f6Sgy0uQ2UmgHVLJZNYQASWuFaWqkt3PkZIWDZ2V7B/mPIwAfzDE4fWHyV4IoiuM -Y+vQRp2DZkLYYO13wm6TrYm8PVulyV/dj2nFqQIDAQABAoIBAB09jW2V/nA8Mq9R -Xd9RtKqh6dKeqz0Ldbt/Xj3zMyPgjYbwftpJW9zewsV3m0WvBWWfjTMXRsjXda5A -N20o/0UfuK1z1wwyqSFG/SEBXc+puzivahQrS3J1IbsWMDI5SScLM/o5FzoZmmTU -60R2W4ry0RGvqSdIOGLnUZoRMctU0j85+M6gZy2a4CB8eVdH9Awp6IDnn2s6VNOI -ZVDUJMa0I5INQD3JCN6u12nYSf82jGT3OKmB+VbMxl4moWJ1IqwSDBcy+YyJY37J -nEtrtShw1UOvwBU8BQKMEF2XQ3uZxGvVPt5WDXKJh7OQur88YzqphtTj3BoGyu/H -x/qaZIECgYEA9tWB0wffwCdZizqvZCfUTx3SPqYwc96Siase773Dr+Gzcg/MwBV6 
-y4veIXRb5VWZOoaytHGKhpoYzmiLpuPcuqRzOpSv2gMhMmtl6vMIO+l30snI9lKF -BDcFpvxEGG1ztoxTSA3lfNRbUCToQr7669e7fHlU+zBOyEZh3TAoHxECgYEA8/Qw -QAujrjgLGORqbWH0sysyJSlbb8RSmDlzmDXc1SqOOhNU9rOVJAvVA7+0qx+ODhgX -f5/qD9x6tkg9B5z93LBWgs5O51mI1goVueo8SG4Nw78LLzswTGxVYghFBlFJX5QE -XLut6Neg7o/9uItRpe7i7oesmaaNks1Y0gL87RkCgYEAwtjr8MBoenEVmHis920R -hZrO2rGp0e3C03YHp8yu6upKEyIxyPerxX4VzWbjG/gkSzUFYLe4WGTQbC/O/eEl -3xft9jJlVr3duMVa4MsUlubHtdegEgI2Cou8pILC5l2QWGwRWfHOVGn14yAfUIEd -5oqX77x/vwH1TtdwbYCUS2ECgYAvz2+VXbKadklWe42QGl6GrrSJK+3LncJCKyBI -eGXrMQfmwdso7lEQW3FH5s5Vqz1/7aDNVl3c9ezmxImRcGcgVT7fK+ey141FxXw0 -j395AniYIFzkyMjScXjaWZyNfGjQ3oVsVyviMkBMANRM0qER0BuRe+2Lv2SHnM8H -eaZwIQKBgCb5Mc1D7pyyGW5SusfVjxUWMKBdsUaOsgr9jbBvjH3URHHoBpNd1Her -muJZdrKjEs4k1TUMs3GX2bGkHW/fSVPAhsk0DjaMfh1Q/PrudRur15eyVkM5sup9 -Rd8MzUiQ1ybJgrhV0T6ssXRY7cPwbxtX/wMQnmwAvuAHO3X8o0Zq ------END RSA PRIVATE KEY----- diff --git a/riak/tests/suite.py b/riak/tests/suite.py index 97f3532c..782be4a0 100644 --- a/riak/tests/suite.py +++ b/riak/tests/suite.py @@ -1,10 +1,19 @@ -import os.path -import platform +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
-if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +import os.path +import unittest def additional_tests(): diff --git a/riak/tests/test_2i.py b/riak/tests/test_2i.py index 66dd8bee..01f02aee 100644 --- a/riak/tests/test_2i.py +++ b/riak/tests/test_2i.py @@ -1,15 +1,26 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # -*- coding: utf-8 -*- -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +import unittest from riak import RiakError -from . import SKIP_INDEXES +from riak.tests import RUN_INDEXES +from riak.tests.base import IntegrationTestBase -class TwoITests(object): +class TwoITests(IntegrationTestBase, unittest.TestCase): def is_2i_supported(self): # Immediate test to see if 2i is even supported w/ the backend try: @@ -20,7 +31,7 @@ def is_2i_supported(self): return False return True # it failed, but is supported! - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEXES is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_secondary_index_store(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -101,7 +112,7 @@ def test_secondary_index_store(self): # Clean up... 
bucket.get('mykey1').delete() - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEXES is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_set_indexes(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -119,7 +130,7 @@ def test_set_indexes(self): self.assertEqual(1, len(result)) self.assertEqual('foo', str(result[0])) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEXES is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_remove_indexes(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -179,7 +190,7 @@ def test_remove_indexes(self): self.assertEqual(1, len([x for x in bar.indexes if x[0] == 'baz_bin'])) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEXES is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_secondary_index_query(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -208,7 +219,7 @@ def test_secondary_index_query(self): self.assertEqual(3, len(results)) self.assertEqual(set([o2.key, o3.key, o4.key]), vals) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEXES is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_secondary_index_invalid_name(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -218,7 +229,7 @@ def test_secondary_index_invalid_name(self): with self.assertRaises(RiakError): bucket.new('k', 'a').add_index('field1', 'value1') - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_set_index(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -236,7 +247,7 @@ def test_set_index(self): obj.set_index('bar2_int', 10) self.assertEqual(set((('bar_int', 3), ('bar2_int', 10))), obj.indexes) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_stream_index(self): if not 
self.is_2i_supported(): raise unittest.SkipTest("2I not supported") @@ -249,7 +260,7 @@ def test_stream_index(self): self.assertEqual(sorted([o1.key, o2.key, o3.key]), sorted(keys)) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_return_terms(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -273,7 +284,7 @@ def test_index_return_terms(self): self.assertEqual([(1002, o2.key), (1003, o3.key), (1004, o4.key)], sorted(spairs)) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_pagination(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -308,7 +319,7 @@ def test_index_pagination(self): self.assertEqual(3, pagecount) self.assertEqual([o1.key, o2.key, o3.key, o4.key], presults) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_pagination_return_terms(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -333,7 +344,7 @@ def test_index_pagination_return_terms(self): self.assertLessEqual(2, len(results)) self.assertEqual([('val3', o3.key), ('val4', o4.key)], page2) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_pagination_stream(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -376,7 +387,7 @@ def test_index_pagination_stream(self): self.assertEqual(3, pagecount) self.assertEqual([o1.key, o2.key, o3.key, o4.key], presults) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_pagination_stream_return_terms(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -408,7 +419,7 @@ def 
test_index_pagination_stream_return_terms(self): self.assertLessEqual(2, len(results)) self.assertEqual([('val3', o3.key), ('val4', o4.key)], results) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_eq_query_return_terms(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -418,7 +429,7 @@ def test_index_eq_query_return_terms(self): results = bucket.get_index('field2_int', 1001, return_terms=True) self.assertEqual([(1001, o1.key)], results) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_eq_query_stream_return_terms(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -431,25 +442,26 @@ def test_index_eq_query_stream_return_terms(self): self.assertEqual([(1001, o1.key)], results) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_timeout(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") bucket, o1, o2, o3, o4 = self._create_index_objects() - with self.assertRaises(RiakError): - bucket.get_index('field1_bin', 'val1', timeout=1) - - with self.assertRaises(RiakError): - for i in bucket.stream_index('field1_bin', 'val1', timeout=1): - pass + # Disable timeouts since they are too racy + # with self.assertRaises(RiakError): + # bucket.get_index('field1_bin', 'val1', timeout=1) + # + # with self.assertRaises(RiakError): + # for i in bucket.stream_index('field1_bin', 'val1', timeout=1): + # pass # This should not raise self.assertEqual([o1.key], bucket.get_index('field1_bin', 'val1', timeout='infinity')) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_regex(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") @@ -464,7 
+476,7 @@ def test_index_regex(self): self.assertEqual([('val2', o2.key)], results) - @unittest.skipIf(SKIP_INDEXES, 'SKIP_INDEX is defined') + @unittest.skipUnless(RUN_INDEXES, 'RUN_INDEXES is 0') def test_index_falsey_endkey_gh378(self): if not self.is_2i_supported(): raise unittest.SkipTest("2I is not supported") diff --git a/riak/tests/test_all.py b/riak/tests/test_all.py deleted file mode 100644 index 48130e7e..00000000 --- a/riak/tests/test_all.py +++ /dev/null @@ -1,423 +0,0 @@ -# -*- coding: utf-8 -*- -import random -import platform -from six import PY2 -from threading import Thread -if PY2: - from Queue import Queue -else: - from queue import Queue -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest - -from riak import RiakError -from riak.client import RiakClient -from riak.riak_object import RiakObject - -from riak.tests.test_yokozuna import YZSearchTests -from riak.tests.test_search import SearchTests, \ - EnableSearchTests, SolrSearchTests -from riak.tests.test_mapreduce import MapReduceAliasTests, \ - ErlangMapReduceTests, JSMapReduceTests, LinkTests, MapReduceStreamTests -from riak.tests.test_kv import BasicKVTests, KVFileTests, \ - BucketPropsTest, CounterTests -from riak.tests.test_2i import TwoITests -from riak.tests.test_btypes import BucketTypeTests -from riak.tests.test_security import SecurityTests -from riak.tests.test_datatypes import DatatypeIntegrationTests - -from riak.tests import HOST, PB_HOST, PB_PORT, HTTP_HOST, HTTP_PORT, \ - HAVE_PROTO, DUMMY_HTTP_PORT, DUMMY_PB_PORT, \ - SKIP_SEARCH, RUN_YZ, SECURITY_CREDS, SKIP_POOL, test_six - -testrun_search_bucket = None -testrun_props_bucket = None -testrun_sibs_bucket = None -testrun_yz = {'btype': None, 'bucket': None, 'index': None} -testrun_yz_index = {'btype': None, 'bucket': None, 'index': None} -testrun_yz_mr = {'btype': None, 'bucket': None, 'index': None} - - -def setUpModule(): - global testrun_search_bucket, testrun_props_bucket, \ - 
testrun_sibs_bucket, testrun_yz, testrun_yz_index, testrun_yz_mr - - c = RiakClient(host=PB_HOST, http_port=HTTP_PORT, - pb_port=PB_PORT, credentials=SECURITY_CREDS) - - testrun_props_bucket = 'propsbucket' - testrun_sibs_bucket = 'sibsbucket' - c.bucket(testrun_sibs_bucket).allow_mult = True - - if (not SKIP_SEARCH and not RUN_YZ): - testrun_search_bucket = 'searchbucket' - b = c.bucket(testrun_search_bucket) - b.enable_search() - - if RUN_YZ: - # YZ index on bucket of the same name - testrun_yz = {'btype': None, 'bucket': 'yzbucket', - 'index': 'yzbucket'} - # YZ index on bucket of a different name - testrun_yz_index = {'btype': None, 'bucket': 'yzindexbucket', - 'index': 'yzindex'} - # Add bucket and type for Search 2.0 -> MapReduce - testrun_yz_mr = {'btype': 'pytest-mr', 'bucket': 'mrbucket', - 'index': 'mrbucket'} - - for yz in (testrun_yz, testrun_yz_index, testrun_yz_mr): - c.create_search_index(yz['index']) - if yz['btype'] is not None: - t = c.bucket_type(yz['btype']) - b = t.bucket(yz['bucket']) - else: - b = c.bucket(yz['bucket']) - # Keep trying to set search bucket property until it succeeds - index_set = False - while not index_set: - try: - b.set_property('search_index', yz['index']) - index_set = True - except RiakError: - pass - - -def tearDownModule(): - global testrun_search_bucket, testrun_props_bucket, \ - testrun_sibs_bucket, testrun_yz_bucket - - c = RiakClient(host=HTTP_HOST, http_port=HTTP_PORT, - pb_port=PB_PORT, credentials=SECURITY_CREDS) - - c.bucket(testrun_sibs_bucket).clear_properties() - c.bucket(testrun_props_bucket).clear_properties() - - if not SKIP_SEARCH and not RUN_YZ: - b = c.bucket(testrun_search_bucket) - b.clear_properties() - - if RUN_YZ: - for yz in (testrun_yz, testrun_yz_index, testrun_yz_mr): - if yz['btype'] is not None: - t = c.bucket_type(yz['btype']) - b = t.bucket(yz['bucket']) - else: - b = c.bucket(yz['bucket']) - b.set_property('search_index', '_dont_index_') - c.delete_search_index(yz['index']) - for keys in 
b.stream_keys(): - for key in keys: - b.delete(key) - - -class BaseTestCase(object): - - host = None - pb_port = None - http_port = None - credentials = None - - @staticmethod - def randint(): - return random.randint(1, 999999) - - @staticmethod - def randname(length=12): - out = '' - for i in range(length): - out += chr(random.randint(ord('a'), ord('z'))) - return out - - def create_client(self, host=None, http_port=None, pb_port=None, - protocol=None, credentials=None, - **client_args): - host = host or self.host or HOST - http_port = http_port or self.http_port or HTTP_PORT - pb_port = pb_port or self.pb_port or PB_PORT - protocol = protocol or self.protocol - credentials = credentials or SECURITY_CREDS - return RiakClient(protocol=protocol, - host=host, - http_port=http_port, - credentials=credentials, - pb_port=pb_port, **client_args) - - def setUp(self): - self.bucket_name = self.randname() - self.key_name = self.randname() - self.search_bucket = testrun_search_bucket - self.sibs_bucket = testrun_sibs_bucket - self.props_bucket = testrun_props_bucket - self.yz = testrun_yz - self.yz_index = testrun_yz_index - self.yz_mr = testrun_yz_mr - self.credentials = SECURITY_CREDS - - self.client = self.create_client() - - -class ClientTests(object): - def test_request_retries(self): - # We guess at some ports that will be unused by Riak or - # anything else. - client = self.create_client(http_port=DUMMY_HTTP_PORT, - pb_port=DUMMY_PB_PORT) - - # If retries are exhausted, the final result should also be an - # error. - self.assertRaises(IOError, client.ping) - - def test_request_retries_configurable(self): - # We guess at some ports that will be unused by Riak or - # anything else. 
- client = self.create_client(http_port=DUMMY_HTTP_PORT, - pb_port=DUMMY_PB_PORT) - - # Change the retry count - client.retries = 10 - self.assertEqual(10, client.retries) - - # The retry count should be a thread local - retries = Queue() - - def _target(): - retries.put(client.retries) - retries.join() - - th = Thread(target=_target) - th.start() - self.assertEqual(3, retries.get(block=True)) - retries.task_done() - th.join() - - # Modify the retries in a with statement - with client.retry_count(5): - self.assertEqual(5, client.retries) - self.assertRaises(IOError, client.ping) - - def test_timeout_validation(self): - bucket = self.client.bucket(self.bucket_name) - key = self.key_name - obj = bucket.new(key) - for bad in [0, -1, False, "foo"]: - with self.assertRaises(ValueError): - self.client.get_buckets(timeout=bad) - - with self.assertRaises(ValueError): - for i in self.client.stream_buckets(timeout=bad): - pass - - with self.assertRaises(ValueError): - self.client.get_keys(bucket, timeout=bad) - - with self.assertRaises(ValueError): - for i in self.client.stream_keys(bucket, timeout=bad): - pass - - with self.assertRaises(ValueError): - self.client.put(obj, timeout=bad) - - with self.assertRaises(ValueError): - self.client.get(obj, timeout=bad) - - with self.assertRaises(ValueError): - self.client.delete(obj, timeout=bad) - - with self.assertRaises(ValueError): - self.client.mapred([], [], bad) - - with self.assertRaises(ValueError): - for i in self.client.stream_mapred([], [], bad): - pass - - with self.assertRaises(ValueError): - self.client.get_index(bucket, 'field1_bin', 'val1', 'val4', - timeout=bad) - - with self.assertRaises(ValueError): - for i in self.client.stream_index(bucket, 'field1_bin', 'val1', - 'val4', timeout=bad): - pass - - def test_multiget_bucket(self): - """ - Multiget operations can be invoked on buckets. 
- """ - keys = [self.key_name, self.randname(), self.randname()] - for key in keys: - if PY2: - self.client.bucket(self.bucket_name)\ - .new(key, encoded_data=key, content_type="text/plain")\ - .store() - else: - self.client.bucket(self.bucket_name)\ - .new(key, data=key, - content_type="text/plain").store() - results = self.client.bucket(self.bucket_name).multiget(keys) - for obj in results: - self.assertIsInstance(obj, RiakObject) - self.assertTrue(obj.exists) - if PY2: - self.assertEqual(obj.key, obj.encoded_data) - else: - self.assertEqual(obj.key, obj.data) - - def test_multiget_errors(self): - """ - Unrecoverable errors are captured along with the bucket/key - and not propagated. - """ - keys = [self.key_name, self.randname(), self.randname()] - client = self.create_client(http_port=DUMMY_HTTP_PORT, - pb_port=DUMMY_PB_PORT) - results = client.bucket(self.bucket_name).multiget(keys) - for failure in results: - self.assertIsInstance(failure, tuple) - self.assertEqual(failure[0], 'default') - self.assertEqual(failure[1], self.bucket_name) - self.assertIn(failure[2], keys) - if PY2: - self.assertIsInstance(failure[3], StandardError) - else: - self.assertIsInstance(failure[3], Exception) - - def test_multiget_notfounds(self): - """ - Not founds work in multiget just the same as get. - """ - keys = [("default", self.bucket_name, self.key_name), - ("default", self.bucket_name, self.randname())] - results = self.client.multiget(keys) - for obj in results: - self.assertIsInstance(obj, RiakObject) - self.assertFalse(obj.exists) - - def test_multiget_pool_size(self): - """ - The pool size for multigets can be configured at client initiation - time. Multiget still works as expected. 
- """ - client = self.create_client(multiget_pool_size=2) - self.assertEqual(2, client._multiget_pool._size) - - keys = [self.key_name, self.randname(), self.randname()] - for key in keys: - if PY2: - client.bucket(self.bucket_name)\ - .new(key, encoded_data=key, content_type="text/plain")\ - .store() - else: - client.bucket(self.bucket_name)\ - .new(key, data=key, content_type="text/plain")\ - .store() - - results = client.bucket(self.bucket_name).multiget(keys) - for obj in results: - self.assertIsInstance(obj, RiakObject) - self.assertTrue(obj.exists) - if PY2: - self.assertEqual(obj.key, obj.encoded_data) - else: - self.assertEqual(obj.key, obj.data) - - @unittest.skipIf(SKIP_POOL, 'SKIP_POOL is set') - def test_pool_close(self): - """ - Iterate over the connection pool and close all connections. - """ - # Do something to add to the connection pool - self.test_multiget_bucket() - if self.client.protocol == 'pbc': - self.assertGreater(len(self.client._pb_pool.resources), 1) - else: - self.assertGreater(len(self.client._http_pool.resources), 1) - # Now close them all up - self.client.close() - self.assertEqual(len(self.client._http_pool.resources), 0) - self.assertEqual(len(self.client._pb_pool.resources), 0) - - -class RiakPbcTransportTestCase(BasicKVTests, - KVFileTests, - BucketPropsTest, - TwoITests, - LinkTests, - ErlangMapReduceTests, - JSMapReduceTests, - MapReduceAliasTests, - MapReduceStreamTests, - EnableSearchTests, - SearchTests, - YZSearchTests, - ClientTests, - CounterTests, - BucketTypeTests, - SecurityTests, - DatatypeIntegrationTests, - BaseTestCase, - unittest.TestCase, - test_six.Comparison): - - def setUp(self): - if not HAVE_PROTO: - self.skipTest('protobuf is unavailable') - self.host = PB_HOST - self.pb_port = PB_PORT - self.protocol = 'pbc' - super(RiakPbcTransportTestCase, self).setUp() - - def test_uses_client_id_if_given(self): - zero_client_id = "\0\0\0\0" - c = self.create_client(client_id=zero_client_id) - 
self.assertEqual(zero_client_id, c.client_id) - - -class RiakHttpTransportTestCase(BasicKVTests, - KVFileTests, - BucketPropsTest, - TwoITests, - LinkTests, - ErlangMapReduceTests, - JSMapReduceTests, - MapReduceAliasTests, - MapReduceStreamTests, - EnableSearchTests, - SolrSearchTests, - SearchTests, - YZSearchTests, - ClientTests, - CounterTests, - BucketTypeTests, - SecurityTests, - DatatypeIntegrationTests, - BaseTestCase, - unittest.TestCase, - test_six.Comparison): - - def setUp(self): - self.host = HTTP_HOST - self.http_port = HTTP_PORT - self.protocol = 'http' - super(RiakHttpTransportTestCase, self).setUp() - - def test_no_returnbody(self): - bucket = self.client.bucket(self.bucket_name) - o = bucket.new(self.key_name, "bar").store(return_body=False) - self.assertEqual(o.vclock, None) - - def test_too_many_link_headers_shouldnt_break_http(self): - bucket = self.client.bucket(self.bucket_name) - o = bucket.new("lots_of_links", "My god, it's full of links!") - for i in range(0, 400): - link = ("other", "key%d" % i, "next") - o.add_link(link) - - o.store() - stored_object = bucket.get("lots_of_links") - self.assertEqual(len(stored_object.links), 400) - - -if __name__ == '__main__': - unittest.main() diff --git a/riak/tests/test_btypes.py b/riak/tests/test_btypes.py index c55b3e18..67cd6568 100644 --- a/riak/tests/test_btypes.py +++ b/riak/tests/test_btypes.py @@ -1,16 +1,28 @@ -import platform +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest - -from . import SKIP_BTYPES -from riak.bucket import RiakBucket, BucketType from riak import RiakError, RiakObject +from riak.bucket import RiakBucket, BucketType +from riak.tests import RUN_BTYPES +from riak.tests.base import IntegrationTestBase +from riak.tests.comparison import Comparison -class BucketTypeTests(object): +@unittest.skipUnless(RUN_BTYPES, "RUN_BTYPES is 0") +class BucketTypeTests(IntegrationTestBase, unittest.TestCase, Comparison): def test_btype_init(self): btype = self.client.bucket_type('foo') self.assertIsInstance(btype, BucketType) @@ -40,10 +52,9 @@ def test_btype_repr(self): self.assertEqual("", repr(defbtype)) self.assertEqual("", repr(othertype)) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_btype_get_props(self): defbtype = self.client.bucket_type("default") - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') with self.assertRaises(ValueError): defbtype.get_properties() @@ -52,10 +63,9 @@ def test_btype_get_props(self): self.assertIn('n_val', props) self.assertEqual(3, props['n_val']) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_btype_set_props(self): defbtype = self.client.bucket_type("default") - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') with self.assertRaises(ValueError): defbtype.set_properties({'allow_mult': True}) @@ -71,15 +81,13 @@ def test_btype_set_props(self): finally: btype.set_properties(oldprops) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_btype_set_props_immutable(self): - btype = self.client.bucket_type("pytest-maps") + btype = self.client.bucket_type("maps") with self.assertRaises(RiakError): btype.set_property('datatype', 'counter') - 
@unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_btype_list_buckets(self): - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) obj = bucket.new(self.key_name) obj.data = [1, 2, 3] @@ -92,9 +100,8 @@ def test_btype_list_buckets(self): self.assertIn(bucket, buckets) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_btype_list_keys(self): - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) obj = bucket.new(self.key_name) @@ -108,7 +115,6 @@ def test_btype_list_keys(self): self.assertIn(self.key_name, keys) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_default_btype_list_buckets(self): default_btype = self.client.bucket_type("default") bucket = default_btype.bucket(self.bucket_name) @@ -125,7 +131,6 @@ def test_default_btype_list_buckets(self): self.assertItemsEqual(buckets, self.client.get_buckets()) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_default_btype_list_keys(self): btype = self.client.bucket_type("default") bucket = btype.bucket(self.bucket_name) @@ -144,9 +149,8 @@ def test_default_btype_list_keys(self): oldapikeys = self.client.get_keys(self.client.bucket(self.bucket_name)) self.assertItemsEqual(keys, oldapikeys) - @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set") def test_multiget_bucket_types(self): - btype = self.client.bucket_type('pytest') + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) for i in range(100): @@ -159,3 +163,30 @@ def test_multiget_bucket_types(self): self.assertIsInstance(mobj, RiakObject) self.assertEqual(bucket, mobj.bucket) self.assertEqual(btype, mobj.bucket.bucket_type) + + def test_write_once_bucket_type(self): + bt = 'write_once' + skey = 'write_once-init' + btype = self.client.bucket_type(bt) + bucket = btype.bucket(bt) + try: + sobj 
= bucket.get(skey) + except RiakError as e: + raise unittest.SkipTest(e) + if not sobj.exists: + for i in range(100): + o = bucket.new(self.key_name + str(i)) + o.data = {'id': i} + o.store() + o = bucket.new(skey, data={'id': skey}) + o.store() + + mget = bucket.multiget([self.key_name + str(i) for i in range(100)]) + for mobj in mget: + self.assertIsInstance(mobj, RiakObject) + self.assertEqual(bucket, mobj.bucket) + self.assertEqual(btype, mobj.bucket.bucket_type) + + props = btype.get_properties() + self.assertIn('write_once', props) + self.assertEqual(True, props['write_once']) diff --git a/riak/tests/test_client.py b/riak/tests/test_client.py new file mode 100644 index 00000000..001520d2 --- /dev/null +++ b/riak/tests/test_client.py @@ -0,0 +1,342 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from six import PY2 +from threading import Thread +from riak.riak_object import RiakObject +from riak.transports.tcp import TcpTransport +from riak.tests import DUMMY_HTTP_PORT, DUMMY_PB_PORT, \ + RUN_POOL, RUN_CLIENT +from riak.tests.base import IntegrationTestBase + +if PY2: + from Queue import Queue +else: + from queue import Queue + + +@unittest.skipUnless(RUN_CLIENT, 'RUN_CLIENT is 0') +class ClientTests(IntegrationTestBase, unittest.TestCase): + def test_can_set_tcp_keepalive(self): + if self.protocol == 'pbc': + topts = {'socket_keepalive': True} + c = self.create_client(transport_options=topts) + for i, r in enumerate(c._tcp_pool.resources): + self.assertIsInstance(r, TcpTransport) + self.assertTrue(r._socket_keepalive) + c.close() + else: + pass + + def test_uses_client_id_if_given(self): + if self.protocol == 'pbc': + zero_client_id = "\0\0\0\0" + c = self.create_client(client_id=zero_client_id) + self.assertEqual(zero_client_id, c.client_id) + c.close() + else: + pass + + def test_request_retries(self): + # We guess at some ports that will be unused by Riak or + # anything else. + client = self.create_client(http_port=DUMMY_HTTP_PORT, + pb_port=DUMMY_PB_PORT) + + # If retries are exhausted, the final result should also be an + # error. + self.assertRaises(IOError, client.ping) + client.close() + + def test_request_retries_configurable(self): + # We guess at some ports that will be unused by Riak or + # anything else. 
+ client = self.create_client(http_port=DUMMY_HTTP_PORT, + pb_port=DUMMY_PB_PORT) + + # Change the retry count + client.retries = 10 + self.assertEqual(10, client.retries) + + # The retry count should be a thread local + retries = Queue() + + def _target(): + retries.put(client.retries) + retries.join() + + th = Thread(target=_target) + th.start() + self.assertEqual(3, retries.get(block=True)) + retries.task_done() + th.join() + + # Modify the retries in a with statement + with client.retry_count(5): + self.assertEqual(5, client.retries) + self.assertRaises(IOError, client.ping) + client.close() + + def test_timeout_validation(self): + bucket = self.client.bucket(self.bucket_name) + key = self.key_name + obj = bucket.new(key) + for bad in [0, -1, False, "foo"]: + with self.assertRaises(ValueError): + self.client.get_buckets(timeout=bad) + + with self.assertRaises(ValueError): + for i in self.client.stream_buckets(timeout=bad): + pass + + with self.assertRaises(ValueError): + self.client.get_keys(bucket, timeout=bad) + + with self.assertRaises(ValueError): + for i in self.client.stream_keys(bucket, timeout=bad): + pass + + with self.assertRaises(ValueError): + self.client.put(obj, timeout=bad) + + with self.assertRaises(ValueError): + self.client.get(obj, timeout=bad) + + with self.assertRaises(ValueError): + self.client.delete(obj, timeout=bad) + + with self.assertRaises(ValueError): + self.client.mapred([], [], bad) + + with self.assertRaises(ValueError): + for i in self.client.stream_mapred([], [], bad): + pass + + with self.assertRaises(ValueError): + self.client.get_index(bucket, 'field1_bin', 'val1', 'val4', + timeout=bad) + + with self.assertRaises(ValueError): + for i in self.client.stream_index(bucket, 'field1_bin', 'val1', + 'val4', timeout=bad): + pass + + def test_close_stops_operation_requests(self): + c = self.create_client() + c.ping() + c.close() + self.assertRaises(RuntimeError, c.ping) + + def test_multiget_bucket(self): + """ + Multiget operations 
can be invoked on buckets. + """ + keys = [self.key_name, self.randname(), self.randname()] + for key in keys: + if PY2: + self.client.bucket(self.bucket_name)\ + .new(key, encoded_data=key, content_type="text/plain")\ + .store() + else: + self.client.bucket(self.bucket_name)\ + .new(key, data=key, + content_type="text/plain").store() + results = self.client.bucket(self.bucket_name).multiget(keys) + for obj in results: + self.assertIsInstance(obj, RiakObject) + self.assertTrue(obj.exists) + if PY2: + self.assertEqual(obj.key, obj.encoded_data) + else: + self.assertEqual(obj.key, obj.data) + + def test_multiget_errors(self): + """ + Unrecoverable errors are captured along with the bucket/key + and not propagated. + """ + keys = [self.key_name, self.randname(), self.randname()] + client = self.create_client(http_port=DUMMY_HTTP_PORT, + pb_port=DUMMY_PB_PORT) + results = client.bucket(self.bucket_name).multiget(keys) + for failure in results: + self.assertIsInstance(failure, tuple) + self.assertEqual(failure[0], 'default') + self.assertEqual(failure[1], self.bucket_name) + self.assertIn(failure[2], keys) + if PY2: + self.assertIsInstance(failure[3], StandardError) # noqa + else: + self.assertIsInstance(failure[3], Exception) + client.close() + + def test_multiput_errors(self): + """ + Unrecoverable errors are captured along with the bucket/key + and not propagated. 
+ """ + client = self.create_client(http_port=DUMMY_HTTP_PORT, + pb_port=DUMMY_PB_PORT) + bucket = client.bucket(self.bucket_name) + k1 = self.randname() + k2 = self.randname() + o1 = RiakObject(client, bucket, k1) + o2 = RiakObject(client, bucket, k2) + + if PY2: + o1.encoded_data = k1 + o2.encoded_data = k2 + else: + o1.data = k1 + o2.data = k2 + + objs = [o1, o2] + for robj in objs: + robj.content_type = 'text/plain' + + results = client.multiput(objs, return_body=True) + for failure in results: + self.assertIsInstance(failure, tuple) + self.assertIsInstance(failure[0], RiakObject) + if PY2: + self.assertIsInstance(failure[1], StandardError) # noqa + else: + self.assertIsInstance(failure[1], Exception) + client.close() + + def test_multiget_notfounds(self): + """ + Not founds work in multiget just the same as get. + """ + keys = [("default", self.bucket_name, self.key_name), + ("default", self.bucket_name, self.randname())] + results = self.client.multiget(keys) + for obj in results: + self.assertIsInstance(obj, RiakObject) + self.assertFalse(obj.exists) + + def test_multiget_pool_size(self): + """ + The pool size for multigets can be configured at client initiation + time. Multiget still works as expected. 
+ """ + client = self.create_client(multiget_pool_size=2) + self.assertEqual(2, client._multiget_pool._size) + + keys = [self.key_name, self.randname(), self.randname()] + for key in keys: + if PY2: + client.bucket(self.bucket_name)\ + .new(key, encoded_data=key, content_type="text/plain")\ + .store() + else: + client.bucket(self.bucket_name)\ + .new(key, data=key, content_type="text/plain")\ + .store() + + results = client.bucket(self.bucket_name).multiget(keys) + for obj in results: + self.assertIsInstance(obj, RiakObject) + self.assertTrue(obj.exists) + if PY2: + self.assertEqual(obj.key, obj.encoded_data) + else: + self.assertEqual(obj.key, obj.data) + client.close() + + def test_multiput_pool_size(self): + """ + The pool size for multiputs can be configured at client initiation + time. Multiput still works as expected. + """ + client = self.create_client(multiput_pool_size=2) + self.assertEqual(2, client._multiput_pool._size) + + bucket = client.bucket(self.bucket_name) + k1 = self.randname() + k2 = self.randname() + o1 = RiakObject(client, bucket, k1) + o2 = RiakObject(client, bucket, k2) + + if PY2: + o1.encoded_data = k1 + o2.encoded_data = k2 + else: + o1.data = k1 + o2.data = k2 + + objs = [o1, o2] + for robj in objs: + robj.content_type = 'text/plain' + + results = client.multiput(objs, return_body=True) + for obj in results: + self.assertIsInstance(obj, RiakObject) + self.assertTrue(obj.exists) + self.assertEqual(obj.content_type, 'text/plain') + if PY2: + self.assertEqual(obj.key, obj.encoded_data) + else: + self.assertEqual(obj.key, obj.data) + client.close() + + def test_multiput_pool_options(self): + sz = 4 + client = self.create_client(multiput_pool_size=sz) + self.assertEqual(sz, client._multiput_pool._size) + + bucket = client.bucket(self.bucket_name) + k1 = self.randname() + k2 = self.randname() + o1 = RiakObject(client, bucket, k1) + o2 = RiakObject(client, bucket, k2) + + if PY2: + o1.encoded_data = k1 + o2.encoded_data = k2 + else: + o1.data 
= k1 + o2.data = k2 + + objs = [o1, o2] + for robj in objs: + robj.content_type = 'text/plain' + + results = client.multiput(objs, return_body=False) + for obj in results: + if client.protocol == 'pbc': + self.assertIsInstance(obj, RiakObject) + self.assertFalse(obj.exists) + self.assertEqual(obj.content_type, 'text/plain') + else: + self.assertIsNone(obj) + client.close() + + @unittest.skipUnless(RUN_POOL, 'RUN_POOL is 0') + def test_pool_close(self): + """ + Iterate over the connection pool and close all connections. + """ + # Do something to add to the connection pool + self.test_multiget_bucket() + if self.client.protocol == 'pbc': + self.assertGreater(len(self.client._tcp_pool.resources), 1) + else: + self.assertGreater(len(self.client._http_pool.resources), 1) + # Now close them all up + self.client.close() + self.assertEqual(len(self.client._http_pool.resources), 0) + self.assertEqual(len(self.client._tcp_pool.resources), 0) diff --git a/riak/tests/test_comparison.py b/riak/tests/test_comparison.py index 3d30f4fd..8aac4ef8 100644 --- a/riak/tests/test_comparison.py +++ b/riak/tests/test_comparison.py @@ -1,13 +1,23 @@ -import platform - -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# -*- coding: utf-8 -*- +import unittest from riak.riak_object import RiakObject from riak.bucket import RiakBucket, BucketType -from riak.tests.test_all import BaseTestCase +from riak.tests.base import IntegrationTestBase class BucketTypeRichComparisonTest(unittest.TestCase): @@ -136,12 +146,14 @@ def test_object_valid_key(self): self.assertIsNone(b, 'empty object key not allowed') -class RiakClientComparisonTest(unittest.TestCase, BaseTestCase): +class RiakClientComparisonTest(IntegrationTestBase, unittest.TestCase): def test_client_eq(self): self.protocol = 'http' a = self.create_client(host='host1', http_port=11) b = self.create_client(host='host1', http_port=11) self.assertEqual(a, b) + a.close() + b.close() def test_client_nq(self): self.protocol = 'http' @@ -150,6 +162,9 @@ def test_client_nq(self): c = self.create_client(host='host1', http_port=12) self.assertNotEqual(a, b, 'matched with different hosts') self.assertNotEqual(a, c, 'matched with different ports') + a.close() + b.close() + c.close() def test_client_hash(self): self.protocol = 'http' @@ -158,6 +173,10 @@ def test_client_hash(self): c = self.create_client(host='host2', http_port=11) self.assertEqual(hash(a), hash(b), 'same object has different hashes') self.assertNotEqual(hash(a), hash(c), 'different object has same hash') + a.close() + b.close() + c.close() + if __name__ == '__main__': unittest.main() diff --git a/riak/tests/test_datatypes.py b/riak/tests/test_datatypes.py index 76a3e132..17aa4bf2 100644 --- a/riak/tests/test_datatypes.py +++ b/riak/tests/test_datatypes.py @@ -1,17 +1,28 @@ -# -*- coding: utf-8 -*- -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -from riak import RiakBucket, BucketType, RiakObject +# -*- coding: utf-8 -*- +import unittest import riak.datatypes as datatypes -from . import SKIP_DATATYPES -from riak.tests import test_six +from riak import RiakError, RiakBucket, BucketType, RiakObject +from riak.tests import RUN_DATATYPES +from riak.tests.base import IntegrationTestBase +from riak.tests.comparison import Comparison -class DatatypeUnitTests(object): + +class DatatypeUnitTestBase(object): dtype = None bucket = RiakBucket(None, 'test', BucketType(None, 'datatypes')) @@ -49,8 +60,7 @@ def test_op_output(self): self.check_op_output(op) -class FlagUnitTests(DatatypeUnitTests, - unittest.TestCase): +class FlagUnitTests(DatatypeUnitTestBase, unittest.TestCase): dtype = datatypes.Flag def op(self, dtype): @@ -69,8 +79,7 @@ def test_disables_require_context(self): self.assertTrue(dtype.modified) -class RegisterUnitTests(DatatypeUnitTests, - unittest.TestCase): +class RegisterUnitTests(DatatypeUnitTestBase, unittest.TestCase): dtype = datatypes.Register def op(self, dtype): @@ -80,8 +89,7 @@ def check_op_output(self, op): self.assertEqual(('assign', 'foobarbaz'), op) -class CounterUnitTests(DatatypeUnitTests, - unittest.TestCase): +class CounterUnitTests(DatatypeUnitTestBase, unittest.TestCase): dtype = datatypes.Counter def op(self, dtype): @@ -91,9 +99,7 @@ def check_op_output(self, op): self.assertEqual(('increment', 5), op) -class SetUnitTests(DatatypeUnitTests, - unittest.TestCase, - test_six.Comparison): +class SetUnitTests(DatatypeUnitTestBase, unittest.TestCase, Comparison): dtype = 
datatypes.Set def op(self, dtype): @@ -112,14 +118,25 @@ def test_removes_require_context(self): dtype = self.dtype(self.bucket, 'key') with self.assertRaises(datatypes.ContextRequired): dtype.discard('foo') - dtype._context = 'blah' dtype.discard('foo') self.assertTrue(dtype.modified) -class MapUnitTests(DatatypeUnitTests, - unittest.TestCase): +class HllUnitTests(DatatypeUnitTestBase, unittest.TestCase, Comparison): + dtype = datatypes.Hll + + def op(self, dtype): + dtype._context = 'hll_context' + dtype.add('foo') + dtype.add('bar') + + def check_op_output(self, op): + self.assertIn('adds', op) + self.assertItemsEqual(op['adds'], ['bar', 'foo']) + + +class MapUnitTests(DatatypeUnitTestBase, unittest.TestCase): dtype = datatypes.Map def op(self, dtype): @@ -127,6 +144,8 @@ def op(self, dtype): dtype.registers['b'].assign('testing') dtype.flags['c'].enable() dtype.maps['d'][('e', 'set')].add('deep value') + dtype.maps['f'].counters['g'] + dtype.maps['h'].maps['i'].flags['j'] def check_op_output(self, op): self.assertIn(('update', ('a', 'counter'), ('increment', 2)), op) @@ -135,6 +154,9 @@ def check_op_output(self, op): self.assertIn(('update', ('d', 'map'), [('update', ('e', 'set'), {'adds': ['deep value']})]), op) + self.assertNotIn(('update', ('f', 'map'), None), op) + self.assertNotIn(('update', ('h', 'map'), [('update', ('i', 'map'), + None)]), op) def test_removes_require_context(self): dtype = self.dtype(self.bucket, 'key') @@ -152,10 +174,73 @@ def test_removes_require_context(self): self.assertTrue(dtype.modified) -class DatatypeIntegrationTests(object): - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') +@unittest.skipUnless(RUN_DATATYPES, 'RUN_DATATYPES is 0') +class HllDatatypeIntegrationTests(IntegrationTestBase, + unittest.TestCase): + @classmethod + def setUpClass(cls): + super(HllDatatypeIntegrationTests, cls).setUpClass() + client = cls.create_client() + try: + btype = client.bucket_type('hlls') + btype.get_properties() + except RiakError 
as e: + raise unittest.SkipTest(e) + finally: + client.close() + + def test_fetch_bucket_type_props(self): + btype = self.client.bucket_type('hlls') + props = btype.get_properties() + self.assertEqual(14, props['hll_precision']) + + def test_set_same_hll_precision(self): + btype = self.client.bucket_type('hlls') + btype.set_property('hll_precision', 14) + props = btype.get_properties() + self.assertEqual(14, props['hll_precision']) + + def test_set_larger_hll_precision(self): + btype = self.client.bucket_type('hlls') + with self.assertRaises(RiakError): + btype.set_property('hll_precision', 15) + + def test_set_invalid_hll_precision(self): + btype = self.client.bucket_type('hlls') + with self.assertRaises(ValueError): + btype.set_property('hll_precision', 3) + with self.assertRaises(ValueError): + btype.set_property('hll_precision', 17) + with self.assertRaises(ValueError): + btype.set_property('hll_precision', 0) + + def test_dt_hll(self): + btype = self.client.bucket_type('hlls') + props = btype.get_properties() + self.assertEqual(14, props['hll_precision']) + bucket = btype.bucket(self.bucket_name) + myhll = datatypes.Hll(bucket, self.key_name) + myhll.add('user1') + myhll.add('user2') + myhll.add('foo') + myhll.add('bar') + myhll.add('baz') + myhll.add('user1') + self.assertEqual(5, len(myhll._adds)) + + myhll.store() + self.assertEqual(5, myhll.value) + + otherhll = bucket.get(self.key_name) + self.assertEqual(5, otherhll.value) + + +@unittest.skipUnless(RUN_DATATYPES, 'RUN_DATATYPES is 0') +class DatatypeIntegrationTests(IntegrationTestBase, + unittest.TestCase, + Comparison): def test_dt_counter(self): - btype = self.client.bucket_type('pytest-counters') + btype = self.client.bucket_type('counters') bucket = btype.bucket(self.bucket_name) mycount = datatypes.Counter(bucket, self.key_name) mycount.increment(5) @@ -170,9 +255,8 @@ def test_dt_counter(self): mycount.reload() self.assertEqual(2, mycount.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES 
is set') def test_dt_set(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) myset = datatypes.Set(bucket, self.key_name) myset.add('Sean') @@ -193,9 +277,8 @@ def test_dt_set(self): self.assertIn('Brett', myset) self.assertNotIn('Sean', myset) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_map(self): - btype = self.client.bucket_type('pytest-maps') + btype = self.client.bucket_type('maps') bucket = btype.bucket(self.bucket_name) mymap = datatypes.Map(bucket, self.key_name) @@ -229,9 +312,8 @@ def test_dt_map(self): self.assertIn('f', mymap.sets) self.assertItemsEqual(['thing1', 'thing2'], mymap.sets['f'].value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_remove_without_context(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -241,9 +323,8 @@ def test_dt_set_remove_without_context(self): with self.assertRaises(datatypes.ContextRequired): set.discard("Y") - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_remove_fetching_context(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -258,9 +339,8 @@ def test_dt_set_remove_fetching_context(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y'], set2.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_add_twice(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -275,9 +355,8 @@ def test_dt_set_add_twice(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y'], set2.value) - 
@unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_add_wins_in_same_op(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -293,9 +372,8 @@ def test_dt_set_add_wins_in_same_op(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y'], set2.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_add_wins_in_same_op_reversed(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -311,9 +389,8 @@ def test_dt_set_add_wins_in_same_op_reversed(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y'], set2.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_remove_old_context(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -333,9 +410,8 @@ def test_dt_set_remove_old_context(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y', 'Z'], set2.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_remove_updated_context(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) set = datatypes.Set(bucket, self.key_name) @@ -354,9 +430,8 @@ def test_dt_set_remove_updated_context(self): set2 = bucket.get(self.key_name) self.assertItemsEqual(['X', 'Y'], set2.value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_map_remove_set_update_same_op(self): - btype = self.client.bucket_type('pytest-maps') + btype = self.client.bucket_type('maps') bucket = btype.bucket(self.bucket_name) map = datatypes.Map(bucket, self.key_name) @@ -372,9 
+447,8 @@ def test_dt_map_remove_set_update_same_op(self): map2 = bucket.get(self.key_name) self.assertItemsEqual(["Z"], map2.sets['set']) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_map_remove_counter_increment_same_op(self): - btype = self.client.bucket_type('pytest-maps') + btype = self.client.bucket_type('maps') bucket = btype.bucket(self.bucket_name) map = datatypes.Map(bucket, self.key_name) @@ -390,9 +464,8 @@ def test_dt_map_remove_counter_increment_same_op(self): map2 = bucket.get(self.key_name) self.assertEqual(2, map2.counters['counter'].value) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_map_remove_map_update_same_op(self): - btype = self.client.bucket_type('pytest-maps') + btype = self.client.bucket_type('maps') bucket = btype.bucket(self.bucket_name) map = datatypes.Map(bucket, self.key_name) @@ -408,9 +481,8 @@ def test_dt_map_remove_map_update_same_op(self): map2 = bucket.get(self.key_name) self.assertItemsEqual(["Z"], map2.maps['map'].sets['set']) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_set_return_body_true_default(self): - btype = self.client.bucket_type('pytest-sets') + btype = self.client.bucket_type('sets') bucket = btype.bucket(self.bucket_name) myset = bucket.new(self.key_name) myset.add('X') @@ -426,9 +498,8 @@ def test_dt_set_return_body_true_default(self): myset.store() self.assertItemsEqual(myset.value, ['Y']) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_dt_map_return_body_true_default(self): - btype = self.client.bucket_type('pytest-maps') + btype = self.client.bucket_type('maps') bucket = btype.bucket(self.bucket_name) mymap = bucket.new(self.key_name) mymap.sets['a'].add('X') @@ -451,21 +522,20 @@ def test_dt_map_return_body_true_default(self): self.assertEqual(mymap.value, {}) - @unittest.skipIf(SKIP_DATATYPES, 'SKIP_DATATYPES is set') def test_delete_datatype(self): - ctype = self.client.bucket_type('pytest-counters') + 
ctype = self.client.bucket_type('counters') cbucket = ctype.bucket(self.bucket_name) counter = cbucket.new(self.key_name) counter.increment(5) counter.store() - stype = self.client.bucket_type('pytest-sets') + stype = self.client.bucket_type('sets') sbucket = stype.bucket(self.bucket_name) set_ = sbucket.new(self.key_name) set_.add("Brett") set_.store() - mtype = self.client.bucket_type('pytest-maps') + mtype = self.client.bucket_type('maps') mbucket = mtype.bucket(self.bucket_name) map_ = mbucket.new(self.key_name) map_.sets['people'].add('Sean') diff --git a/riak/tests/test_datetime.py b/riak/tests/test_datetime.py new file mode 100644 index 00000000..f3367179 --- /dev/null +++ b/riak/tests/test_datetime.py @@ -0,0 +1,44 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# -*- coding: utf-8 -*- +import datetime +import unittest + +from riak.util import epoch, epoch_tz, \ + unix_time_millis + +# NB: without tzinfo, this is UTC +ts0 = datetime.datetime(2015, 1, 1, 12, 1, 2, 987000) +ts0_ts = 1420113662987 +ts0_ts_pst = 1420142462987 + + +class DatetimeUnitTests(unittest.TestCase): + def test_get_unix_time_without_tzinfo(self): + self.assertIsNone(epoch.tzinfo) + self.assertIsNotNone(epoch_tz.tzinfo) + self.assertIsNone(ts0.tzinfo) + utm = unix_time_millis(ts0) + self.assertEqual(utm, ts0_ts) + + def test_get_unix_time_with_tzinfo(self): + try: + import pytz + tz = pytz.timezone('America/Los_Angeles') + ts0_pst = tz.localize(ts0) + utm = unix_time_millis(ts0_pst) + self.assertEqual(utm, ts0_ts_pst) + except ImportError: + pass diff --git a/riak/tests/test_feature_detection.py b/riak/tests/test_feature_detection.py index 8efc43f6..bf0c0c7b 100644 --- a/riak/tests/test_feature_detection.py +++ b/riak/tests/test_feature_detection.py @@ -1,27 +1,19 @@ -""" -Copyright 2012-2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" - -import platform - -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +import unittest from riak.transports.feature_detect import FeatureDetection @@ -61,6 +53,8 @@ def test_pre_10(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_10(self): t = DummyTransport("1.0.3") @@ -78,6 +72,8 @@ def test_10(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_11(self): t = DummyTransport("1.1.4") @@ -95,6 +91,8 @@ def test_11(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_12(self): t = DummyTransport("1.2.0") @@ -112,6 +110,8 @@ def test_12(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_12_loose(self): t = DummyTransport("1.2.1p3") @@ -129,6 +129,8 @@ def test_12_loose(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_14(self): t = DummyTransport("1.4.0rc1") @@ -146,6 +148,8 @@ def test_14(self): self.assertFalse(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + 
self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_144(self): t = DummyTransport("1.4.6") @@ -163,6 +167,8 @@ def test_144(self): self.assertTrue(t.index_term_regex()) self.assertFalse(t.bucket_types()) self.assertFalse(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) def test_20(self): t = DummyTransport("2.0.1") @@ -180,6 +186,28 @@ def test_20(self): self.assertTrue(t.index_term_regex()) self.assertTrue(t.bucket_types()) self.assertTrue(t.datatypes()) + self.assertFalse(t.preflists()) + self.assertFalse(t.write_once()) + + def test_21(self): + t = DummyTransport("2.1.0") + self.assertTrue(t.phaseless_mapred()) + self.assertTrue(t.pb_indexes()) + self.assertTrue(t.pb_search()) + self.assertTrue(t.pb_conditionals()) + self.assertTrue(t.quorum_controls()) + self.assertTrue(t.tombstone_vclocks()) + self.assertTrue(t.pb_head()) + self.assertTrue(t.pb_clear_bucket_props()) + self.assertTrue(t.pb_all_bucket_props()) + self.assertTrue(t.counters()) + self.assertTrue(t.stream_indexes()) + self.assertTrue(t.index_term_regex()) + self.assertTrue(t.bucket_types()) + self.assertTrue(t.datatypes()) + self.assertTrue(t.preflists()) + self.assertTrue(t.write_once()) + if __name__ == '__main__': unittest.main() diff --git a/riak/tests/test_filters.py b/riak/tests/test_filters.py index 73d4771c..f4a77db0 100644 --- a/riak/tests/test_filters.py +++ b/riak/tests/test_filters.py @@ -1,12 +1,23 @@ -import platform +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +import unittest + from riak.mapreduce import RiakKeyFilter from riak import key_filter -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest - class FilterTests(unittest.TestCase): def test_simple(self): diff --git a/riak/tests/test_kv.py b/riak/tests/test_kv.py index e443412e..63206c95 100644 --- a/riak/tests/test_kv.py +++ b/riak/tests/test_kv.py @@ -1,7 +1,37 @@ # -*- coding: utf-8 -*- +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy import os -import platform +import sys +import unittest + from six import string_types, PY2, PY3 +from time import sleep +from riak import ConflictError, RiakError, ListError +from riak import RiakClient, RiakBucket, BucketType +from riak.resolver import default_resolver, last_written_resolver +from riak.tests import RUN_KV, RUN_RESOLVE, PROTOCOL +from riak.tests.base import IntegrationTestBase +from riak.tests.comparison import Comparison + +try: + import simplejson as json +except ImportError: + import json + if PY2: import cPickle test_pickle_dumps = cPickle.dumps @@ -10,25 +40,30 @@ import pickle test_pickle_dumps = pickle.dumps test_pickle_loads = pickle.loads -import copy -from time import sleep -from riak import ConflictError, RiakBucket, RiakError -from riak.resolver import default_resolver, last_written_resolver -try: - import simplejson as json -except ImportError: - import json -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest -from . 
import SKIP_RESOLVE +testrun_sibs_bucket = 'sibsbucket' +testrun_props_bucket = 'propsbucket' -class NotJsonSerializable(object): +def setUpModule(): + if not RUN_KV: + return + c = IntegrationTestBase.create_client() + c.bucket(testrun_sibs_bucket).allow_mult = True + c.close() + +def tearDownModule(): + if not RUN_KV: + return + c = IntegrationTestBase.create_client() + c.bucket(testrun_sibs_bucket).clear_properties() + c.bucket(testrun_props_bucket).clear_properties() + c.close() + + +class NotJsonSerializable(object): def __init__(self, *args, **kwargs): self.args = list(args) self.kwargs = kwargs @@ -51,7 +86,67 @@ def __eq__(self, other): return True -class BasicKVTests(object): +class KVUnitTests(unittest.TestCase): + def test_list_keys_exception(self): + c = RiakClient() + bt = BucketType(c, 'test') + b = RiakBucket(c, 'test', bt) + with self.assertRaises(ListError): + b.get_keys() + + def test_stream_buckets_exception(self): + c = RiakClient() + with self.assertRaises(ListError): + bs = [] + for bl in c.stream_buckets(): + bs.extend(bl) + + def test_stream_keys_exception(self): + c = RiakClient() + with self.assertRaises(ListError): + ks = [] + for kl in c.stream_keys('test'): + ks.extend(kl) + + def test_ts_stream_keys_exception(self): + c = RiakClient() + with self.assertRaises(ListError): + ks = [] + for kl in c.ts_stream_keys('test'): + ks.extend(kl) + + +@unittest.skipUnless(RUN_KV, 'RUN_KV is 0') +class BasicKVTests(IntegrationTestBase, unittest.TestCase, Comparison): + def test_no_returnbody(self): + bucket = self.client.bucket(self.bucket_name) + o = bucket.new(self.key_name, "bar").store(return_body=False) + self.assertEqual(o.vclock, None) + + @unittest.skipUnless(PROTOCOL == 'pbc', 'Only available on pbc') + def test_get_no_returnbody(self): + bucket = self.client.bucket(self.bucket_name) + o = bucket.new(self.key_name, "Ain't no body") + o.store() + + stored_object = bucket.get(self.key_name, head_only=True) + 
self.assertFalse(stored_object.data) + + list_of_objects = bucket.multiget([self.key_name], head_only=True) + for stored_object in list_of_objects: + self.assertFalse(stored_object.data) + + def test_many_link_headers_should_work_fine(self): + bucket = self.client.bucket(self.bucket_name) + o = bucket.new("lots_of_links", "My god, it's full of links!") + for i in range(0, 300): + link = ("other", "key%d" % i, "next") + o.add_link(link) + + o.store() + stored_object = bucket.get("lots_of_links") + self.assertEqual(len(stored_object.links), 300) + def test_is_alive(self): self.assertTrue(self.client.is_alive()) @@ -69,7 +164,7 @@ def test_store_and_get(self): # unicode objects are fine, as long as they don't # contain any non-ASCII chars if PY2: - self.client.bucket(unicode(self.bucket_name)) + self.client.bucket(unicode(self.bucket_name)) # noqa else: self.client.bucket(self.bucket_name) if PY2: @@ -127,9 +222,8 @@ def test_string_bucket_name(self): with self.assert_raises_regex(TypeError, 'must be a string'): self.client.bucket(bad) - if PY2: - with self.assert_raises_regex(TypeError, 'must be a string'): - RiakBucket(self.client, bad, None) + with self.assert_raises_regex(TypeError, 'must be a string'): + RiakBucket(self.client, bad, None) # Unicode bucket names are not supported in Python 2.x, # if they can't be encoded to ASCII. This should be changed in a @@ -148,17 +242,29 @@ def test_string_bucket_name(self): def test_generate_key(self): # Ensure that Riak generates a random key when # the key passed to bucket.new() is None. 
- bucket = self.client.bucket('random_key_bucket') - existing_keys = bucket.get_keys() + bucket = self.client.bucket(self.bucket_name) o = bucket.new(None, data={}) self.assertIsNone(o.key) o.store() self.assertIsNotNone(o.key) self.assertNotIn('/', o.key) - self.assertNotIn(o.key, existing_keys) - self.assertEqual(len(bucket.get_keys()), len(existing_keys) + 1) + existing_keys = bucket.get_keys() + self.assertEqual(len(existing_keys), 1) + + def maybe_store_keys(self): + skey = 'rkb-init' + bucket = self.client.bucket('random_key_bucket') + sobj = bucket.get(skey) + if sobj.exists: + return + for key in range(1, 1000): + o = bucket.new(None, data={}) + o.store() + o = bucket.new(skey, data={}) + o.store() def test_stream_keys(self): + self.maybe_store_keys() bucket = self.client.bucket('random_key_bucket') regular_keys = bucket.get_keys() self.assertNotEqual(len(regular_keys), 0) @@ -171,6 +277,7 @@ def test_stream_keys(self): self.assertEqual(sorted(regular_keys), sorted(streamed_keys)) def test_stream_keys_timeout(self): + self.maybe_store_keys() bucket = self.client.bucket('random_key_bucket') streamed_keys = [] with self.assertRaises(RiakError): @@ -181,6 +288,7 @@ def test_stream_keys_timeout(self): streamed_keys += keylist def test_stream_keys_abort(self): + self.maybe_store_keys() bucket = self.client.bucket('random_key_bucket') regular_keys = bucket.get_keys() self.assertNotEqual(len(regular_keys), 0) @@ -318,22 +426,28 @@ def test_bucket_delete(self): self.assertFalse(obj.exists) def test_set_bucket_properties(self): - bucket = self.client.bucket(self.props_bucket) + bucket = self.client.bucket(testrun_props_bucket) # Test setting allow mult... bucket.allow_mult = True # Test setting nval... bucket.n_val = 1 - bucket2 = self.create_client().bucket(self.props_bucket) + c2 = self.create_client() + bucket2 = c2.bucket(testrun_props_bucket) self.assertTrue(bucket2.allow_mult) self.assertEqual(bucket2.n_val, 1) # Test setting multiple properties... 
bucket.set_properties({"allow_mult": False, "n_val": 2}) - bucket3 = self.create_client().bucket(self.props_bucket) + c3 = self.create_client() + bucket3 = c3.bucket(testrun_props_bucket) self.assertFalse(bucket3.allow_mult) self.assertEqual(bucket3.n_val, 2) + # clean up! + c2.close() + c3.close() + def test_if_none_match(self): bucket = self.client.bucket(self.bucket_name) obj = bucket.get(self.key_name) @@ -351,7 +465,7 @@ def test_if_none_match(self): def test_siblings(self): # Set up the bucket, clear any existing object... - bucket = self.client.bucket(self.sibs_bucket) + bucket = self.client.bucket(testrun_sibs_bucket) obj = bucket.get(self.key_name) bucket.allow_mult = True @@ -365,7 +479,6 @@ def test_siblings(self): # Make sure the object has five siblings... obj = bucket.get(self.key_name) - obj.reload() self.assertEqual(len(obj.siblings), 5) # When the object is in conflict, using the shortcut methods @@ -381,16 +494,15 @@ def test_siblings(self): # Resolve the conflict, and then do a get... 
resolved_sibling = obj.siblings[3] obj.siblings = [resolved_sibling] + self.assertEqual(len(obj.siblings), 1) obj.store() - obj.reload() self.assertEqual(len(obj.siblings), 1) self.assertEqual(obj.data, resolved_sibling.data) - @unittest.skipIf(SKIP_RESOLVE == '1', - "skip requested for resolvers test") + @unittest.skipUnless(RUN_RESOLVE, "RUN_RESOLVE is 0") def test_resolution(self): - bucket = self.client.bucket(self.sibs_bucket) + bucket = self.client.bucket(testrun_sibs_bucket) obj = bucket.get(self.key_name) bucket.allow_mult = True @@ -426,8 +538,7 @@ def test_resolution(self): # Define our own custom resolver on the object that returns # the maximum value, overriding the bucket and client resolvers def max_value_resolver(obj): - datafun = lambda s: s.data - obj.siblings = [max(obj.siblings, key=datafun), ] + obj.siblings = [max(obj.siblings, key=lambda s: s.data), ] obj.resolver = max_value_resolver obj.reload() @@ -445,17 +556,16 @@ def max_value_resolver(obj): self.assertEqual(bucket.resolver, default_resolver) # reset self.assertEqual(self.client.resolver, default_resolver) # reset - @unittest.skipIf(SKIP_RESOLVE == '1', - "skip requested for resolvers test") + @unittest.skipUnless(RUN_RESOLVE, "RUN_RESOLVE is 0") def test_resolution_default(self): # If no resolver is setup, be sure to resolve to default_resolver - bucket = self.client.bucket(self.sibs_bucket) + bucket = self.client.bucket(testrun_sibs_bucket) self.assertEqual(self.client.resolver, default_resolver) self.assertEqual(bucket.resolver, default_resolver) def test_tombstone_siblings(self): # Set up the bucket, clear any existing object... 
- bucket = self.client.bucket(self.sibs_bucket) + bucket = self.client.bucket(testrun_sibs_bucket) obj = bucket.get(self.key_name) bucket.allow_mult = True @@ -468,7 +578,11 @@ def test_tombstone_siblings(self): vals = set(self.generate_siblings(obj, count=4)) obj = bucket.get(self.key_name) - self.assertEqual(len(obj.siblings), 5) + + # TODO this used to be 5, only + siblen = len(obj.siblings) + self.assertTrue(siblen == 4 or siblen == 5) + non_tombstones = 0 for sib in obj.siblings: if sib.exists: @@ -558,6 +672,21 @@ def test_get_params(self): basic_quorum=True) self.assertFalse(missing.exists) + def test_preflist(self): + nodes = ['riak@127.0.0.1', 'dev1@127.0.0.1'] + bucket = self.client.bucket(self.bucket_name) + bucket.new(self.key_name, data={"foo": "one", + "bar": "baz"}).store() + try: + preflist = bucket.get_preflist(self.key_name) + preflist2 = self.client.get_preflist(bucket, self.key_name) + for pref in (preflist, preflist2): + self.assertEqual(len(pref), 3) + self.assertIn(pref[0]['node'], nodes) + [self.assertTrue(node['primary']) for node in pref] + except NotImplementedError as e: + raise unittest.SkipTest(e) + def generate_siblings(self, original, count=5, delay=None): vals = [] for _ in range(count): @@ -577,9 +706,10 @@ def generate_siblings(self, original, count=5, delay=None): return vals -class BucketPropsTest(object): +@unittest.skipUnless(RUN_KV, 'RUN_KV is 0') +class BucketPropsTest(IntegrationTestBase, unittest.TestCase): def test_rw_settings(self): - bucket = self.client.bucket(self.props_bucket) + bucket = self.client.bucket(testrun_props_bucket) self.assertEqual(bucket.r, "quorum") self.assertEqual(bucket.w, "quorum") self.assertEqual(bucket.dw, "quorum") @@ -604,7 +734,7 @@ def test_rw_settings(self): bucket.clear_properties() def test_primary_quora(self): - bucket = self.client.bucket(self.props_bucket) + bucket = self.client.bucket(testrun_props_bucket) self.assertEqual(bucket.pr, 0) self.assertEqual(bucket.pw, 0) @@ -618,7 +748,7 
@@ def test_primary_quora(self): bucket.clear_properties() def test_clear_bucket_properties(self): - bucket = self.client.bucket(self.props_bucket) + bucket = self.client.bucket(testrun_props_bucket) bucket.allow_mult = True self.assertTrue(bucket.allow_mult) bucket.n_val = 1 @@ -630,20 +760,23 @@ def test_clear_bucket_properties(self): self.assertEqual(bucket.n_val, 3) -class KVFileTests(object): +@unittest.skipUnless(RUN_KV, 'RUN_KV is 0') +class KVFileTests(IntegrationTestBase, unittest.TestCase): def test_store_binary_object_from_file(self): bucket = self.client.bucket(self.bucket_name) - filepath = os.path.join(os.path.dirname(__file__), 'test_all.py') - obj = bucket.new_from_file(self.key_name, filepath) + obj = bucket.new_from_file(self.key_name, __file__) obj.store() obj = bucket.get(self.key_name) self.assertNotEqual(obj.encoded_data, None) - self.assertEqual(obj.content_type, "text/x-python") + is_win32 = sys.platform == 'win32' + self.assertTrue(obj.content_type == 'text/x-python' or + (is_win32 and obj.content_type == 'text/plain') or + obj.content_type == 'application/x-python-code') def test_store_binary_object_from_file_should_use_default_mimetype(self): bucket = self.client.bucket(self.bucket_name) filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), - os.pardir, os.pardir, 'THANKS') + os.pardir, os.pardir, 'README.md') obj = bucket.new_from_file(self.key_name, filepath) obj.store() obj = bucket.get(self.key_name) @@ -658,7 +791,8 @@ def test_store_binary_object_from_file_should_fail_if_file_not_found(self): self.assertFalse(obj.exists) -class CounterTests(object): +@unittest.skipUnless(RUN_KV, 'RUN_KV is 0') +class CounterTests(IntegrationTestBase, unittest.TestCase): def test_counter_requires_allow_mult(self): bucket = self.client.bucket(self.bucket_name) if bucket.allow_mult: @@ -669,7 +803,7 @@ def test_counter_requires_allow_mult(self): bucket.update_counter(self.key_name, 10) def test_counter_ops(self): - bucket = 
self.client.bucket(self.sibs_bucket) + bucket = self.client.bucket(testrun_sibs_bucket) self.assertTrue(bucket.allow_mult) # Non-existent counter has no value diff --git a/riak/tests/test_mapreduce.py b/riak/tests/test_mapreduce.py index f22a24f6..bfdfc7dd 100644 --- a/riak/tests/test_mapreduce.py +++ b/riak/tests/test_mapreduce.py @@ -1,21 +1,53 @@ # -*- coding: utf-8 -*- +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from __future__ import print_function + +import unittest + from six import PY2 from riak.mapreduce import RiakMapReduce -from riak import key_filter, RiakError +from riak import key_filter, RiakClient, RiakError, ListError +from riak.tests import RUN_MAPREDUCE, RUN_SECURITY, RUN_YZ +from riak.tests.base import IntegrationTestBase from riak.tests.test_yokozuna import wait_for_yz_index -from riak.tests import RUN_SECURITY -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest +from riak.tests.yz_setup import yzSetUp, yzTearDown + + +testrun_yz_mr = {'btype': 'mr', + 'bucket': 'mrbucket', + 'index': 'mrbucket'} + -from . 
import RUN_YZ +def setUpModule(): + yzSetUp(testrun_yz_mr) -class LinkTests(object): +def tearDownModule(): + yzTearDown(testrun_yz_mr) + + +class MapReduceUnitTests(unittest.TestCase): + def test_mapred_bucket_exception(self): + c = RiakClient() + with self.assertRaises(ListError): + c.add('bucket') + + +@unittest.skipUnless(RUN_MAPREDUCE, 'RUN_MAPREDUCE is 0') +class LinkTests(IntegrationTestBase, unittest.TestCase): def test_store_and_get_links(self): # Create the object... bucket = self.client.bucket(self.bucket_name) @@ -81,7 +113,8 @@ def test_link_walking(self): self.assertEqual(len(results), 1) -class ErlangMapReduceTests(object): +@unittest.skipUnless(RUN_MAPREDUCE, 'RUN_MAPREDUCE is 0') +class ErlangMapReduceTests(IntegrationTestBase, unittest.TestCase): def test_erlang_map_reduce(self): # Create the object... bucket = self.client.bucket(self.bucket_name) @@ -100,16 +133,16 @@ def test_erlang_map_reduce(self): def test_erlang_map_reduce_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() bucket.new("bar", 2).store() bucket.new("baz", 4).store() # Run the map... 
result = self.client \ - .add(self.bucket_name, "foo", bucket_type="pytest") \ - .add(self.bucket_name, "bar", bucket_type="pytest") \ - .add(self.bucket_name, "baz", bucket_type="pytest") \ + .add(self.bucket_name, "foo", bucket_type='no_siblings') \ + .add(self.bucket_name, "bar", bucket_type='no_siblings') \ + .add(self.bucket_name, "baz", bucket_type='no_siblings') \ .map(["riak_kv_mapreduce", "map_object_value"]) \ .reduce(["riak_kv_mapreduce", "reduce_set_union"]) \ .run() @@ -139,11 +172,13 @@ def test_erlang_source_map_reduce(self): else: print("test_erlang_source_map_reduce {}".format(e.value)) if strfun_allowed: - self.assertEqual(result, ['2', '3', '4']) + self.assertIn('2', result) + self.assertIn('3', result) + self.assertIn('4', result) def test_erlang_source_map_reduce_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() bucket.new("bar", 3).store() @@ -152,9 +187,9 @@ def test_erlang_source_map_reduce_bucket_type(self): # Run the map... 
try: result = self.client \ - .add(self.bucket_name, "foo", bucket_type="pytest") \ - .add(self.bucket_name, "bar", bucket_type="pytest") \ - .add(self.bucket_name, "baz", bucket_type="pytest") \ + .add(self.bucket_name, "foo", bucket_type='no_siblings') \ + .add(self.bucket_name, "bar", bucket_type='no_siblings') \ + .add(self.bucket_name, "baz", bucket_type='no_siblings') \ .map("""fun(Object, _KD, _A) -> Value = riak_object:get_value(Object), [Value] @@ -163,7 +198,9 @@ def test_erlang_source_map_reduce_bucket_type(self): if e.value.startswith('May have tried'): strfun_allowed = False if strfun_allowed: - self.assertEqual(result, ['2', '3', '4']) + self.assertIn('2', result) + self.assertIn('3', result) + self.assertIn('4', result) def test_client_exceptional_paths(self): bucket = self.client.bucket(self.bucket_name) @@ -187,7 +224,9 @@ def test_client_exceptional_paths(self): mr.add_key_filter("tokenize", "-", 1) -class JSMapReduceTests(object): +@unittest.skipUnless(RUN_MAPREDUCE, 'RUN_MAPREDUCE is 0') +class JSMapReduceTests(IntegrationTestBase, unittest.TestCase): + def test_javascript_source_map(self): # Create the object... bucket = self.client.bucket(self.bucket_name) @@ -238,12 +277,12 @@ def test_javascript_named_map(self): def test_javascript_named_map_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() # Run the map... result = self.client \ - .add(self.bucket_name, "foo", bucket_type="pytest") \ + .add(self.bucket_name, "foo", bucket_type='no_siblings') \ .map("Riak.mapValuesJson") \ .run() self.assertEqual(result, [2]) @@ -266,16 +305,16 @@ def test_javascript_source_map_reduce(self): def test_javascript_source_map_reduce_bucket_type(self): # Create the object... 
- btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() bucket.new("bar", 3).store() bucket.new("baz", 4).store() # Run the map... result = self.client \ - .add(self.bucket_name, "foo", bucket_type="pytest") \ - .add(self.bucket_name, "bar", bucket_type="pytest") \ - .add(self.bucket_name, "baz", bucket_type="pytest") \ + .add(self.bucket_name, "foo", bucket_type='no_siblings') \ + .add(self.bucket_name, "bar", bucket_type='no_siblings') \ + .add(self.bucket_name, "baz", bucket_type='no_siblings') \ .map("function (v) { return [1]; }") \ .reduce("Riak.reduceSum") \ .run() @@ -299,16 +338,16 @@ def test_javascript_named_map_reduce(self): def test_javascript_named_map_reduce_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() bucket.new("bar", 3).store() bucket.new("baz", 4).store() # Run the map... result = self.client \ - .add(self.bucket_name, "foo", bucket_type="pytest") \ - .add(self.bucket_name, "bar", bucket_type="pytest") \ - .add(self.bucket_name, "baz", bucket_type="pytest") \ + .add(self.bucket_name, "foo", bucket_type='no_siblings') \ + .add(self.bucket_name, "bar", bucket_type='no_siblings') \ + .add(self.bucket_name, "baz", bucket_type='no_siblings') \ .map("Riak.mapValuesJson") \ .reduce("Riak.reduceSum") \ .run() @@ -330,14 +369,14 @@ def test_javascript_bucket_map_reduce(self): def test_javascript_bucket_map_reduceP_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket("bucket_%s" % self.randint()) bucket.new("foo", 2).store() bucket.new("bar", 3).store() bucket.new("baz", 4).store() # Run the map... 
result = self.client \ - .add(bucket.name, bucket_type="pytest") \ + .add(bucket.name, bucket_type='no_siblings') \ .map("Riak.mapValuesJson") \ .reduce("Riak.reduceSum") \ .run() @@ -361,16 +400,16 @@ def test_javascript_arg_map_reduce(self): def test_javascript_arg_map_reduce_bucket_type(self): # Create the object... - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket(self.bucket_name) bucket.new("foo", 2).store() # Run the map... result = self.client \ - .add(self.bucket_name, "foo", 5, bucket_type="pytest") \ - .add(self.bucket_name, "foo", 10, bucket_type="pytest") \ - .add(self.bucket_name, "foo", 15, bucket_type="pytest") \ - .add(self.bucket_name, "foo", -15, bucket_type="pytest") \ - .add(self.bucket_name, "foo", -5, bucket_type="pytest") \ + .add(self.bucket_name, "foo", 5, bucket_type='no_siblings') \ + .add(self.bucket_name, "foo", 10, bucket_type='no_siblings') \ + .add(self.bucket_name, "foo", 15, bucket_type='no_siblings') \ + .add(self.bucket_name, "foo", -15, bucket_type='no_siblings') \ + .add(self.bucket_name, "foo", -5, bucket_type='no_siblings') \ .map("function(v, arg) { return [arg]; }") \ .reduce("Riak.reduceSum") \ .run() @@ -392,14 +431,14 @@ def test_key_filters(self): self.assertEqual(result, ["yahoo-20090613"]) def test_key_filters_bucket_type(self): - btype = self.client.bucket_type("pytest") + btype = self.client.bucket_type('no_siblings') bucket = btype.bucket("kftest") bucket.new("basho-20101215", 1).store() bucket.new("google-20110103", 2).store() bucket.new("yahoo-20090613", 3).store() result = self.client \ - .add("kftest", bucket_type="pytest") \ + .add("kftest", bucket_type='no_siblings') \ .add_key_filters([["tokenize", "-", 2]]) \ .add_key_filter("ends_with", "0613") \ .map("function (v, keydata) { return [v.key]; }") \ @@ -503,13 +542,13 @@ def test_mr_list_add_mix(self): u'"fooval2"', u'"fooval3"']) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') + 
@unittest.skipUnless(RUN_YZ, 'RUN_YZ is 0') def test_mr_search(self): """ Try a successful map/reduce from search results. """ - btype = self.client.bucket_type(self.yz_mr['btype']) - bucket = btype.bucket(self.yz_mr['bucket']) + btype = self.client.bucket_type(testrun_yz_mr['btype']) + bucket = btype.bucket(testrun_yz_mr['bucket']) bucket.new("Pebbles", {"name_s": "Fruity Pebbles", "maker_s": "Post", "sugar_i": 9, @@ -537,7 +576,7 @@ def test_mr_search(self): "fruit_b": False}).store() # Wait for Solr to catch up wait_for_yz_index(bucket, "Crunch") - mr = RiakMapReduce(self.client).search(self.yz_mr['bucket'], + mr = RiakMapReduce(self.client).search(testrun_yz_mr['bucket'], 'fruit_b:false') mr.map("""function(v) { var solr_doc = JSON.parse(v.values[0].data); @@ -547,7 +586,8 @@ def test_mr_search(self): self.assertEqual(result, [100]) -class MapReduceAliasTests(object): +@unittest.skipUnless(RUN_MAPREDUCE, 'RUN_MAPREDUCE is 0') +class MapReduceAliasTests(IntegrationTestBase, unittest.TestCase): """This tests the map reduce aliases""" def test_map_values(self): @@ -742,7 +782,8 @@ def test_filter_not_found(self): self.assertEqual(sorted(result), [1, 2]) -class MapReduceStreamTests(object): +@unittest.skipUnless(RUN_MAPREDUCE, 'RUN_MAPREDUCE is 0') +class MapReduceStreamTests(IntegrationTestBase, unittest.TestCase): def test_stream_results(self): bucket = self.client.bucket(self.bucket_name) bucket.new('one', data=1).store() diff --git a/riak/tests/test_misc.py b/riak/tests/test_misc.py new file mode 100644 index 00000000..3660720e --- /dev/null +++ b/riak/tests/test_misc.py @@ -0,0 +1,42 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class MiscTests(unittest.TestCase): + def test_timeout_validation(self): + from riak.client.operations import _validate_timeout + # valid cases + try: + _validate_timeout(None) + _validate_timeout(None, infinity_ok=True) + _validate_timeout('infinity', infinity_ok=True) + _validate_timeout(1234) + _validate_timeout(1234567898765432123456789) + except ValueError: + self.fail('_validate_timeout() unexpectedly raised ValueError') + # invalid cases + with self.assertRaises(ValueError): + _validate_timeout('infinity') + with self.assertRaises(ValueError): + _validate_timeout('infinity-foo') + with self.assertRaises(ValueError): + _validate_timeout('foobarbaz') + with self.assertRaises(ValueError): + _validate_timeout('1234') + with self.assertRaises(ValueError): + _validate_timeout(0) + with self.assertRaises(ValueError): + _validate_timeout(12.34) diff --git a/riak/tests/test_pool.py b/riak/tests/test_pool.py index 7984d436..346b2645 100644 --- a/riak/tests/test_pool.py +++ b/riak/tests/test_pool.py @@ -1,38 +1,34 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. 
See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +import unittest from six import PY2 -import platform -if PY2: - from Queue import Queue -else: - from queue import Queue from threading import Thread, currentThread -from riak.transports.pool import Pool, BadResource from random import SystemRandom from time import sleep -if platform.python_version() < '2.7': - unittest = __import__('unittest2') +from riak import RiakError +from riak.tests import RUN_POOL +from riak.tests.comparison import Comparison +from riak.transports.pool import Pool, BadResource + +if PY2: + from Queue import Queue else: - import unittest -from . import SKIP_POOL -from riak.tests import test_six + from queue import Queue class SimplePool(Pool): @@ -53,10 +49,23 @@ def create_resource(self): return [] -@unittest.skipIf(SKIP_POOL, - 'Skipping connection pool tests') -class PoolTest(unittest.TestCase, - test_six.Comparison): +@unittest.skipUnless(RUN_POOL, 'RUN_POOL is 0') +class PoolTest(unittest.TestCase, Comparison): + + def test_can_raise_bad_resource(self): + ex_msg = 'exception-message!' + with self.assertRaises(BadResource) as cm: + raise BadResource(ex_msg) + ex = cm.exception + self.assertEqual(ex.args[0], ex_msg) + + def test_bad_resource_inner_exception(self): + ex_msg = 'exception-message!' 
+ ex = RiakError(ex_msg) + with self.assertRaises(BadResource) as cm: + raise BadResource(ex) + br_ex = cm.exception + self.assertEqual(br_ex.args[0], ex) def test_yields_new_object_when_empty(self): """ @@ -354,5 +363,6 @@ def _run(): for th in threads: th.join() + if __name__ == '__main__': unittest.main() diff --git a/riak/tests/test_search.py b/riak/tests/test_search.py index fe8a23bd..efc5aa65 100644 --- a/riak/tests/test_search.py +++ b/riak/tests/test_search.py @@ -1,140 +1,169 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # -*- coding: utf-8 -*- from __future__ import print_function -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest -from . 
import SKIP_SEARCH +import unittest + +from riak.tests import RUN_SEARCH, RUN_YZ +from riak.tests.base import IntegrationTestBase + +testrun_search_bucket = 'searchbucket' + + +def setUpModule(): + if RUN_SEARCH and not RUN_YZ: + c = IntegrationTestBase.create_client() + b = c.bucket(testrun_search_bucket) + b.enable_search() + c.close() + + +def tearDownModule(): + if RUN_SEARCH and not RUN_YZ: + c = IntegrationTestBase.create_client() + b = c.bucket(testrun_search_bucket) + b.clear_properties() + c.close() -class EnableSearchTests(object): - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') +@unittest.skipUnless(RUN_SEARCH, 'RUN_SEARCH is 0') +class EnableSearchTests(IntegrationTestBase, unittest.TestCase): def test_bucket_search_enabled(self): bucket = self.client.bucket(self.bucket_name) self.assertFalse(bucket.search_enabled()) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_enable_search_commit_hook(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.clear_properties() - self.assertFalse(self.create_client(). - bucket(self.search_bucket). - search_enabled()) + + c = self.create_client() + self.assertFalse(c.bucket(testrun_search_bucket).search_enabled()) + c.close() + bucket.enable_search() - self.assertTrue(self.create_client(). - bucket(self.search_bucket). 
- search_enabled()) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') + c = self.create_client() + self.assertTrue(c.bucket(testrun_search_bucket).search_enabled()) + c.close() + def test_disable_search_commit_hook(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.clear_properties() bucket.enable_search() - self.assertTrue(self.create_client().bucket(self.search_bucket) - .search_enabled()) + + c = self.create_client() + self.assertTrue(c.bucket(testrun_search_bucket).search_enabled()) + c.close() + bucket.disable_search() - self.assertFalse(self.create_client().bucket(self.search_bucket) - .search_enabled()) + + c = self.create_client() + self.assertFalse(c.bucket(testrun_search_bucket).search_enabled()) + c.close() + bucket.enable_search() -class SolrSearchTests(object): - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') +@unittest.skipUnless(RUN_SEARCH, 'RUN_SEARCH is 0') +class SolrSearchTests(IntegrationTestBase, unittest.TestCase): def test_add_document_to_index(self): - self.client.fulltext_add(self.search_bucket, + self.client.fulltext_add(testrun_search_bucket, [{"id": "doc", "username": "tony"}]) - results = self.client.fulltext_search(self.search_bucket, + results = self.client.fulltext_search(testrun_search_bucket, "username:tony") self.assertEqual("tony", results['docs'][0]['username']) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_add_multiple_documents_to_index(self): self.client.fulltext_add( - self.search_bucket, + testrun_search_bucket, [{"id": "dizzy", "username": "dizzy"}, {"id": "russell", "username": "russell"}]) results = self.client.fulltext_search( - self.search_bucket, "username:russell OR username:dizzy") + testrun_search_bucket, "username:russell OR username:dizzy") self.assertEqual(2, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_delete_documents_from_search_by_id(self): 
self.client.fulltext_add( - self.search_bucket, + testrun_search_bucket, [{"id": "dizzy", "username": "dizzy"}, {"id": "russell", "username": "russell"}]) - self.client.fulltext_delete(self.search_bucket, docs=["dizzy"]) + self.client.fulltext_delete(testrun_search_bucket, docs=["dizzy"]) results = self.client.fulltext_search( - self.search_bucket, "username:russell OR username:dizzy") + testrun_search_bucket, "username:russell OR username:dizzy") self.assertEqual(1, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_delete_documents_from_search_by_query(self): self.client.fulltext_add( - self.search_bucket, + testrun_search_bucket, [{"id": "dizzy", "username": "dizzy"}, {"id": "russell", "username": "russell"}]) self.client.fulltext_delete( - self.search_bucket, + testrun_search_bucket, queries=["username:dizzy", "username:russell"]) results = self.client.fulltext_search( - self.search_bucket, "username:russell OR username:dizzy") + testrun_search_bucket, "username:russell OR username:dizzy") self.assertEqual(0, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_delete_documents_from_search_by_query_and_id(self): self.client.fulltext_add( - self.search_bucket, + testrun_search_bucket, [{"id": "dizzy", "username": "dizzy"}, {"id": "russell", "username": "russell"}]) self.client.fulltext_delete( - self.search_bucket, + testrun_search_bucket, docs=["dizzy"], queries=["username:russell"]) results = self.client.fulltext_search( - self.search_bucket, + testrun_search_bucket, "username:russell OR username:dizzy") self.assertEqual(0, len(results['docs'])) -class SearchTests(object): - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') +@unittest.skipUnless(RUN_SEARCH, 'RUN_SEARCH is 0') +class SearchTests(IntegrationTestBase, unittest.TestCase): def test_solr_search_from_bucket(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) 
bucket.new("user", {"username": "roidrage"}).store() results = bucket.search("username:roidrage") self.assertEqual(1, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_solr_search_with_params_from_bucket(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.new("user", {"username": "roidrage"}).store() results = bucket.search("username:roidrage", wt="xml") self.assertEqual(1, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_solr_search_with_params(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.new("user", {"username": "roidrage"}).store() results = self.client.fulltext_search( - self.search_bucket, + testrun_search_bucket, "username:roidrage", wt="xml") self.assertEqual(1, len(results['docs'])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_solr_search(self): - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.new("user", {"username": "roidrage"}).store() - results = self.client.fulltext_search(self.search_bucket, + results = self.client.fulltext_search(testrun_search_bucket, "username:roidrage") self.assertEqual(1, len(results["docs"])) - @unittest.skipIf(SKIP_SEARCH, 'SKIP_SEARCH is defined') def test_search_integration(self): # Create some objects to search across... - bucket = self.client.bucket(self.search_bucket) + bucket = self.client.bucket(testrun_search_bucket) bucket.new("one", {"foo": "one", "bar": "red"}).store() bucket.new("two", {"foo": "two", "bar": "green"}).store() bucket.new("three", {"foo": "three", "bar": "blue"}).store() @@ -142,7 +171,7 @@ def test_search_integration(self): bucket.new("five", {"foo": "five", "bar": "yellow"}).store() # Run some operations... 
- results = self.client.fulltext_search(self.search_bucket, + results = self.client.fulltext_search(testrun_search_bucket, "foo:one OR foo:two") if (len(results) == 0): print("\n\nNot running test \"testSearchIntegration()\".\n") @@ -153,6 +182,6 @@ def test_search_integration(self): self.assertEqual(len(results['docs']), 2) query = "(foo:one OR foo:two OR foo:three OR foo:four) AND\ (NOT bar:green)" - results = self.client.fulltext_search(self.search_bucket, query) + results = self.client.fulltext_search(testrun_search_bucket, query) self.assertEqual(len(results['docs']), 3) diff --git a/riak/tests/test_security.py b/riak/tests/test_security.py index b036a94b..d9e1ee10 100644 --- a/riak/tests/test_security.py +++ b/riak/tests/test_security.py @@ -1,48 +1,48 @@ -# -*- coding: utf-8 -*- -""" -Copyright 2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. 
See the License for the -specific language governing permissions and limitations -under the License. -""" +# -*- coding: utf-8 -*- +import sys +import unittest -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest from riak.tests import RUN_SECURITY, SECURITY_USER, SECURITY_PASSWD, \ SECURITY_CACERT, SECURITY_KEY, SECURITY_CERT, SECURITY_REVOKED, \ - SECURITY_CERT_USER, SECURITY_CERT_PASSWD, SECURITY_BAD_CERT + SECURITY_CERT_USER, SECURITY_BAD_CERT, SECURITY_CIPHERS from riak.security import SecurityCreds -from six import PY3 +from riak.tests.base import IntegrationTestBase -class SecurityTests(object): - @unittest.skipIf(RUN_SECURITY, 'RUN_SECURITY is set') +class SecurityTests(IntegrationTestBase, unittest.TestCase): + @unittest.skipIf(RUN_SECURITY, 'RUN_SECURITY is 1') def test_security_disabled(self): - creds = SecurityCreds(username=SECURITY_USER, - password=SECURITY_PASSWD, - cacert_file=SECURITY_CACERT) - client = self.create_client(credentials=creds) + """ + Test valid security settings without security enabled + """ + topts = {'timeout': 1} + # NB: can't use SECURITY_CREDS here since they won't be set + # if RUN_SECURITY is UN-set + creds = SecurityCreds(username='foo', password='bar') + client = self.create_client(credentials=creds, + transport_options=topts) myBucket = client.bucket('test') val1 = "foobar" key1 = myBucket.new('x', data=val1) with self.assertRaises(Exception): key1.store() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_basic_connection(self): myBucket = self.client.bucket('test') val1 = "foobar" @@ -50,44 +50,56 @@ def test_security_basic_connection(self): key1.store() myBucket.get('x') - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_bad_user(self): - creds = 
SecurityCreds(username='foo', password=SECURITY_PASSWD, - cacert_file=SECURITY_CACERT) + creds = SecurityCreds(username='foo', + password=SECURITY_PASSWD, + cacert_file=SECURITY_CACERT, + ciphers=SECURITY_CIPHERS) client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_bad_password(self): - creds = SecurityCreds(username=SECURITY_USER, password='foo', - cacert_file=SECURITY_CACERT) + creds = SecurityCreds(username=SECURITY_USER, + password='foo', + cacert_file=SECURITY_CACERT, + ciphers=SECURITY_CIPHERS) client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_invalid_cert(self): - creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, - cacert_file='/tmp/foo') + creds = SecurityCreds(username=SECURITY_USER, + password=SECURITY_PASSWD, + cacert_file='/tmp/foo', + ciphers=SECURITY_CIPHERS) client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_password_without_cacert(self): - creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD) + creds = SecurityCreds(username=SECURITY_USER, + password=SECURITY_PASSWD, + ciphers=SECURITY_CIPHERS) client = self.create_client(credentials=creds) with self.assertRaises(Exception): myBucket = client.bucket('test') val1 = "foobar" key1 = myBucket.new('x', data=val1) key1.store() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 
'RUN_SECURITY is 0') def test_security_cert_authentication(self): creds = SecurityCreds(username=SECURITY_CERT_USER, - password=SECURITY_CERT_PASSWD, + ciphers=SECURITY_CIPHERS, cert_file=SECURITY_CERT, pkey_file=SECURITY_KEY, cacert_file=SECURITY_CACERT) @@ -104,41 +116,48 @@ def test_security_cert_authentication(self): with self.assertRaises(Exception): key1.store() myBucket.get('x') + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_revoked_cert(self): - creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, + creds = SecurityCreds(username=SECURITY_USER, + password=SECURITY_PASSWD, + ciphers=SECURITY_CIPHERS, cacert_file=SECURITY_CACERT, crl_file=SECURITY_REVOKED) - # Curenly Python 3.x native CRL doesn't seem to work - # as advertised - if PY3: + # Currently Python >= 2.7.9 and Python 3.x native CRL doesn't seem to + # work as advertised + if sys.version_info >= (2, 7, 9): return client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_bad_ca_cert(self): creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, + ciphers=SECURITY_CIPHERS, cacert_file=SECURITY_BAD_CERT) client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_ciphers(self): creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, - cacert_file=SECURITY_CACERT, - ciphers='DHE-RSA-AES256-SHA') + ciphers=SECURITY_CIPHERS, + cacert_file=SECURITY_CACERT) client = self.create_client(credentials=creds) myBucket = client.bucket('test') val1 = "foobar" 
key1 = myBucket.new('x', data=val1) key1.store() myBucket.get('x') + client.close() - @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is not set') + @unittest.skipUnless(RUN_SECURITY, 'RUN_SECURITY is 0') def test_security_bad_ciphers(self): creds = SecurityCreds(username=SECURITY_USER, password=SECURITY_PASSWD, cacert_file=SECURITY_CACERT, @@ -146,3 +165,4 @@ def test_security_bad_ciphers(self): client = self.create_client(credentials=creds) with self.assertRaises(Exception): client.get_buckets() + client.close() diff --git a/riak/tests/test_server_test.py b/riak/tests/test_server_test.py index d02debe6..2b5cfc48 100644 --- a/riak/tests/test_server_test.py +++ b/riak/tests/test_server_test.py @@ -1,7 +1,24 @@ -from riak.test_server import TestServer +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys import unittest +from riak.test_server import TestServer + +@unittest.skipIf(sys.platform == 'win32', 'Windows is not supported') class TestServerTestCase(unittest.TestCase): def setUp(self): self.test_server = TestServer() diff --git a/riak/tests/test_timeseries_pbuf.py b/riak/tests/test_timeseries_pbuf.py new file mode 100644 index 00000000..8cffa1c7 --- /dev/null +++ b/riak/tests/test_timeseries_pbuf.py @@ -0,0 +1,511 @@ +# -*- coding: utf-8 -*- +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import six +import unittest + +import riak.pb.riak_ts_pb2 +from riak.pb.riak_ts_pb2 import TsColumnType + +from riak import RiakError +from riak.codecs.pbuf import PbufCodec +from riak.table import Table +from riak.tests import RUN_TIMESERIES +from riak.tests.base import IntegrationTestBase +from riak.ts_object import TsObject +from riak.util import str_to_bytes, bytes_to_str, \ + unix_time_millis, datetime_from_unix_time_millis, \ + is_timeseries_supported + +table_name = 'GeoCheckin' + +bd0 = '时间序列' +bd1 = 'временные ряды' + +blob0 = b'\x00\x01\x02\x03\x04\x05\x06\x07' + +fiveMins = datetime.timedelta(0, 300) +# NB: last arg is microseconds, 987ms expressed +ts0 = datetime.datetime(2015, 1, 1, 12, 0, 0, 987000) +ex0ms = 1420113600987 + +ts1 = ts0 + fiveMins +ex1ms = 1420113900987 + + +@unittest.skipUnless(is_timeseries_supported(), + 'Timeseries not supported by this Python version') +class TimeseriesPbufUnitTests(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.ts0ms = unix_time_millis(ts0) + if cls.ts0ms != ex0ms: + raise AssertionError( + 'expected {:d} to equal {:d}'.format(cls.ts0ms, ex0ms)) + + cls.ts1ms = unix_time_millis(ts1) + if cls.ts1ms != ex1ms: + raise AssertionError( + 'expected {:d} to equal {:d}'.format(cls.ts1ms, ex1ms)) + + cls.rows = [ + [bd0, 0, 1.2, ts0, True, None], + [bd1, 3, 4.5, ts1, False, blob0] + ] + cls.test_key = ['hash1', 'user2', ts0] + cls.table = 
Table(None, table_name) + + def validate_keyreq(self, req): + self.assertEqual(self.table.name, bytes_to_str(req.table)) + self.assertEqual(len(self.test_key), len(req.key)) + self.assertEqual('hash1', bytes_to_str(req.key[0].varchar_value)) + self.assertEqual('user2', bytes_to_str(req.key[1].varchar_value)) + self.assertEqual(self.ts0ms, req.key[2].timestamp_value) + + def test_encode_decode_timestamp(self): + ts0ms = unix_time_millis(ts0) + self.assertEqual(ts0ms, ex0ms) + ts0_d = datetime_from_unix_time_millis(ts0ms) + self.assertEqual(ts0, ts0_d) + + def test_encode_data_for_get(self): + c = PbufCodec() + msg = c.encode_timeseries_keyreq( + self.table, self.test_key, is_delete=False) + req = riak.pb.riak_ts_pb2.TsGetReq() + req.ParseFromString(msg.data) + self.validate_keyreq(req) + + def test_encode_data_for_delete(self): + c = PbufCodec() + msg = c.encode_timeseries_keyreq( + self.table, self.test_key, is_delete=True) + req = riak.pb.riak_ts_pb2.TsDelReq() + req.ParseFromString(msg.data) + self.validate_keyreq(req) + + def test_encode_data_for_put(self): + c = PbufCodec() + tsobj = TsObject(None, self.table, self.rows, None) + msg = c.encode_timeseries_put(tsobj) + req = riak.pb.riak_ts_pb2.TsPutReq() + req.ParseFromString(msg.data) + + # NB: expected, actual + self.assertEqual(self.table.name, bytes_to_str(req.table)) + self.assertEqual(len(self.rows), len(req.rows)) + + r0 = req.rows[0] + self.assertEqual(bytes_to_str(r0.cells[0].varchar_value), + self.rows[0][0]) + self.assertEqual(r0.cells[1].sint64_value, self.rows[0][1]) + self.assertEqual(r0.cells[2].double_value, self.rows[0][2]) + self.assertEqual(r0.cells[3].timestamp_value, self.ts0ms) + self.assertEqual(r0.cells[4].boolean_value, self.rows[0][4]) + self.assertFalse(r0.cells[5].HasField('varchar_value')) + + r1 = req.rows[1] + self.assertEqual(bytes_to_str(r1.cells[0].varchar_value), + self.rows[1][0]) + self.assertEqual(r1.cells[1].sint64_value, self.rows[1][1]) + 
self.assertEqual(r1.cells[2].double_value, self.rows[1][2]) + self.assertEqual(r1.cells[3].timestamp_value, self.ts1ms) + self.assertEqual(r1.cells[4].boolean_value, self.rows[1][4]) + self.assertEqual(r1.cells[5].varchar_value, self.rows[1][5]) + + def test_encode_data_for_listkeys(self): + c = PbufCodec(client_timeouts=True) + msg = c.encode_timeseries_listkeysreq(self.table, 1234) + req = riak.pb.riak_ts_pb2.TsListKeysReq() + req.ParseFromString(msg.data) + self.assertEqual(self.table.name, bytes_to_str(req.table)) + self.assertEqual(1234, req.timeout) + + def test_decode_data_from_query(self): + tqr = riak.pb.riak_ts_pb2.TsQueryResp() + + c0 = tqr.columns.add() + c0.name = str_to_bytes('col_varchar') + c0.type = TsColumnType.Value('VARCHAR') + c1 = tqr.columns.add() + c1.name = str_to_bytes('col_integer') + c1.type = TsColumnType.Value('SINT64') + c2 = tqr.columns.add() + c2.name = str_to_bytes('col_double') + c2.type = TsColumnType.Value('DOUBLE') + c3 = tqr.columns.add() + c3.name = str_to_bytes('col_timestamp') + c3.type = TsColumnType.Value('TIMESTAMP') + c4 = tqr.columns.add() + c4.name = str_to_bytes('col_boolean') + c4.type = TsColumnType.Value('BOOLEAN') + c5 = tqr.columns.add() + c5.name = str_to_bytes('col_blob') + c5.type = TsColumnType.Value('BLOB') + + r0 = tqr.rows.add() + r0c0 = r0.cells.add() + r0c0.varchar_value = str_to_bytes(self.rows[0][0]) + r0c1 = r0.cells.add() + r0c1.sint64_value = self.rows[0][1] + r0c2 = r0.cells.add() + r0c2.double_value = self.rows[0][2] + r0c3 = r0.cells.add() + r0c3.timestamp_value = self.ts0ms + r0c4 = r0.cells.add() + r0c4.boolean_value = self.rows[0][4] + r0.cells.add() + + r1 = tqr.rows.add() + r1c0 = r1.cells.add() + r1c0.varchar_value = str_to_bytes(self.rows[1][0]) + r1c1 = r1.cells.add() + r1c1.sint64_value = self.rows[1][1] + r1c2 = r1.cells.add() + r1c2.double_value = self.rows[1][2] + r1c3 = r1.cells.add() + r1c3.timestamp_value = self.ts1ms + r1c4 = r1.cells.add() + r1c4.boolean_value = self.rows[1][4] 
+ r1c5 = r1.cells.add() + r1c5.varchar_value = self.rows[1][5] + + tsobj = TsObject(None, self.table) + c = PbufCodec() + c.decode_timeseries(tqr, tsobj, True) + + self.assertEqual(len(tsobj.rows), len(self.rows)) + self.assertEqual(len(tsobj.columns.names), len(tqr.columns)) + self.assertEqual(len(tsobj.columns.types), len(tqr.columns)) + + cn, ct = tsobj.columns + self.assertEqual(cn[0], 'col_varchar') + self.assertEqual(ct[0], 'varchar') + self.assertEqual(cn[1], 'col_integer') + self.assertEqual(ct[1], 'sint64') + self.assertEqual(cn[2], 'col_double') + self.assertEqual(ct[2], 'double') + self.assertEqual(cn[3], 'col_timestamp') + self.assertEqual(ct[3], 'timestamp') + self.assertEqual(cn[4], 'col_boolean') + self.assertEqual(ct[4], 'boolean') + self.assertEqual(cn[5], 'col_blob') + self.assertEqual(ct[5], 'blob') + + r0 = tsobj.rows[0] + self.assertEqual(bytes_to_str(r0[0]), self.rows[0][0]) + self.assertEqual(r0[1], self.rows[0][1]) + self.assertEqual(r0[2], self.rows[0][2]) + self.assertEqual(r0[3], ts0) + self.assertEqual(r0[4], self.rows[0][4]) + self.assertEqual(r0[5], self.rows[0][5]) + + r1 = tsobj.rows[1] + self.assertEqual(bytes_to_str(r1[0]), self.rows[1][0]) + self.assertEqual(r1[1], self.rows[1][1]) + self.assertEqual(r1[2], self.rows[1][2]) + self.assertEqual(r1[3], ts1) + self.assertEqual(r1[4], self.rows[1][4]) + self.assertEqual(r1[5], self.rows[1][5]) + + +@unittest.skipUnless(is_timeseries_supported() and RUN_TIMESERIES, + 'Timeseries not supported by this Python version' + ' or RUN_TIMESERIES is 0') +class TimeseriesPbufTests(IntegrationTestBase, unittest.TestCase): + client_options = {'transport_options': + {'use_ttb': False, 'ts_convert_timestamp': True}} + + @classmethod + def setUpClass(cls): + super(TimeseriesPbufTests, cls).setUpClass() + cls.now = datetime.datetime.utcfromtimestamp(144379690.987000) + fiveMinsAgo = cls.now - fiveMins + tenMinsAgo = fiveMinsAgo - fiveMins + fifteenMinsAgo = tenMinsAgo - fiveMins + twentyMinsAgo = 
fifteenMinsAgo - fiveMins + twentyFiveMinsAgo = twentyMinsAgo - fiveMins + + client = cls.create_client() + table = client.table(table_name) + rows = [ + ['hash1', 'user2', twentyFiveMinsAgo, 'typhoon', 90.3], + ['hash1', 'user2', twentyMinsAgo, 'hurricane', 82.3], + ['hash1', 'user2', fifteenMinsAgo, 'rain', 79.0], + ['hash1', 'user2', fiveMinsAgo, 'wind', None], + ['hash1', 'user2', cls.now, 'snow', 20.1] + ] + try: + ts_obj = table.new(rows) + result = ts_obj.store() + except (RiakError, NotImplementedError) as e: + raise unittest.SkipTest(e) + finally: + client.close() + if result is not True: + raise AssertionError("expected success") + + cls.nowMsec = unix_time_millis(cls.now) + cls.fiveMinsAgo = fiveMinsAgo + cls.twentyMinsAgo = twentyMinsAgo + cls.twentyFiveMinsAgo = twentyFiveMinsAgo + cls.tenMinsAgoMsec = unix_time_millis(tenMinsAgo) + cls.twentyMinsAgoMsec = unix_time_millis(twentyMinsAgo) + cls.numCols = len(rows[0]) + cls.rows = rows + encoded_rows = [ + [str_to_bytes('hash1'), str_to_bytes('user2'), + twentyFiveMinsAgo, str_to_bytes('typhoon'), 90.3], + [str_to_bytes('hash1'), str_to_bytes('user2'), + twentyMinsAgo, str_to_bytes('hurricane'), 82.3], + [str_to_bytes('hash1'), str_to_bytes('user2'), + fifteenMinsAgo, str_to_bytes('rain'), 79.0], + [str_to_bytes('hash1'), str_to_bytes('user2'), + fiveMinsAgo, str_to_bytes('wind'), None], + [str_to_bytes('hash1'), str_to_bytes('user2'), + cls.now, str_to_bytes('snow'), 20.1] + ] + cls.encoded_rows = encoded_rows + + def validate_len(self, ts_obj, elen): + if isinstance(elen, tuple): + self.assertIn(len(ts_obj.columns.names), elen) + self.assertIn(len(ts_obj.columns.types), elen) + self.assertIn(len(ts_obj.rows), elen) + else: + self.assertEqual(len(ts_obj.columns.names), elen) + self.assertEqual(len(ts_obj.columns.types), elen) + self.assertEqual(len(ts_obj.rows), elen) + + def validate_data(self, ts_obj): + if ts_obj.columns is not None: + self.assertEqual(len(ts_obj.columns.names), self.numCols) + 
self.assertEqual(len(ts_obj.columns.types), self.numCols) + self.assertEqual(len(ts_obj.rows), 1) + row = ts_obj.rows[0] + self.assertEqual(bytes_to_str(row[0]), 'hash1') + self.assertEqual(bytes_to_str(row[1]), 'user2') + self.assertEqual(row[2], self.fiveMinsAgo) + self.assertEqual(row[2].microsecond, 987000) + self.assertEqual(bytes_to_str(row[3]), 'wind') + self.assertIsNone(row[4]) + + def test_insert_data_via_sql(self): + query = """ + INSERT INTO GeoCheckin_Wide + (geohash, user, time, weather, temperature, uv_index, observed) + VALUES + ('hash3', 'user3', 1460203200000, 'tornado', 43.5, 128, True); + """ + ts_obj = self.client.ts_query('GeoCheckin_Wide', query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, 0) + + def test_query_that_creates_table_using_interpolation(self): + table = self.randname() + query = """CREATE TABLE test-{table} ( + geohash varchar not null, + user varchar not null, + time timestamp not null, + weather varchar not null, + temperature double, + PRIMARY KEY((geohash, user, quantum(time, 15, m)), + geohash, user, time)) + """ + ts_obj = self.client.ts_query(table, query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, 0) + + def test_query_that_returns_table_description(self): + fmt = 'DESCRIBE {table}' + query = fmt.format(table=table_name) + ts_obj = self.client.ts_query(table_name, query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, (5, 7, 8)) + + def test_query_that_returns_table_description_using_interpolation(self): + query = 'Describe {table}' + ts_obj = self.client.ts_query(table_name, query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, (5, 7, 8)) + + def test_query_description_via_table(self): + query = 'describe {table}' + table = Table(self.client, table_name) + ts_obj = table.query(query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, (5, 7, 8)) + + def test_get_description(self): + ts_obj = self.client.ts_describe(table_name) + self.assertIsNotNone(ts_obj) + 
self.validate_len(ts_obj, (5, 7, 8)) + + def test_get_description_via_table(self): + table = Table(self.client, table_name) + ts_obj = table.describe() + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, (5, 7, 8)) + + def test_query_that_returns_no_data(self): + fmt = """ + select * from {table} where + time > 0 and time < 10 and + geohash = 'hash1' and + user = 'user1' + """ + query = fmt.format(table=table_name) + ts_obj = self.client.ts_query(table_name, query) + self.validate_len(ts_obj, 0) + + def test_query_that_returns_no_data_using_interpolation(self): + query = """ + select * from {table} where + time > 0 and time < 10 and + geohash = 'hash1' and + user = 'user1' + """ + ts_obj = self.client.ts_query(table_name, query) + self.validate_len(ts_obj, 0) + + def test_query_that_matches_some_data(self): + fmt = """ + select * from {table} where + time > {t1} and time < {t2} and + geohash = 'hash1' and + user = 'user2' + """ + query = fmt.format( + table=table_name, + t1=self.tenMinsAgoMsec, + t2=self.nowMsec) + ts_obj = self.client.ts_query(table_name, query) + self.validate_data(ts_obj) + + def test_query_that_matches_some_data_using_interpolation(self): + fmt = """ + select * from {{table}} where + time > {t1} and time < {t2} and + geohash = 'hash1' and + user = 'user2' + """ + query = fmt.format( + t1=self.tenMinsAgoMsec, + t2=self.nowMsec) + ts_obj = self.client.ts_query(table_name, query) + self.validate_data(ts_obj) + + def test_query_that_matches_more_data(self): + fmt = """ + select * from {table} where + time >= {t1} and time <= {t2} and + geohash = 'hash1' and + user = 'user2' + """ + query = fmt.format( + table=table_name, + t1=self.twentyMinsAgoMsec, + t2=self.nowMsec) + ts_obj = self.client.ts_query(table_name, query) + j = 0 + for i, want in enumerate(self.encoded_rows): + if want[2] == self.twentyFiveMinsAgo: + continue + got = ts_obj.rows[j] + j += 1 + self.assertListEqual(got, want) + + def test_get_with_invalid_key(self): + key = 
['hash1', 'user2'] + with self.assertRaises(RiakError): + self.client.ts_get(table_name, key) + + def test_get_single_value(self): + key = ['hash1', 'user2', self.fiveMinsAgo] + ts_obj = self.client.ts_get(table_name, key) + self.assertIsNotNone(ts_obj) + self.validate_data(ts_obj) + + def test_get_single_value_via_table(self): + key = ['hash1', 'user2', self.fiveMinsAgo] + table = Table(self.client, table_name) + ts_obj = table.get(key) + self.assertIsNotNone(ts_obj) + self.validate_data(ts_obj) + + def test_stream_keys(self): + table = Table(self.client, table_name) + streamed_keys = [] + for keylist in table.stream_keys(): + self.validate_keylist(streamed_keys, keylist) + self.assertGreater(len(streamed_keys), 0) + + def test_stream_keys_from_string_table(self): + streamed_keys = [] + for keylist in self.client.ts_stream_keys(table_name): + self.validate_keylist(streamed_keys, keylist) + self.assertGreater(len(streamed_keys), 0) + + def validate_keylist(self, streamed_keys, keylist): + self.assertNotEqual([], keylist) + streamed_keys += keylist + for key in keylist: + self.assertIsInstance(key, list) + self.assertEqual(len(key), 3) + self.assertEqual(bytes_to_str(key[0]), 'hash1') + self.assertEqual(bytes_to_str(key[1]), 'user2') + self.assertIsInstance(key[2], datetime.datetime) + + def test_delete_single_value(self): + key = ['hash1', 'user2', self.twentyFiveMinsAgo] + rslt = self.client.ts_delete(table_name, key) + self.assertTrue(rslt) + ts_obj = self.client.ts_get(table_name, key) + self.assertIsNotNone(ts_obj) + self.assertEqual(len(ts_obj.rows), 0) + self.assertEqual(len(ts_obj.columns.names), 0) + self.assertEqual(len(ts_obj.columns.types), 0) + + def test_create_error_via_put(self): + table = Table(self.client, table_name) + ts_obj = table.new([]) + with self.assertRaises(RiakError): + ts_obj.store() + + def test_store_and_fetch_gh_483(self): + now = datetime.datetime(2015, 1, 1, 12, 0, 0) + table = self.client.table(table_name) + rows = [ + ['hash1', 
'user2', now, 'frazzle', 12.3] + ] + + ts_obj = table.new(rows) + result = ts_obj.store() + self.assertTrue(result) + + k = ['hash1', 'user2', now] + ts_obj = self.client.ts_get(table_name, k) + self.assertIsNotNone(ts_obj) + ts_cols = ts_obj.columns + self.assertEqual(len(ts_cols.names), 5) + self.assertEqual(len(ts_cols.types), 5) + self.assertEqual(len(ts_obj.rows), 1) + + row = ts_obj.rows[0] + self.assertEqual(len(row), 5) + exp = [six.b('hash1'), six.b('user2'), now, + six.b('frazzle'), 12.3] + self.assertEqual(row, exp) diff --git a/riak/tests/test_timeseries_ttb.py b/riak/tests/test_timeseries_ttb.py new file mode 100644 index 00000000..d2434799 --- /dev/null +++ b/riak/tests/test_timeseries_ttb.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import logging +import six +import unittest + +from erlastic import decode, encode +from erlastic.types import Atom + +from riak import RiakError +from riak.table import Table +from riak.tests import RUN_TIMESERIES +from riak.ts_object import TsObject +from riak.codecs.ttb import TtbCodec +from riak.util import str_to_bytes, bytes_to_str, \ + unix_time_millis, is_timeseries_supported +from riak.tests.base import IntegrationTestBase + +rpberrorresp_a = Atom('rpberrorresp') +tsgetreq_a = Atom('tsgetreq') +tsgetresp_a = Atom('tsgetresp') +tsputreq_a = Atom('tsputreq') + +udef_a = Atom('undefined') +varchar_a = Atom('varchar') +sint64_a = Atom('sint64') +double_a = Atom('double') +timestamp_a = Atom('timestamp') +boolean_a = Atom('boolean') + +table_name = 'GeoCheckin' + +str0 = 'ascii-0' +str1 = 'ascii-1' + +bd0 = six.u('时间序列') +bd1 = six.u('временные ряды') + +blob0 = b'\x00\x01\x02\x03\x04\x05\x06\x07' + +fiveMins = datetime.timedelta(0, 300) +ts0 = datetime.datetime(2015, 1, 1, 12, 1, 2, 987000) +ts1 = ts0 + fiveMins + + +@unittest.skipUnless(is_timeseries_supported(), + 'Timeseries not supported by this Python version') +class TimeseriesTtbUnitTests(unittest.TestCase): + def setUp(self): + self.table = Table(None, table_name) + + def test_encode_data_for_get(self): + keylist = [ + str_to_bytes('hash1'), str_to_bytes('user2'), unix_time_millis(ts0) + ] + req = tsgetreq_a, str_to_bytes(table_name), keylist, udef_a + req_test = encode(req) + + test_key = ['hash1', 'user2', ts0] + c = TtbCodec() + msg = c.encode_timeseries_keyreq(self.table, test_key) + self.assertEqual(req_test, msg.data) + + # {tsgetresp, + # { + # [<<"geohash">>, <<"user">>, <<"time">>, + # <<"weather">>, <<"temperature">>, <<"blob">>], + # [varchar, varchar, timestamp, varchar, double, blob], + # [(<<"hash1">>, <<"user2">>, 144378190987, <<"typhoon">>, + # 90.3, <<0,1,2,3,4,5,6,7>>)] + # } + # } + def test_decode_data_from_get(self): + colnames = ["varchar", "sint64", "double", 
"timestamp", + "boolean", "varchar", "varchar", "blob"] + coltypes = [varchar_a, sint64_a, double_a, timestamp_a, + boolean_a, varchar_a, varchar_a] + r0 = (bd0, 0, 1.2, unix_time_millis(ts0), True, + [], str1, None, None) + r1 = (bd1, 3, 4.5, unix_time_millis(ts1), False, + [], str1, None, blob0) + rows = [r0, r1] + # { tsgetresp, { [colnames], [coltypes], [rows] } } + data_t = colnames, coltypes, rows + rsp_data = tsgetresp_a, data_t + rsp_ttb = encode(rsp_data) + + tsobj = TsObject(None, self.table) + c = TtbCodec() + c.decode_timeseries(decode(rsp_ttb), tsobj) + + for i in range(0, 1): + dr = rows[i] + r = tsobj.rows[i] # encoded + self.assertEqual(r[0], dr[0].encode('utf-8')) + self.assertEqual(r[1], dr[1]) + self.assertEqual(r[2], dr[2]) + # NB *not* decoding timestamps + # dt = datetime_from_unix_time_millis(dr[3]) + self.assertEqual(r[3], dr[3]) + if i == 0: + self.assertEqual(r[4], True) + else: + self.assertEqual(r[4], False) + self.assertEqual(r[5], None) + self.assertEqual(r[6], dr[6].encode('ascii')) + self.assertEqual(r[7], None) + self.assertEqual(r[8], dr[8]) + + def test_encode_data_for_put(self): + r0 = (bd0, 0, 1.2, unix_time_millis(ts0), True, []) + r1 = (bd1, 3, 4.5, unix_time_millis(ts1), False, []) + rows = [r0, r1] + req = tsputreq_a, str_to_bytes(table_name), [], rows + req_test = encode(req) + + rows_to_encode = [ + [bd0, 0, 1.2, ts0, True, None], + [bd1, 3, 4.5, ts1, False, None] + ] + + tsobj = TsObject(None, self.table, rows_to_encode, None) + c = TtbCodec() + msg = c.encode_timeseries_put(tsobj) + self.assertEqual(req_test, msg.data) + + +@unittest.skipUnless(is_timeseries_supported() and RUN_TIMESERIES, + 'Timeseries not supported by this Python version' + ' or RUN_TIMESERIES is 0') +class TimeseriesTtbTests(IntegrationTestBase, unittest.TestCase): + client_options = {'transport_options': + {'use_ttb': True, 'ts_convert_timestamp': True}} + + @classmethod + def setUpClass(cls): + super(TimeseriesTtbTests, cls).setUpClass() + client = 
cls.create_client() + skey = 'test-key' + btype = client.bucket_type(table_name) + bucket = btype.bucket(table_name) + try: + bucket.get(skey) + except (RiakError, NotImplementedError) as e: + raise unittest.SkipTest(e) + finally: + client.close() + + def validate_len(self, ts_obj, elen): + if isinstance(elen, tuple): + self.assertIn(len(ts_obj.columns.names), elen) + self.assertIn(len(ts_obj.columns.types), elen) + self.assertIn(len(ts_obj.rows), elen) + else: + self.assertEqual(len(ts_obj.columns.names), elen) + self.assertEqual(len(ts_obj.columns.types), elen) + self.assertEqual(len(ts_obj.rows), elen) + + def test_insert_data_via_sql(self): + query = """ + INSERT INTO GeoCheckin_Wide + (geohash, user, time, weather, temperature, uv_index, observed) + VALUES + ('hash3', 'user3', 1460203200000, 'tornado', 43.5, 128, True); + """ + ts_obj = self.client.ts_query('GeoCheckin_Wide', query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, 0) + + def test_query_that_creates_table_using_interpolation(self): + table = self.randname() + query = """CREATE TABLE test-{table} ( + geohash varchar not null, + user varchar not null, + time timestamp not null, + weather varchar not null, + temperature double, + PRIMARY KEY((geohash, user, quantum(time, 15, m)), + geohash, user, time)) + """ + ts_obj = self.client.ts_query(table, query) + self.assertIsNotNone(ts_obj) + self.assertFalse(hasattr(ts_obj, 'ts_cols')) + self.assertIsNone(ts_obj.rows) + + def test_query_that_returns_table_description(self): + fmt = 'DESCRIBE {table}' + query = fmt.format(table=table_name) + ts_obj = self.client.ts_query(table_name, query) + self.assertIsNotNone(ts_obj) + self.validate_len(ts_obj, (5, 7, 8)) + + def test_store_and_fetch_gh_483(self): + now = datetime.datetime(2015, 1, 1, 12, 0, 0) + table = self.client.table(table_name) + rows = [ + ['hash1', 'user2', now, 'frazzle', 12.3] + ] + + ts_obj = table.new(rows) + result = ts_obj.store() + self.assertTrue(result) + + k = ['hash1', 
'user2', now] + ts_obj = self.client.ts_get(table_name, k) + self.assertIsNotNone(ts_obj) + ts_cols = ts_obj.columns + self.assertEqual(len(ts_cols.names), 5) + self.assertEqual(len(ts_cols.types), 5) + self.assertEqual(len(ts_obj.rows), 1) + + row = ts_obj.rows[0] + self.assertEqual(len(row), 5) + exp = [six.b('hash1'), six.b('user2'), now, + six.b('frazzle'), 12.3] + self.assertEqual(row, exp) + + def test_store_and_fetch_and_query(self): + now = datetime.datetime.utcfromtimestamp(144379690.987000) + fiveMinsAgo = now - fiveMins + tenMinsAgo = fiveMinsAgo - fiveMins + fifteenMinsAgo = tenMinsAgo - fiveMins + twentyMinsAgo = fifteenMinsAgo - fiveMins + twentyFiveMinsAgo = twentyMinsAgo - fiveMins + + table = self.client.table(table_name) + rows = [ + ['hash1', 'user2', twentyFiveMinsAgo, 'typhoon', 90.3], + ['hash1', 'user2', twentyMinsAgo, 'hurricane', 82.3], + ['hash1', 'user2', fifteenMinsAgo, 'rain', 79.0], + ['hash1', 'user2', fiveMinsAgo, 'wind', None], + ['hash1', 'user2', now, 'snow', 20.1] + ] + # NB: response data is binary + exp_rows = [ + [six.b('hash1'), six.b('user2'), twentyFiveMinsAgo, + six.b('typhoon'), 90.3], + [six.b('hash1'), six.b('user2'), twentyMinsAgo, + six.b('hurricane'), 82.3], + [six.b('hash1'), six.b('user2'), fifteenMinsAgo, + six.b('rain'), 79.0], + [six.b('hash1'), six.b('user2'), fiveMinsAgo, + six.b('wind'), None], + [six.b('hash1'), six.b('user2'), now, + six.b('snow'), 20.1] + ] + ts_obj = table.new(rows) + result = ts_obj.store() + self.assertTrue(result) + + for i, r in enumerate(rows): + k = r[0:3] + ts_obj = self.client.ts_get(table_name, k) + self.assertIsNotNone(ts_obj) + ts_cols = ts_obj.columns + self.assertEqual(len(ts_cols.names), 5) + self.assertEqual(len(ts_cols.types), 5) + self.assertEqual(len(ts_obj.rows), 1) + row = ts_obj.rows[0] + exp = exp_rows[i] + self.assertEqual(len(row), 5) + self.assertEqual(row, exp) + + fmt = """ + select * from {table} where + time > {t1} and time < {t2} and + geohash = 'hash1' and + 
user = 'user2' + """ + query = fmt.format( + table=table_name, + t1=unix_time_millis(tenMinsAgo), + t2=unix_time_millis(now)) + ts_obj = self.client.ts_query(table_name, query) + if ts_obj.columns is not None: + self.assertEqual(len(ts_obj.columns.names), 5) + self.assertEqual(len(ts_obj.columns.types), 5) + self.assertEqual(len(ts_obj.rows), 1) + row = ts_obj.rows[0] + self.assertEqual(bytes_to_str(row[0]), 'hash1') + self.assertEqual(bytes_to_str(row[1]), 'user2') + self.assertEqual(row[2], fiveMinsAgo) + self.assertEqual(row[2].microsecond, 987000) + self.assertEqual(bytes_to_str(row[3]), 'wind') + self.assertIsNone(row[4]) + + def test_create_error_via_put(self): + table = Table(self.client, table_name) + ts_obj = table.new([]) + with self.assertRaises(RiakError) as cm: + ts_obj.store() + logging.debug( + "[test_timeseries_ttb] saw exception: {}" + .format(cm.exception)) diff --git a/riak/tests/test_util.py b/riak/tests/test_util.py new file mode 100644 index 00000000..766c82fa --- /dev/null +++ b/riak/tests/test_util.py @@ -0,0 +1,94 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import unittest + +from riak.util import is_timeseries_supported, \ + datetime_from_unix_time_millis, \ + unix_time_millis + + +class UtilUnitTests(unittest.TestCase): + # NB: + # 144379690 secs, 987 msecs past epoch + # 144379690987 total msecs past epoch + def test_conv_ms_timestamp_to_datetime_and_back(self): + if is_timeseries_supported(): + # this is what would be stored in Riak TS + v = 144379690987 + dt = datetime_from_unix_time_millis(v) + + # This is how Python represents the above + utp = 144379690.987000 + dtp = datetime.datetime.utcfromtimestamp(utp) + self.assertEqual(dt, dtp) + + utm = unix_time_millis(dt) + self.assertEqual(v, utm) + else: + pass + + def test_conv_datetime_to_unix_millis(self): + # This is the "native" Python unix timestamp including + # microseconds, as float. timedelta "total_seconds()" + # returns a value like this + if is_timeseries_supported(): + v = 144379690.987000 + d = datetime.datetime.utcfromtimestamp(v) + utm = unix_time_millis(d) + self.assertEqual(utm, 144379690987) + else: + pass + + def test_unix_millis_validation(self): + v = 144379690.987 + with self.assertRaises(ValueError): + datetime_from_unix_time_millis(v) + + def test_unix_millis_small_value(self): + if is_timeseries_supported(): + # this is what would be stored in Riak TS + v = 1001 + dt = datetime_from_unix_time_millis(v) + + # This is how Python represents the above + utp = 1.001 + dtp = datetime.datetime.utcfromtimestamp(utp) + self.assertEqual(dt, dtp) + + utm = unix_time_millis(dt) + self.assertEqual(v, utm) + else: + pass + + def test_is_timeseries_supported(self): + v = (2, 7, 10) + self.assertEqual(True, is_timeseries_supported(v)) + v = (2, 7, 11) + self.assertEqual(True, is_timeseries_supported(v)) + v = (2, 7, 12) + self.assertEqual(True, is_timeseries_supported(v)) + v = (3, 3, 6) + self.assertEqual(False, is_timeseries_supported(v)) + v = (3, 4, 3) + self.assertEqual(False, is_timeseries_supported(v)) + v = (3, 4, 4) + 
self.assertEqual(True, is_timeseries_supported(v)) + v = (3, 4, 5) + self.assertEqual(True, is_timeseries_supported(v)) + v = (3, 5, 0) + self.assertEqual(False, is_timeseries_supported(v)) + v = (3, 5, 1) + self.assertEqual(True, is_timeseries_supported(v)) diff --git a/riak/tests/test_yokozuna.py b/riak/tests/test_yokozuna.py index 1439373e..a1823774 100644 --- a/riak/tests/test_yokozuna.py +++ b/riak/tests/test_yokozuna.py @@ -1,11 +1,24 @@ # -*- coding: utf-8 -*- -import platform -if platform.python_version() < '2.7': - unittest = __import__('unittest2') -else: - import unittest - -from . import RUN_YZ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from riak.tests import RUN_YZ +from riak.tests.base import IntegrationTestBase +from riak.tests.comparison import Comparison +from riak.tests.yz_setup import yzSetUp, yzTearDown def wait_for_yz_index(bucket, key, index=None): @@ -21,10 +34,26 @@ def wait_for_yz_index(bucket, key, index=None): pass -class YZSearchTests(object): - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') +# YZ index on bucket of the same name +testrun_yz = {'btype': None, 'bucket': 'yzbucket', 'index': 'yzbucket'} +# YZ index on bucket of a different name +testrun_yz_index = {'btype': None, + 'bucket': 'yzindexbucket', + 'index': 'yzindex'} + + +def setUpModule(): + yzSetUp(testrun_yz, testrun_yz_index) + + +def tearDownModule(): + yzTearDown(testrun_yz, testrun_yz_index) + + +@unittest.skipUnless(RUN_YZ, 'RUN_YZ is 0') +class YZSearchTests(IntegrationTestBase, unittest.TestCase, Comparison): def test_yz_search_from_bucket(self): - bucket = self.client.bucket(self.yz['bucket']) + bucket = self.client.bucket(testrun_yz['bucket']) bucket.new("user", {"user_s": "Z"}).store() wait_for_yz_index(bucket, "user") results = bucket.search("user_s:Z") @@ -34,63 +63,58 @@ def test_yz_search_from_bucket(self): self.assertIn('_yz_rk', result) self.assertEqual(u'user', result['_yz_rk']) self.assertIn('_yz_rb', result) - self.assertEqual(self.yz['bucket'], result['_yz_rb']) + self.assertEqual(testrun_yz['bucket'], result['_yz_rb']) self.assertIn('score', result) self.assertIn('user_s', result) self.assertEqual(u'Z', result['user_s']) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_search_index_using_bucket(self): - bucket = self.client.bucket(self.yz_index['bucket']) + bucket = self.client.bucket(testrun_yz_index['bucket']) bucket.new("feliz", {"name_s": "Felix", "species_s": "Felis catus"}).store() - wait_for_yz_index(bucket, "feliz", index=self.yz_index['index']) - results = bucket.search('name_s:Felix', index=self.yz_index['index']) + wait_for_yz_index(bucket, 
"feliz", index=testrun_yz_index['index']) + results = bucket.search('name_s:Felix', + index=testrun_yz_index['index']) self.assertEqual(1, len(results['docs'])) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_search_index_using_wrong_bucket(self): - bucket = self.client.bucket(self.yz_index['bucket']) + bucket = self.client.bucket(testrun_yz_index['bucket']) bucket.new("feliz", {"name_s": "Felix", "species_s": "Felis catus"}).store() - wait_for_yz_index(bucket, "feliz", index=self.yz_index['index']) + wait_for_yz_index(bucket, "feliz", index=testrun_yz_index['index']) with self.assertRaises(Exception): bucket.search('name_s:Felix') - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_get_search_index(self): - index = self.client.get_search_index(self.yz['bucket']) - self.assertEqual(self.yz['bucket'], index['name']) + index = self.client.get_search_index(testrun_yz['bucket']) + self.assertEqual(testrun_yz['bucket'], index['name']) self.assertEqual('_yz_default', index['schema']) self.assertEqual(3, index['n_val']) with self.assertRaises(Exception): - self.client.get_search_index('NOT' + self.yz['bucket']) + self.client.get_search_index('NOT' + testrun_yz['bucket']) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_delete_search_index(self): # expected to fail, since there's an attached bucket with self.assertRaises(Exception): - self.client.delete_search_index(self.yz['bucket']) + self.client.delete_search_index(testrun_yz['bucket']) # detatch bucket from index then delete - b = self.client.bucket(self.yz['bucket']) + b = self.client.bucket(testrun_yz['bucket']) b.set_property('search_index', '_dont_index_') - self.assertTrue(self.client.delete_search_index(self.yz['bucket'])) + self.assertTrue(self.client.delete_search_index(testrun_yz['bucket'])) # create it again - self.client.create_search_index(self.yz['bucket'], '_yz_default', 3) - b = self.client.bucket(self.yz['bucket']) - b.set_property('search_index', 
self.yz['bucket']) + self.client.create_search_index(testrun_yz['bucket'], '_yz_default', 3) + b = self.client.bucket(testrun_yz['bucket']) + b.set_property('search_index', testrun_yz['bucket']) # Wait for index to apply indexes = [] - while self.yz['bucket'] not in indexes: + while testrun_yz['bucket'] not in indexes: indexes = [i['name'] for i in self.client.list_search_indexes()] - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_list_search_indexes(self): indexes = self.client.list_search_indexes() - self.assertIn(self.yz['bucket'], [item['name'] for item in indexes]) + self.assertIn(testrun_yz['bucket'], [item['name'] for item in indexes]) self.assertLessEqual(1, len(indexes)) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_create_schema(self): content = """ @@ -126,7 +150,6 @@ def test_yz_create_schema(self): self.assertEqual(schema_name, schema['name']) self.assertEqual(content, schema['content']) - @unittest.skipUnless(RUN_YZ, 'RUN_YZ is undefined') def test_yz_create_bad_schema(self): bad_content = """ `. + :class:`Transport `. """ def _server_version(self): @@ -192,6 +190,22 @@ def datatypes(self): """ return self.server_version >= versions[2.0] + def preflists(self): + """ + Whether bucket/key preflists are supported. + + :rtype: bool + """ + return self.server_version >= versions[2.1] + + def write_once(self): + """ + Whether write-once operations are supported. + + :rtype: bool + """ + return self.server_version >= versions[2.1] + @lazy_property def server_version(self): return LooseVersion(self._server_version()) diff --git a/riak/transports/http/__init__.py b/riak/transports/http/__init__.py index 1d073604..68797b7b 100644 --- a/riak/transports/http/__init__.py +++ b/riak/transports/http/__init__.py @@ -1,34 +1,40 @@ -""" -Copyright 2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. 
You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import socket import select + from six import PY2 -if PY2: +from riak.security import SecurityError, USE_STDLIB_SSL +from riak.transports.pool import Pool +from riak.transports.http.transport import HttpTransport + +if USE_STDLIB_SSL: + import ssl + from riak.transports.security import configure_ssl_context +else: import OpenSSL.SSL + from riak.transports.security import RiakWrappedSocket,\ + configure_pyopenssl_context + +if PY2: from httplib import HTTPConnection, \ NotConnected, \ IncompleteRead, \ ImproperConnectionState, \ BadStatusLine, \ HTTPSConnection - from riak.transports.security import RiakWrappedSocket,\ - configure_pyopenssl_context else: from http.client import HTTPConnection, \ HTTPSConnection, \ @@ -36,12 +42,6 @@ IncompleteRead, \ ImproperConnectionState, \ BadStatusLine - import ssl - from riak.transports.security import configure_ssl_context - -from riak.security import SecurityError -from riak.transports.pool import Pool -from riak.transports.http.transport import RiakHttpTransport class NoNagleHTTPConnection(HTTPConnection): @@ -85,11 +85,21 @@ def __init__(self, :type timeout: int """ if PY2: + # NB: it appears that pkey_file / cert_file are never set + # in riak/transports/http/connection.py#_connect() method + pkf = pkey_file + if pkf is None and credentials is not None: + pkf = credentials._pkey_file + + cf = cert_file + if cf is None and credentials is not None: + cf = credentials._cert_file + HTTPSConnection.__init__(self, host, port, - key_file=pkey_file, - cert_file=cert_file) + key_file=pkf, + cert_file=cf) else: super(RiakHTTPSConnection, self). \ __init__(host=host, @@ -106,7 +116,7 @@ def connect(self): Connect to a host on a given (SSL) port using PyOpenSSL. 
""" sock = socket.create_connection((self.host, self.port), self.timeout) - if PY2: + if not USE_STDLIB_SSL: ssl_ctx = configure_pyopenssl_context(self.credentials) # attempt to upgrade the socket to TLS @@ -126,18 +136,19 @@ def connect(self): self.credentials._check_revoked_cert(self.sock) else: ssl_ctx = configure_ssl_context(self.credentials) - host = "riak@" + self.host + if self.timeout is not None: + sock.settimeout(self.timeout) self.sock = ssl.SSLSocket(sock=sock, keyfile=self.credentials.pkey_file, certfile=self.credentials.cert_file, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.credentials.cacert_file, ciphers=self.credentials.ciphers, - server_hostname=host) + server_hostname=self.host) self.sock.context = ssl_ctx -class RiakHttpPool(Pool): +class HttpPool(Pool): """ A pool of HTTP(S) transport connections. """ @@ -148,14 +159,14 @@ def __init__(self, client, **options): if self.client._credentials: self.connection_class = RiakHTTPSConnection - super(RiakHttpPool, self).__init__() + super(HttpPool, self).__init__() def create_resource(self): node = self.client._choose_node() - return RiakHttpTransport(node=node, - client=self.client, - connection_class=self.connection_class, - **self.options) + return HttpTransport(node=node, + client=self.client, + connection_class=self.connection_class, + **self.options) def destroy_resource(self, transport): transport.close() diff --git a/riak/transports/http/connection.py b/riak/transports/http/connection.py index db7689e8..d1c16281 100644 --- a/riak/transports/http/connection.py +++ b/riak/transports/http/connection.py @@ -1,33 +1,31 @@ -""" -Copyright 2012 Basho Technologies, Inc. +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +import base64 from six import PY2 +from riak.util import str_to_bytes + if PY2: from httplib import NotConnected, HTTPConnection else: from http.client import NotConnected, HTTPConnection -import base64 -from riak.util import str_to_bytes -class RiakHttpConnection(object): +class HttpConnection(object): """ - Connection and low-level request methods for RiakHttpTransport. + Connection and low-level request methods for HttpTransport. """ def _request(self, method, uri, headers={}, body='', stream=False): @@ -47,7 +45,10 @@ def _request(self, method, uri, headers={}, body='', stream=False): try: self._connection.request(method, uri, body, headers) - response = self._connection.getresponse() + try: + response = self._connection.getresponse(buffering=True) + except TypeError: + response = self._connection.getresponse() if stream: # The caller is responsible for fully reading the @@ -65,14 +66,21 @@ def _connect(self): """ Use the appropriate connection class; optionally with security. 
""" + timeout = None + if self._options is not None and 'timeout' in self._options: + timeout = self._options['timeout'] + if self._client._credentials: - self._connection = \ - self._connection_class(self._node.host, - self._node.http_port, - self._client._credentials) + self._connection = self._connection_class( + host=self._node.host, + port=self._node.http_port, + credentials=self._client._credentials, + timeout=timeout) else: - self._connection = self._connection_class(self._node.host, - self._node.http_port) + self._connection = self._connection_class( + host=self._node.host, + port=self._node.http_port, + timeout=timeout) # Forces the population of stats and resources before any # other requests are made. self.server_version @@ -86,7 +94,7 @@ def close(self): except NotConnected: pass - # These are set by the RiakHttpTransport initializer + # These are set by the HttpTransport initializer _connection_class = HTTPConnection _node = None diff --git a/riak/transports/http/resources.py b/riak/transports/http/resources.py index 7075952a..2b53ae7c 100644 --- a/riak/transports/http/resources.py +++ b/riak/transports/http/resources.py @@ -1,34 +1,32 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import re + from six import PY2 +from riak import RiakError +from riak.util import lazy_property, bytes_to_str + if PY2: from urllib import quote_plus, urlencode else: from urllib.parse import quote_plus, urlencode -from riak import RiakError -from riak.util import lazy_property, bytes_to_str -class RiakHttpResources(object): +class HttpResources(object): """ - Methods for RiakHttpTransport related to URL generation, i.e. + Methods for HttpTransport related to URL generation, i.e. creating the proper paths. """ @@ -172,6 +170,30 @@ def datatypes_path(self, bucket_type, bucket, key=None, **options): return mkpath("/types", quote_plus(bucket_type), "buckets", quote_plus(bucket), "datatypes", key, **options) + def preflist_path(self, bucket, key, bucket_type=None, **options): + """ + Generate the URL for bucket/key preflist information + + :param bucket: Name of a Riak bucket + :type bucket: string + :param key: Name of a Key + :type key: string + :param bucket_type: Optional Riak Bucket Type + :type bucket_type: None or string + :rtype URL string + """ + if not self.riak_kv_wm_preflist: + raise RiakError("Preflists are unsupported by this Riak node") + if self.riak_kv_wm_bucket_type and bucket_type: + return mkpath("/types", quote_plus(bucket_type), + "buckets", quote_plus(bucket), + "keys", quote_plus(key), + "preflist", **options) + else: + return mkpath("/buckets", quote_plus(bucket), + "keys", quote_plus(key), + "preflist", **options) + # Feature detection overrides def bucket_types(self): return self.riak_kv_wm_bucket_type is not None @@ -180,7 +202,7 @@ 
def index_term_regex(self): if self.riak_kv_wm_bucket_type is not None: return True else: - return super(RiakHttpResources, self).index_term_regex() + return super(HttpResources, self).index_term_regex() # Resource root paths @lazy_property @@ -225,6 +247,10 @@ def riak_solr_indexer_wm(self): def riak_kv_wm_counter(self): return self.resources.get('riak_kv_wm_counter') + @lazy_property + def riak_kv_wm_preflist(self): + return self.resources.get('riak_kv_wm_preflist') + @lazy_property def yz_wm_search(self): return self.resources.get('yz_wm_search') @@ -264,7 +290,7 @@ def mkpath(*segments, **query): if query[key] in [False, True]: _query[key] = str(query[key]).lower() elif query[key] is not None: - if PY2 and isinstance(query[key], unicode): + if PY2 and isinstance(query[key], unicode): # noqa _query[key] = query[key].encode('utf-8') else: _query[key] = query[key] diff --git a/riak/transports/http/search.py b/riak/transports/http/search.py index 4e6c69e6..d43688f6 100644 --- a/riak/transports/http/search.py +++ b/riak/transports/http/search.py @@ -1,3 +1,18 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + class XMLSearchResult(object): # Match tags that are document fields fieldtags = ['str', 'int', 'date'] diff --git a/riak/transports/http/stream.py b/riak/transports/http/stream.py index edb1c818..590565f2 100644 --- a/riak/transports/http/stream.py +++ b/riak/transports/http/stream.py @@ -1,23 +1,20 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import json import re + from cgi import parse_header from email import message_from_string from riak.util import decode_index_value @@ -26,7 +23,7 @@ from six import PY2 -class RiakHttpStream(object): +class HttpStream(object): """ Base class for HTTP streaming iterators. 
""" @@ -66,7 +63,7 @@ def close(self): self.resource.release() -class RiakHttpJsonStream(RiakHttpStream): +class HttpJsonStream(HttpStream): _json_field = None def next(self): @@ -92,26 +89,26 @@ def __next__(self): return self.next() -class RiakHttpKeyStream(RiakHttpJsonStream): +class HttpKeyStream(HttpJsonStream): """ Streaming iterator for list-keys over HTTP """ _json_field = u'keys' -class RiakHttpBucketStream(RiakHttpJsonStream): +class HttpBucketStream(HttpJsonStream): """ Streaming iterator for list-buckets over HTTP """ _json_field = u'buckets' -class RiakHttpMultipartStream(RiakHttpStream): +class HttpMultipartStream(HttpStream): """ Streaming iterator for multipart messages over HTTP """ def __init__(self, response): - super(RiakHttpMultipartStream, self).__init__(response) + super(HttpMultipartStream, self).__init__(response) ctypehdr = response.getheader('content-type') _, params = parse_header(ctypehdr) self.boundary_re = re.compile('\r?\n--%s(?:--)?\r?\n' % @@ -154,13 +151,13 @@ def read_until_boundary(self): self._read() -class RiakHttpMapReduceStream(RiakHttpMultipartStream): +class HttpMapReduceStream(HttpMultipartStream): """ Streaming iterator for MapReduce over HTTP """ def next(self): - message = super(RiakHttpMapReduceStream, self).next() + message = super(HttpMapReduceStream, self).next() payload = json.loads(message.get_payload()) return payload['phase'], payload['data'] @@ -169,18 +166,18 @@ def __next__(self): return self.next() -class RiakHttpIndexStream(RiakHttpMultipartStream): +class HttpIndexStream(HttpMultipartStream): """ Streaming iterator for secondary indexes over HTTP """ def __init__(self, response, index, return_terms): - super(RiakHttpIndexStream, self).__init__(response) + super(HttpIndexStream, self).__init__(response) self.index = index self.return_terms = return_terms def next(self): - message = super(RiakHttpIndexStream, self).next() + message = super(HttpIndexStream, self).next() payload = 
json.loads(message.get_payload()) if u'error' in payload: raise RiakError(payload[u'error']) diff --git a/riak/transports/http/transport.py b/riak/transports/http/transport.py index 7214fbe0..7cba2681 100644 --- a/riak/transports/http/transport.py +++ b/riak/transports/http/transport.py @@ -1,23 +1,16 @@ -""" -Copyright 2012 Basho Technologies, Inc. -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
try: import simplejson as json @@ -25,29 +18,31 @@ import json from six import PY2 -if PY2: - from httplib import HTTPConnection -else: - from http.client import HTTPConnection from xml.dom.minidom import Document -from riak.transports.transport import RiakTransport -from riak.transports.http.resources import RiakHttpResources -from riak.transports.http.connection import RiakHttpConnection -from riak.transports.http.codec import RiakHttpCodec -from riak.transports.http.stream import ( - RiakHttpKeyStream, - RiakHttpMapReduceStream, - RiakHttpBucketStream, - RiakHttpIndexStream) + from riak import RiakError +from riak.codecs.http import HttpCodec +from riak.transports.transport import Transport +from riak.transports.http.resources import HttpResources +from riak.transports.http.connection import HttpConnection +from riak.transports.http.stream import ( + HttpKeyStream, + HttpMapReduceStream, + HttpBucketStream, + HttpIndexStream) from riak.security import SecurityError from riak.util import decode_index_value, bytes_to_str, str_to_long +if PY2: + from httplib import HTTPConnection +else: + from http.client import HTTPConnection -class RiakHttpTransport(RiakHttpConnection, RiakHttpResources, RiakHttpCodec, - RiakTransport): + +class HttpTransport(Transport, + HttpConnection, HttpResources, HttpCodec): """ - The RiakHttpTransport object holds information necessary to + The HttpTransport object holds information necessary to connect to Riak via HTTP. """ @@ -55,16 +50,17 @@ def __init__(self, node=None, client=None, connection_class=HTTPConnection, client_id=None, - **unused_options): + **options): """ Construct a new HTTP connection to Riak. 
""" - super(RiakHttpTransport, self).__init__() + super(HttpTransport, self).__init__() self._client = client self._node = node self._connection_class = connection_class self._client_id = client_id + self._options = options if not self._client_id: self._client_id = self.make_random_client_id() self._connect() @@ -91,7 +87,11 @@ def stats(self): def _server_version(self): stats = self.stats() if stats is not None: - return stats['riak_kv_version'] + s = stats['riak_kv_version'] + if s.startswith('riak_ts-'): + return stats['riak_pb_version'] + else: + return s # If stats is disabled, we can't assume the Riak version # is >= 1.1. However, we can assume the new URL scheme is # at least version 1.0 @@ -121,7 +121,7 @@ def get_resources(self): return {} def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, - notfound_ok=None): + notfound_ok=None, head_only=False): """ Get a bucket/key from the server """ @@ -188,8 +188,8 @@ def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, url = self.object_path(robj.bucket.name, robj.key, bucket_type=bucket_type, **params) - use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') - and robj.vclock is not None) + use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') and + robj.vclock is not None) if use_vclocks: headers['X-Riak-Vclock'] = robj.vclock.encode('base64') response = self._request('DELETE', url, headers) @@ -218,7 +218,7 @@ def stream_keys(self, bucket, timeout=None): status, headers, response = self._request('GET', url, stream=True) if status == 200: - return RiakHttpKeyStream(response) + return HttpKeyStream(response) else: raise RiakError('Error listing keys.') @@ -251,7 +251,7 @@ def stream_buckets(self, bucket_type=None, timeout=None): status, headers, response = self._request('GET', url, stream=True) if status == 200: - return RiakHttpBucketStream(response) + return HttpBucketStream(response) else: raise RiakError('Error listing buckets.') @@ -370,7 
+370,7 @@ def stream_mapred(self, inputs, query, timeout=None): content, stream=True) if status == 200: - return RiakHttpMapReduceStream(response) + return HttpMapReduceStream(response) else: raise RiakError( 'Error running MapReduce operation. Headers: %s Body: %s' % @@ -440,11 +440,12 @@ def stream_index(self, bucket, index, startkey, endkey=None, status, headers, response = self._request('GET', url, stream=True) if status == 200: - return RiakHttpIndexStream(response, index, return_terms) + return HttpIndexStream(response, index, return_terms) else: raise RiakError('Error streaming secondary index.') - def create_search_index(self, index, schema=None, n_val=None): + def create_search_index(self, index, schema=None, n_val=None, + timeout=None): """ Create a Solr search index for Yokozuna. @@ -454,6 +455,8 @@ def create_search_index(self, index, schema=None, n_val=None): :type schema: string :param n_val: N value of the write :type n_val: int + :param timeout: optional timeout (in ms) + :type timeout: integer, None :rtype boolean """ @@ -468,6 +471,8 @@ def create_search_index(self, index, schema=None, n_val=None): content_dict['schema'] = schema if n_val: content_dict['n_val'] = n_val + if timeout: + content_dict['timeout'] = timeout content = json.dumps(content_dict) # Run the request... 
@@ -709,12 +714,11 @@ def update_counter(self, bucket, key, amount, **options): self.check_http_code(status, [200, 204]) def fetch_datatype(self, bucket, key, **options): - if bucket.bucket_type.is_default(): - raise NotImplementedError("Datatypes cannot be used in the default" - " bucket-type.") - if not self.datatypes(): raise NotImplementedError("Datatypes are not supported.") + if bucket.bucket_type.is_default(): + raise NotImplementedError( + 'Datatypes cannot be used in the default bucket-type.') url = self.datatypes_path(bucket.bucket_type.name, bucket.name, key, **options) @@ -730,12 +734,11 @@ def fetch_datatype(self, bucket, key, **options): response.get('context')) def update_datatype(self, datatype, **options): - if datatype.bucket.bucket_type.is_default(): - raise NotImplementedError("Datatypes cannot be used in the default" - " bucket-type.") - if not self.datatypes(): - raise NotImplementedError("Datatypes are not supported.") + raise NotImplementedError('Datatypes are not supported.') + if datatype.bucket.bucket_type.is_default(): + raise NotImplementedError( + 'Datatypes cannot be used in the default bucket-type.') op = datatype.to_op() context = datatype.context @@ -744,7 +747,7 @@ def update_datatype(self, datatype, **options): raise ValueError("No operation to send on datatype {!r}". format(datatype)) - if type_name not in ('counter', 'set', 'map'): + if type_name not in ('counter', 'set', 'hll', 'map'): raise TypeError("Cannot send operation on datatype {!r}". 
format(type_name)) @@ -775,6 +778,28 @@ def update_datatype(self, datatype, **options): return True + def get_preflist(self, bucket, key): + """ + Get the preflist for a bucket/key + + :param bucket: Riak Bucket + :type bucket: :class:`~riak.bucket.RiakBucket` + :param key: Riak Key + :type key: string + :rtype: list of dicts + """ + if not self.preflists(): + raise NotImplementedError("fetching preflists is not supported.") + bucket_type = self._get_bucket_type(bucket.bucket_type) + url = self.preflist_path(bucket.name, key, bucket_type=bucket_type) + status, headers, body = self._request('GET', url) + + if status == 200: + preflist = json.loads(bytes_to_str(body)) + return preflist['preflist'] + else: + raise RiakError('Error getting bucket/key preflist.') + def check_http_code(self, status, expected_statuses): if status not in expected_statuses: raise RiakError('Expected status %s, received %s' % diff --git a/riak/transports/pbc/__init__.py b/riak/transports/pbc/__init__.py deleted file mode 100644 index fc8914b6..00000000 --- a/riak/transports/pbc/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -Copyright 2012 Basho Technologies, Inc. -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
-""" - -import errno -import socket -from riak.transports.pool import Pool -from riak.transports.pbc.transport import RiakPbcTransport - - -class RiakPbcPool(Pool): - """ - A resource pool of PBC transports. - """ - def __init__(self, client, **options): - super(RiakPbcPool, self).__init__() - self._client = client - self._options = options - - def create_resource(self): - node = self._client._choose_node() - return RiakPbcTransport(node=node, - client=self._client, - **self._options) - - def destroy_resource(self, pbc): - pbc.close() - -# These are a specific set of socket errors -# that could be raised on send/recv that indicate -# that the socket is closed or reset, and is not -# usable. On seeing any of these errors, the socket -# should be closed, and the connection re-established. -CONN_CLOSED_ERRORS = ( - errno.EHOSTUNREACH, - errno.ECONNRESET, - errno.ECONNREFUSED, - errno.ECONNABORTED, - errno.ETIMEDOUT, - errno.EBADF, - errno.EPIPE -) - - -def is_retryable(err): - """ - Determines if the given exception is something that is - network/socket-related and should thus cause the PBC connection to - close and the operation retried on another node. - - :rtype: boolean - """ - if isinstance(err, socket.error): - code = err.args[0] - return code in CONN_CLOSED_ERRORS - else: - return False diff --git a/riak/transports/pbc/codec.py b/riak/transports/pbc/codec.py deleted file mode 100644 index ca48ff59..00000000 --- a/riak/transports/pbc/codec.py +++ /dev/null @@ -1,624 +0,0 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. 
You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" -import riak_pb -from riak import RiakError -from riak.content import RiakContent -from riak.util import decode_index_value, str_to_bytes, bytes_to_str -from riak.multidict import MultiDict -from six import string_types, PY2 - - -def _invert(d): - out = {} - for key in d: - value = d[key] - out[value] = key - return out - -REPL_TO_PY = {riak_pb.RpbBucketProps.FALSE: False, - riak_pb.RpbBucketProps.TRUE: True, - riak_pb.RpbBucketProps.REALTIME: 'realtime', - riak_pb.RpbBucketProps.FULLSYNC: 'fullsync'} - -REPL_TO_PB = _invert(REPL_TO_PY) - -RIAKC_RW_ONE = 4294967294 -RIAKC_RW_QUORUM = 4294967293 -RIAKC_RW_ALL = 4294967292 -RIAKC_RW_DEFAULT = 4294967291 - -QUORUM_TO_PB = {'default': RIAKC_RW_DEFAULT, - 'all': RIAKC_RW_ALL, - 'quorum': RIAKC_RW_QUORUM, - 'one': RIAKC_RW_ONE} - -QUORUM_TO_PY = _invert(QUORUM_TO_PB) - -NORMAL_PROPS = ['n_val', 'allow_mult', 'last_write_wins', 'old_vclock', - 'young_vclock', 'big_vclock', 'small_vclock', 'basic_quorum', - 'notfound_ok', 'search', 'backend', 'search_index', 'datatype'] -COMMIT_HOOK_PROPS = ['precommit', 'postcommit'] -MODFUN_PROPS = ['chash_keyfun', 'linkfun'] -QUORUM_PROPS = ['r', 'pr', 'w', 'pw', 'dw', 'rw'] - -MAP_FIELD_TYPES = { - riak_pb.MapField.COUNTER: 'counter', - riak_pb.MapField.SET: 'set', - riak_pb.MapField.REGISTER: 'register', - riak_pb.MapField.FLAG: 'flag', - riak_pb.MapField.MAP: 'map', - 'counter': riak_pb.MapField.COUNTER, - 'set': riak_pb.MapField.SET, - 'register': riak_pb.MapField.REGISTER, - 'flag': riak_pb.MapField.FLAG, - 'map': riak_pb.MapField.MAP -} - -DT_FETCH_TYPES = { - 
riak_pb.DtFetchResp.COUNTER: 'counter', - riak_pb.DtFetchResp.SET: 'set', - riak_pb.DtFetchResp.MAP: 'map' -} - - -class RiakPbcCodec(object): - """ - Protobuffs Encoding and decoding methods for RiakPbcTransport. - """ - - def __init__(self, **unused_args): - if riak_pb is None: - raise NotImplementedError("this transport is not available") - super(RiakPbcCodec, self).__init__(**unused_args) - - def _encode_quorum(self, rw): - """ - Converts a symbolic quorum value into its on-the-wire - equivalent. - - :param rw: the quorum - :type rw: string, integer - :rtype: integer - """ - if rw in QUORUM_TO_PB: - return QUORUM_TO_PB[rw] - elif type(rw) is int and rw >= 0: - return rw - else: - return None - - def _decode_quorum(self, rw): - """ - Converts a protobuf quorum value to a symbolic value if - necessary. - - :param rw: the quorum - :type rw: int - :rtype int or string - """ - if rw in QUORUM_TO_PY: - return QUORUM_TO_PY[rw] - else: - return rw - - def _decode_contents(self, contents, obj): - """ - Decodes the list of siblings from the protobuf representation - into the object. - - :param contents: a list of RpbContent messages - :type contents: list - :param obj: a RiakObject - :type obj: RiakObject - :rtype RiakObject - """ - obj.siblings = [self._decode_content(c, RiakContent(obj)) - for c in contents] - # Invoke sibling-resolution logic - if len(obj.siblings) > 1 and obj.resolver is not None: - obj.resolver(obj) - return obj - - def _decode_content(self, rpb_content, sibling): - """ - Decodes a single sibling from the protobuf representation into - a RiakObject. 
- - :param rpb_content: a single RpbContent message - :type rpb_content: riak_pb.RpbContent - :param sibling: a RiakContent sibling container - :type sibling: RiakContent - :rtype: RiakContent - """ - - if rpb_content.HasField("deleted") and rpb_content.deleted: - sibling.exists = False - else: - sibling.exists = True - if rpb_content.HasField("content_type"): - sibling.content_type = bytes_to_str(rpb_content.content_type) - if rpb_content.HasField("charset"): - sibling.charset = bytes_to_str(rpb_content.charset) - if rpb_content.HasField("content_encoding"): - sibling.content_encoding = \ - bytes_to_str(rpb_content.content_encoding) - if rpb_content.HasField("vtag"): - sibling.etag = bytes_to_str(rpb_content.vtag) - - sibling.links = [self._decode_link(link) - for link in rpb_content.links] - if rpb_content.HasField("last_mod"): - sibling.last_modified = float(rpb_content.last_mod) - if rpb_content.HasField("last_mod_usecs"): - sibling.last_modified += rpb_content.last_mod_usecs / 1000000.0 - - sibling.usermeta = dict([(bytes_to_str(usermd.key), - bytes_to_str(usermd.value)) - for usermd in rpb_content.usermeta]) - sibling.indexes = set([(bytes_to_str(index.key), - decode_index_value(index.key, index.value)) - for index in rpb_content.indexes]) - sibling.encoded_data = rpb_content.value - - return sibling - - def _encode_content(self, robj, rpb_content): - """ - Fills an RpbContent message with the appropriate data and - metadata from a RiakObject. 
- - :param robj: a RiakObject - :type robj: RiakObject - :param rpb_content: the protobuf message to fill - :type rpb_content: riak_pb.RpbContent - """ - if robj.content_type: - rpb_content.content_type = str_to_bytes(robj.content_type) - if robj.charset: - rpb_content.charset = str_to_bytes(robj.charset) - if robj.content_encoding: - rpb_content.content_encoding = str_to_bytes(robj.content_encoding) - for uk in robj.usermeta: - pair = rpb_content.usermeta.add() - pair.key = str_to_bytes(uk) - pair.value = str_to_bytes(robj.usermeta[uk]) - for link in robj.links: - pb_link = rpb_content.links.add() - try: - bucket, key, tag = link - except ValueError: - raise RiakError("Invalid link tuple %s" % link) - - pb_link.bucket = str_to_bytes(bucket) - pb_link.key = str_to_bytes(key) - if tag: - pb_link.tag = str_to_bytes(tag) - else: - pb_link.tag = str_to_bytes('') - - for field, value in robj.indexes: - pair = rpb_content.indexes.add() - pair.key = str_to_bytes(field) - pair.value = str_to_bytes(str(value)) - - # Python 2.x data is stored in a string - if PY2: - rpb_content.value = str(robj.encoded_data) - else: - rpb_content.value = robj.encoded_data - - def _decode_link(self, link): - """ - Decodes an RpbLink message into a tuple - - :param link: an RpbLink message - :type link: riak_pb.RpbLink - :rtype tuple - """ - - if link.HasField("bucket"): - bucket = bytes_to_str(link.bucket) - else: - bucket = None - if link.HasField("key"): - key = bytes_to_str(link.key) - else: - key = None - if link.HasField("tag"): - tag = bytes_to_str(link.tag) - else: - tag = None - - return (bucket, key, tag) - - def _decode_index_value(self, index, value): - """ - Decodes a secondary index value into the correct Python type. 
- :param index: the name of the index - :type index: str - :param value: the value of the index entry - :type value: str - :rtype str or int - """ - if index.endswith("_int"): - return int(value) - else: - return bytes_to_str(value) - - def _encode_bucket_props(self, props, msg): - """ - Encodes a dict of bucket properties into the protobuf message. - - :param props: bucket properties - :type props: dict - :param msg: the protobuf message to fill - :type msg: riak_pb.RpbSetBucketReq - """ - for prop in NORMAL_PROPS: - if prop in props and props[prop] is not None: - if isinstance(props[prop], string_types): - setattr(msg.props, prop, str_to_bytes(props[prop])) - else: - setattr(msg.props, prop, props[prop]) - for prop in COMMIT_HOOK_PROPS: - if prop in props: - setattr(msg.props, 'has_' + prop, True) - self._encode_hooklist(props[prop], getattr(msg.props, prop)) - for prop in MODFUN_PROPS: - if prop in props and props[prop] is not None: - self._encode_modfun(props[prop], getattr(msg.props, prop)) - for prop in QUORUM_PROPS: - if prop in props and props[prop] not in (None, 'default'): - value = self._encode_quorum(props[prop]) - if value is not None: - if isinstance(value, string_types): - setattr(msg.props, prop, str_to_bytes(value)) - else: - setattr(msg.props, prop, value) - if 'repl' in props: - msg.props.repl = REPL_TO_PY[props['repl']] - - return msg - - def _decode_bucket_props(self, msg): - """ - Decodes the protobuf bucket properties message into a dict. 
- - :param msg: the protobuf message to decode - :type msg: riak_pb.RpbBucketProps - :rtype dict - """ - props = {} - - for prop in NORMAL_PROPS: - if msg.HasField(prop): - props[prop] = getattr(msg, prop) - if isinstance(props[prop], bytes): - props[prop] = bytes_to_str(props[prop]) - for prop in COMMIT_HOOK_PROPS: - if getattr(msg, 'has_' + prop): - props[prop] = self._decode_hooklist(getattr(msg, prop)) - for prop in MODFUN_PROPS: - if msg.HasField(prop): - props[prop] = self._decode_modfun(getattr(msg, prop)) - for prop in QUORUM_PROPS: - if msg.HasField(prop): - props[prop] = self._decode_quorum(getattr(msg, prop)) - if msg.HasField('repl'): - props['repl'] = REPL_TO_PY[msg.repl] - - return props - - def _decode_modfun(self, modfun): - """ - Decodes a protobuf modfun pair into a dict with 'mod' and - 'fun' keys. Used in bucket properties. - - :param modfun: the protobuf message to decode - :type modfun: riak_pb.RpbModFun - :rtype dict - """ - return {'mod': bytes_to_str(modfun.module), - 'fun': bytes_to_str(modfun.function)} - - def _encode_modfun(self, props, msg=None): - """ - Encodes a dict with 'mod' and 'fun' keys into a protobuf - modfun pair. Used in bucket properties. - - :param props: the module/function pair - :type props: dict - :param msg: the protobuf message to fill - :type msg: riak_pb.RpbModFun - :rtype riak_pb.RpbModFun - """ - if msg is None: - msg = riak_pb.RpbModFun() - msg.module = str_to_bytes(props['mod']) - msg.function = str_to_bytes(props['fun']) - return msg - - def _decode_hooklist(self, hooklist): - """ - Decodes a list of protobuf commit hooks into their python - equivalents. Used in bucket properties. - - :param hooklist: a list of protobuf commit hooks - :type hooklist: list - :rtype list - """ - return [self._decode_hook(hook) for hook in hooklist] - - def _encode_hooklist(self, hooklist, msg): - """ - Encodes a list of commit hooks into their protobuf equivalent. - Used in bucket properties. 
- - :param hooklist: a list of commit hooks - :type hooklist: list - :param msg: a protobuf field that is a list of commit hooks - """ - for hook in hooklist: - pbhook = msg.add() - self._encode_hook(hook, pbhook) - - def _decode_hook(self, hook): - """ - Decodes a protobuf commit hook message into a dict. Used in - bucket properties. - - :param hook: the hook to decode - :type hook: riak_pb.RpbCommitHook - :rtype dict - """ - if hook.HasField('modfun'): - return self._decode_modfun(hook.modfun) - else: - return {'name': bytes_to_str(hook.name)} - - def _encode_hook(self, hook, msg): - """ - Encodes a commit hook dict into the protobuf message. Used in - bucket properties. - - :param hook: the hook to encode - :type hook: dict - :param msg: the protobuf message to fill - :type msg: riak_pb.RpbCommitHook - :rtype riak_pb.RpbCommitHook - """ - if 'name' in hook: - msg.name = str_to_bytes(hook['name']) - else: - self._encode_modfun(hook, msg.modfun) - return msg - - def _encode_index_req(self, bucket, index, startkey, endkey=None, - return_terms=None, max_results=None, - continuation=None, timeout=None, term_regex=None): - """ - Encodes a secondary index request into the protobuf message. 
- - :param bucket: the bucket whose index to query - :type bucket: string - :param index: the index to query - :type index: string - :param startkey: the value or beginning of the range - :type startkey: integer, string - :param endkey: the end of the range - :type endkey: integer, string - :param return_terms: whether to return the index term with the key - :type return_terms: bool - :param max_results: the maximum number of results to return (page size) - :type max_results: integer - :param continuation: the opaque continuation returned from a - previous paginated request - :type continuation: string - :param timeout: a timeout value in milliseconds, or 'infinity' - :type timeout: int - :param term_regex: a regular expression used to filter index terms - :type term_regex: string - :rtype riak_pb.RpbIndexReq - """ - req = riak_pb.RpbIndexReq(bucket=str_to_bytes(bucket.name), - index=str_to_bytes(index)) - self._add_bucket_type(req, bucket.bucket_type) - if endkey is not None: - req.qtype = riak_pb.RpbIndexReq.range - req.range_min = str_to_bytes(str(startkey)) - req.range_max = str_to_bytes(str(endkey)) - else: - req.qtype = riak_pb.RpbIndexReq.eq - req.key = str_to_bytes(str(startkey)) - if return_terms is not None: - req.return_terms = return_terms - if max_results: - req.max_results = max_results - if continuation: - req.continuation = str_to_bytes(continuation) - if timeout: - if timeout == 'infinity': - req.timeout = 0 - else: - req.timeout = timeout - if term_regex: - req.term_regex = str_to_bytes(term_regex) - return req - - def _decode_search_index(self, index): - """ - Fills an RpbYokozunaIndex message with the appropriate data. 
- - :param index: a yz index message - :type index: riak_pb.RpbYokozunaIndex - :rtype dict - """ - result = {} - result['name'] = bytes_to_str(index.name) - if index.HasField('schema'): - result['schema'] = bytes_to_str(index.schema) - if index.HasField('n_val'): - result['n_val'] = index.n_val - return result - - def _add_bucket_type(self, req, bucket_type): - if bucket_type and not bucket_type.is_default(): - if not self.bucket_types(): - raise NotImplementedError( - 'Server does not support bucket-types') - req.type = str_to_bytes(bucket_type.name) - - def _encode_search_query(self, req, params): - if 'rows' in params: - req.rows = params['rows'] - if 'start' in params: - req.start = params['start'] - if 'sort' in params: - req.sort = str_to_bytes(params['sort']) - if 'filter' in params: - req.filter = str_to_bytes(params['filter']) - if 'df' in params: - req.df = str_to_bytes(params['df']) - if 'op' in params: - req.op = str_to_bytes(params['op']) - if 'q.op' in params: - req.op = params['q.op'] - if 'fl' in params: - if isinstance(params['fl'], list): - req.fl.extend(params['fl']) - else: - req.fl.append(params['fl']) - if 'presort' in params: - req.presort = params['presort'] - - def _decode_search_doc(self, doc): - resultdoc = MultiDict() - for pair in doc.fields: - if PY2: - ukey = unicode(pair.key, 'utf-8') - uval = unicode(pair.value, 'utf-8') - else: - ukey = bytes_to_str(pair.key) - uval = bytes_to_str(pair.value) - resultdoc.add(ukey, uval) - return resultdoc.mixed() - - def _decode_dt_fetch(self, resp): - dtype = DT_FETCH_TYPES.get(resp.type) - if dtype is None: - raise ValueError("Unknown datatype on wire: {}".format(resp.type)) - - value = self._decode_dt_value(dtype, resp.value) - - if resp.HasField('context'): - context = resp.context[:] - else: - context = None - - return dtype, value, context - - def _decode_dt_value(self, dtype, msg): - if dtype == 'counter': - return msg.counter_value - elif dtype == 'set': - return 
self._decode_set_value(msg.set_value) - elif dtype == 'map': - return self._decode_map_value(msg.map_value) - - def _encode_dt_options(self, req, params): - for q in ['r', 'pr', 'w', 'dw', 'pw']: - if q in params and params[q] is not None: - setattr(req, q, self._encode_quorum(params[q])) - - for o in ['basic_quorum', 'notfound_ok', 'timeout', 'return_body', - 'include_context']: - if o in params and params[o] is not None: - setattr(req, o, params[o]) - - def _decode_map_value(self, entries): - out = {} - for entry in entries: - name = bytes_to_str(entry.field.name[:]) - dtype = MAP_FIELD_TYPES[entry.field.type] - if dtype == 'counter': - value = entry.counter_value - elif dtype == 'set': - value = self._decode_set_value(entry.set_value) - elif dtype == 'register': - value = bytes_to_str(entry.register_value[:]) - elif dtype == 'flag': - value = entry.flag_value - elif dtype == 'map': - value = self._decode_map_value(entry.map_value) - out[(name, dtype)] = value - return out - - def _decode_set_value(self, set_value): - return [bytes_to_str(string[:]) for string in set_value] - - def _encode_dt_op(self, dtype, req, op): - if dtype == 'counter': - req.op.counter_op.increment = op[1] - elif dtype == 'set': - self._encode_set_op(req.op, op) - elif dtype == 'map': - self._encode_map_op(req.op.map_op, op) - else: - raise TypeError("Cannot send operation on datatype {!r}". 
- format(dtype)) - - def _encode_set_op(self, msg, op): - if 'adds' in op: - msg.set_op.adds.extend(str_to_bytes(op['adds'])) - if 'removes' in op: - msg.set_op.removes.extend(str_to_bytes(op['removes'])) - - def _encode_map_op(self, msg, ops): - for op in ops: - name, dtype = op[1] - ftype = MAP_FIELD_TYPES[dtype] - if op[0] == 'add': - add = msg.adds.add() - add.name = str_to_bytes(name) - add.type = ftype - elif op[0] == 'remove': - remove = msg.removes.add() - remove.name = str_to_bytes(name) - remove.type = ftype - elif op[0] == 'update': - update = msg.updates.add() - update.field.name = str_to_bytes(name) - update.field.type = ftype - self._encode_map_update(dtype, update, op[2]) - - def _encode_map_update(self, dtype, msg, op): - if dtype == 'counter': - # ('increment', some_int) - msg.counter_op.increment = op[1] - elif dtype == 'set': - self._encode_set_op(msg, op) - elif dtype == 'map': - self._encode_map_op(msg.map_op, op) - elif dtype == 'register': - # ('assign', some_str) - msg.register_op = str_to_bytes(op[1]) - elif dtype == 'flag': - if op == 'enable': - msg.flag_op = riak_pb.MapUpdate.ENABLE - else: - msg.flag_op = riak_pb.MapUpdate.DISABLE diff --git a/riak/transports/pbc/connection.py b/riak/transports/pbc/connection.py deleted file mode 100644 index 6f4ee95a..00000000 --- a/riak/transports/pbc/connection.py +++ /dev/null @@ -1,249 +0,0 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
-""" - -import socket -import struct -import riak_pb -from riak.security import SecurityError -from riak import RiakError -from riak_pb.messages import ( - MESSAGE_CLASSES, - MSG_CODE_ERROR_RESP, - MSG_CODE_START_TLS, - MSG_CODE_AUTH_REQ, - MSG_CODE_AUTH_RESP -) -from riak.util import bytes_to_str, str_to_bytes -from six import PY2 -if PY2: - from OpenSSL.SSL import Connection - from riak.transports.security import configure_pyopenssl_context -else: - import ssl - from riak.transports.security import configure_ssl_context - - -class RiakPbcConnection(object): - """ - Connection-related methods for RiakPbcTransport. - """ - - def _encode_msg(self, msg_code, msg=None): - if msg is None: - return struct.pack("!iB", 1, msg_code) - msgstr = msg.SerializeToString() - slen = len(msgstr) - hdr = struct.pack("!iB", 1 + slen, msg_code) - return hdr + msgstr - - def _request(self, msg_code, msg=None, expect=None): - self._send_msg(msg_code, msg) - return self._recv_msg(expect) - - def _non_connect_request(self, msg_code, msg=None, expect=None): - """ - Similar to self._request, but doesn't try to initiate a connection, - thus preventing an infinite loop. - """ - self._non_connect_send_msg(msg_code, msg) - return self._recv_msg(expect) - - def _non_connect_send_msg(self, msg_code, msg): - """ - Similar to self._send, but doesn't try to initiate a connection, - thus preventing an infinite loop. - """ - self._socket.sendall(self._encode_msg(msg_code, msg)) - - def _send_msg(self, msg_code, msg): - self._connect() - self._non_connect_send_msg(msg_code, msg) - - def _init_security(self): - """ - Initialize a secure connection to the server. 
- """ - if not self._starttls(): - raise SecurityError("Could not start TLS connection") - # _ssh_handshake() will throw an exception upon failure - self._ssl_handshake() - if not self._auth(): - raise SecurityError("Could not authorize connection") - - def _starttls(self): - """ - Exchange a STARTTLS message with Riak to initiate secure communications - return True is Riak responds with a STARTTLS response, False otherwise - """ - msg_code, _ = self._non_connect_request(MSG_CODE_START_TLS) - if msg_code == MSG_CODE_START_TLS: - return True - else: - return False - - def _auth(self): - """ - Perform an authorization request against Riak - returns True upon success, False otherwise - Note: Riak will sleep for a short period of time upon a failed - auth request/response to prevent denial of service attacks - """ - req = riak_pb.RpbAuthReq() - req.user = str_to_bytes(self._client._credentials.username) - req.password = str_to_bytes(self._client._credentials.password) - msg_code, _ = self._non_connect_request(MSG_CODE_AUTH_REQ, req, - MSG_CODE_AUTH_RESP) - if msg_code == MSG_CODE_AUTH_RESP: - return True - else: - return False - - if PY2: - def _ssl_handshake(self): - """ - Perform an SSL handshake w/ the server. - Precondition: a successful STARTTLS exchange has - taken place with Riak - returns True upon success, otherwise an exception is raised - """ - if self._client._credentials: - try: - ssl_ctx = configure_pyopenssl_context(self. - _client._credentials) - # attempt to upgrade the socket to SSL - ssl_socket = Connection(ssl_ctx, self._socket) - ssl_socket.set_connect_state() - ssl_socket.do_handshake() - # ssl handshake successful - self._socket = ssl_socket - - self._client._credentials._check_revoked_cert(ssl_socket) - return True - except Exception as e: - # fail if *any* exceptions are thrown during SSL handshake - raise SecurityError(e.message) - else: - def _ssl_handshake(self): - """ - Perform an SSL handshake w/ the server. 
- Precondition: a successful STARTTLS exchange has - taken place with Riak - returns True upon success, otherwise an exception is raised - """ - credentials = self._client._credentials - if credentials: - try: - ssl_ctx = configure_ssl_context(credentials) - host = "riak@" + self._address[0] - ssl_socket = ssl.SSLSocket(sock=self._socket, - keyfile=credentials.pkey_file, - certfile=credentials.cert_file, - cert_reqs=ssl.CERT_REQUIRED, - ca_certs=credentials. - cacert_file, - ciphers=credentials.ciphers, - server_hostname=host) - ssl_socket.context = ssl_ctx - # ssl handshake successful - ssl_socket.do_handshake() - self._socket = ssl_socket - - return True - except ssl.SSLError as e: - raise SecurityError(e.library + ": " + e.reason) - except Exception as e: - # fail if *any* exceptions are thrown during SSL handshake - raise SecurityError(e) - - def _recv_msg(self, expect=None): - self._recv_pkt() - msg_code, = struct.unpack("B", self._inbuf[:1]) - if msg_code is MSG_CODE_ERROR_RESP: - err = self._parse_msg(msg_code, self._inbuf[1:]) - raise RiakError(bytes_to_str(err.errmsg)) - elif msg_code in MESSAGE_CLASSES: - msg = self._parse_msg(msg_code, self._inbuf[1:]) - else: - raise Exception("unknown msg code %s" % msg_code) - - if expect and msg_code != expect: - raise RiakError("unexpected protocol buffer message code: %d, %r" - % (msg_code, msg)) - return msg_code, msg - - def _recv_pkt(self): - nmsglen = self._socket.recv(4) - while len(nmsglen) < 4: - x = self._socket.recv(4 - len(nmsglen)) - if not x: - break - nmsglen += x - if len(nmsglen) != 4: - raise RiakError( - "Socket returned short packet length %d - expected 4" - % len(nmsglen)) - msglen, = struct.unpack('!i', nmsglen) - self._inbuf_len = msglen - if PY2: - self._inbuf = '' - else: - self._inbuf = bytes() - while len(self._inbuf) < msglen: - want_len = min(8192, msglen - len(self._inbuf)) - recv_buf = self._socket.recv(want_len) - if not recv_buf: - break - self._inbuf += recv_buf - if len(self._inbuf) 
!= self._inbuf_len: - raise RiakError("Socket returned short packet %d - expected %d" - % (len(self._inbuf), self._inbuf_len)) - - def _connect(self): - if not self._socket: - if self._timeout: - self._socket = socket.create_connection(self._address, - self._timeout) - else: - self._socket = socket.create_connection(self._address) - if self._client._credentials: - self._init_security() - - def close(self): - """ - Closes the underlying socket of the PB connection. - """ - if self._socket: - self._socket.close() - del self._socket - - def _parse_msg(self, code, packet): - try: - pbclass = MESSAGE_CLASSES[code] - except KeyError: - pbclass = None - - if pbclass is None: - return None - - pbo = pbclass() - pbo.ParseFromString(packet) - return pbo - - # These are set in the RiakPbcTransport initializer - _address = None - _timeout = None diff --git a/riak/transports/pbc/stream.py b/riak/transports/pbc/stream.py deleted file mode 100644 index 88e7abac..00000000 --- a/riak/transports/pbc/stream.py +++ /dev/null @@ -1,183 +0,0 @@ -""" -Copyright 2012 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
-""" - - -import json -from riak_pb.messages import ( - MSG_CODE_LIST_KEYS_RESP, - MSG_CODE_MAP_RED_RESP, - MSG_CODE_LIST_BUCKETS_RESP, - MSG_CODE_INDEX_RESP -) -from riak.util import decode_index_value, bytes_to_str -from riak.client.index_page import CONTINUATION -from six import PY2 - - -class RiakPbcStream(object): - """ - Used internally by RiakPbcTransport to implement streaming - operations. Implements the iterator interface. - """ - - _expect = None - - def __init__(self, transport): - self.finished = False - self.transport = transport - self.resource = None - - def __iter__(self): - return self - - def next(self): - if self.finished: - raise StopIteration - - try: - msg_code, resp = self.transport._recv_msg(expect=self._expect) - except: - self.finished = True - raise - - if(self._is_done(resp)): - self.finished = True - - return resp - - def __next__(self): - # Python 3.x Version - return self.next() - - def _is_done(self, response): - # This could break if new messages don't name the field the - # same thing. - return response.done - - def attach(self, resource): - self.resource = resource - - def close(self): - # We have to drain the socket to make sure that we don't get - # weird responses when some other request comes after a - # failed/prematurely-terminated one. - try: - while self.next(): - pass - except StopIteration: - pass - self.resource.release() - - -class RiakPbcKeyStream(RiakPbcStream): - """ - Used internally by RiakPbcTransport to implement key-list streams. - """ - - _expect = MSG_CODE_LIST_KEYS_RESP - - def next(self): - response = super(RiakPbcKeyStream, self).next() - - if response.done and len(response.keys) is 0: - raise StopIteration - - return response.keys - - def __next__(self): - # Python 3.x Version - return self.next() - - -class RiakPbcMapredStream(RiakPbcStream): - """ - Used internally by RiakPbcTransport to implement MapReduce - streams. 
- """ - - _expect = MSG_CODE_MAP_RED_RESP - - def next(self): - response = super(RiakPbcMapredStream, self).next() - - if response.done and not response.HasField('response'): - raise StopIteration - - return response.phase, json.loads(bytes_to_str(response.response)) - - def __next__(self): - # Python 3.x Version - return self.next() - - -class RiakPbcBucketStream(RiakPbcStream): - """ - Used internally by RiakPbcTransport to implement key-list streams. - """ - - _expect = MSG_CODE_LIST_BUCKETS_RESP - - def next(self): - response = super(RiakPbcBucketStream, self).next() - - if response.done and len(response.buckets) is 0: - raise StopIteration - - return response.buckets - - def __next__(self): - # Python 3.x Version - return self.next() - - -class RiakPbcIndexStream(RiakPbcStream): - """ - Used internally by RiakPbcTransport to implement Secondary Index - streams. - """ - - _expect = MSG_CODE_INDEX_RESP - - def __init__(self, transport, index, return_terms=False): - super(RiakPbcIndexStream, self).__init__(transport) - self.index = index - self.return_terms = return_terms - - def next(self): - response = super(RiakPbcIndexStream, self).next() - - if response.done and not (response.keys or - response.results or - response.continuation): - raise StopIteration - - if self.return_terms and response.results: - return [(decode_index_value(self.index, r.key), - bytes_to_str(r.value)) - for r in response.results] - elif response.keys: - if PY2: - return response.keys[:] - else: - return [bytes_to_str(key) for key in response.keys] - elif response.continuation: - return CONTINUATION(bytes_to_str(response.continuation)) - - def __next__(self): - # Python 3.x Version - return self.next() diff --git a/riak/transports/pbc/transport.py b/riak/transports/pbc/transport.py deleted file mode 100644 index c021e56c..00000000 --- a/riak/transports/pbc/transport.py +++ /dev/null @@ -1,701 +0,0 @@ -""" -Copyright 2012 Basho Technologies, Inc. 
-Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" - -import riak_pb -from riak import RiakError -from riak.transports.transport import RiakTransport -from riak.riak_object import VClock -from riak.util import decode_index_value, str_to_bytes, bytes_to_str -from riak.transports.pbc.connection import RiakPbcConnection -from riak.transports.pbc.stream import (RiakPbcKeyStream, - RiakPbcMapredStream, - RiakPbcBucketStream, - RiakPbcIndexStream) -from riak.transports.pbc.codec import RiakPbcCodec -from six import PY2, PY3 - -from riak_pb.messages import ( - MSG_CODE_PING_REQ, - MSG_CODE_PING_RESP, - MSG_CODE_GET_CLIENT_ID_REQ, - MSG_CODE_GET_CLIENT_ID_RESP, - MSG_CODE_SET_CLIENT_ID_REQ, - MSG_CODE_SET_CLIENT_ID_RESP, - MSG_CODE_GET_SERVER_INFO_REQ, - MSG_CODE_GET_SERVER_INFO_RESP, - MSG_CODE_GET_REQ, - MSG_CODE_GET_RESP, - MSG_CODE_PUT_REQ, - MSG_CODE_PUT_RESP, - MSG_CODE_DEL_REQ, - MSG_CODE_DEL_RESP, - MSG_CODE_LIST_BUCKETS_REQ, - MSG_CODE_LIST_BUCKETS_RESP, - MSG_CODE_LIST_KEYS_REQ, - MSG_CODE_GET_BUCKET_REQ, - MSG_CODE_GET_BUCKET_RESP, - MSG_CODE_SET_BUCKET_REQ, - MSG_CODE_SET_BUCKET_RESP, - MSG_CODE_GET_BUCKET_TYPE_REQ, - MSG_CODE_SET_BUCKET_TYPE_REQ, - MSG_CODE_MAP_RED_REQ, - MSG_CODE_INDEX_REQ, - MSG_CODE_INDEX_RESP, - MSG_CODE_SEARCH_QUERY_REQ, - MSG_CODE_SEARCH_QUERY_RESP, - MSG_CODE_RESET_BUCKET_REQ, - MSG_CODE_RESET_BUCKET_RESP, - MSG_CODE_COUNTER_UPDATE_REQ, - 
MSG_CODE_COUNTER_UPDATE_RESP, - MSG_CODE_COUNTER_GET_REQ, - MSG_CODE_COUNTER_GET_RESP, - MSG_CODE_YOKOZUNA_INDEX_GET_REQ, - MSG_CODE_YOKOZUNA_INDEX_GET_RESP, - MSG_CODE_YOKOZUNA_INDEX_PUT_REQ, - MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ, - MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ, - MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP, - MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ, - MSG_CODE_DT_FETCH_REQ, - MSG_CODE_DT_FETCH_RESP, - MSG_CODE_DT_UPDATE_REQ, - MSG_CODE_DT_UPDATE_RESP -) - - -class RiakPbcTransport(RiakTransport, RiakPbcConnection, RiakPbcCodec): - """ - The RiakPbcTransport object holds a connection to the protocol - buffers interface on the riak server. - """ - - def __init__(self, - node=None, - client=None, - timeout=None, - *unused_options): - """ - Construct a new RiakPbcTransport object. - """ - super(RiakPbcTransport, self).__init__() - - self._client = client - self._node = node - self._address = (node.host, node.pb_port) - self._timeout = timeout - self._socket = None - - # FeatureDetection API - def _server_version(self): - return bytes_to_str(self.get_server_info()['server_version']) - - def ping(self): - """ - Ping the remote server - """ - - msg_code, msg = self._request(MSG_CODE_PING_REQ) - if msg_code == MSG_CODE_PING_RESP: - return True - else: - return False - - def get_server_info(self): - """ - Get information about the server - """ - msg_code, resp = self._request(MSG_CODE_GET_SERVER_INFO_REQ, - expect=MSG_CODE_GET_SERVER_INFO_RESP) - return {'node': bytes_to_str(resp.node), - 'server_version': bytes_to_str(resp.server_version)} - - def _get_client_id(self): - msg_code, resp = self._request(MSG_CODE_GET_CLIENT_ID_REQ, - expect=MSG_CODE_GET_CLIENT_ID_RESP) - return bytes_to_str(resp.client_id) - - def _set_client_id(self, client_id): - req = riak_pb.RpbSetClientIdReq() - req.client_id = str_to_bytes(client_id) - - msg_code, resp = self._request(MSG_CODE_SET_CLIENT_ID_REQ, req, - MSG_CODE_SET_CLIENT_ID_RESP) - - self._client_id = client_id - - client_id = 
property(_get_client_id, _set_client_id, - doc="""the client ID for this connection""") - - def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, - notfound_ok=None): - """ - Serialize get request and deserialize response - """ - bucket = robj.bucket - - req = riak_pb.RpbGetReq() - if r: - req.r = self._encode_quorum(r) - if self.quorum_controls(): - if pr: - req.pr = self._encode_quorum(pr) - if basic_quorum is not None: - req.basic_quorum = basic_quorum - if notfound_ok is not None: - req.notfound_ok = notfound_ok - if self.client_timeouts() and timeout: - req.timeout = timeout - if self.tombstone_vclocks(): - req.deletedvclock = True - - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - - req.key = str_to_bytes(robj.key) - - msg_code, resp = self._request(MSG_CODE_GET_REQ, req, - MSG_CODE_GET_RESP) - - if resp is not None: - if resp.HasField('vclock'): - robj.vclock = VClock(resp.vclock, 'binary') - # We should do this even if there are no contents, i.e. 
- # the object is tombstoned - self._decode_contents(resp.content, robj) - else: - # "not found" returns an empty message, - # so let's make sure to clear the siblings - robj.siblings = [] - - return robj - - def put(self, robj, w=None, dw=None, pw=None, return_body=True, - if_none_match=False, timeout=None): - bucket = robj.bucket - - req = riak_pb.RpbPutReq() - if w: - req.w = self._encode_quorum(w) - if dw: - req.dw = self._encode_quorum(dw) - if self.quorum_controls() and pw: - req.pw = self._encode_quorum(pw) - - if return_body: - req.return_body = 1 - if if_none_match: - req.if_none_match = 1 - if self.client_timeouts() and timeout: - req.timeout = timeout - - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - - if robj.key: - req.key = str_to_bytes(robj.key) - if robj.vclock: - req.vclock = robj.vclock.encode('binary') - - self._encode_content(robj, req.content) - - msg_code, resp = self._request(MSG_CODE_PUT_REQ, req, - MSG_CODE_PUT_RESP) - - if resp is not None: - if resp.HasField('key'): - robj.key = bytes_to_str(resp.key) - if resp.HasField("vclock"): - robj.vclock = VClock(resp.vclock, 'binary') - if resp.content: - self._decode_contents(resp.content, robj) - elif not robj.key: - raise RiakError("missing response object") - - return robj - - def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, - timeout=None): - req = riak_pb.RpbDelReq() - if rw: - req.rw = self._encode_quorum(rw) - if r: - req.r = self._encode_quorum(r) - if w: - req.w = self._encode_quorum(w) - if dw: - req.dw = self._encode_quorum(dw) - - if self.quorum_controls(): - if pr: - req.pr = self._encode_quorum(pr) - if pw: - req.pw = self._encode_quorum(pw) - - if self.client_timeouts() and timeout: - req.timeout = timeout - - use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') - and robj.vclock) - if use_vclocks: - req.vclock = robj.vclock.encode('binary') - - bucket = robj.bucket - req.bucket = 
str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - req.key = str_to_bytes(robj.key) - - msg_code, resp = self._request(MSG_CODE_DEL_REQ, req, - MSG_CODE_DEL_RESP) - return self - - def get_keys(self, bucket, timeout=None): - """ - Lists all keys within a bucket. - """ - keys = [] - for keylist in self.stream_keys(bucket, timeout=timeout): - for key in keylist: - keys.append(bytes_to_str(key)) - - return keys - - def stream_keys(self, bucket, timeout=None): - """ - Streams keys from a bucket, returning an iterator that yields - lists of keys. - """ - req = riak_pb.RpbListKeysReq() - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - if self.client_timeouts() and timeout: - req.timeout = timeout - - self._send_msg(MSG_CODE_LIST_KEYS_REQ, req) - - return RiakPbcKeyStream(self) - - def get_buckets(self, bucket_type=None, timeout=None): - """ - Serialize bucket listing request and deserialize response - """ - req = riak_pb.RpbListBucketsReq() - self._add_bucket_type(req, bucket_type) - - if self.client_timeouts() and timeout: - req.timeout = timeout - - msg_code, resp = self._request(MSG_CODE_LIST_BUCKETS_REQ, req, - MSG_CODE_LIST_BUCKETS_RESP) - return resp.buckets - - def stream_buckets(self, bucket_type=None, timeout=None): - """ - Stream list of buckets through an iterator - """ - - if not self.bucket_stream(): - raise NotImplementedError('Streaming list-buckets is not ' - 'supported') - - req = riak_pb.RpbListBucketsReq() - req.stream = True - self._add_bucket_type(req, bucket_type) - # Bucket streaming landed in the same release as timeouts, so - # we don't need to check the capability. 
- if timeout: - req.timeout = timeout - - self._send_msg(MSG_CODE_LIST_BUCKETS_REQ, req) - - return RiakPbcBucketStream(self) - - def get_bucket_props(self, bucket): - """ - Serialize bucket property request and deserialize response - """ - req = riak_pb.RpbGetBucketReq() - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - - msg_code, resp = self._request(MSG_CODE_GET_BUCKET_REQ, req, - MSG_CODE_GET_BUCKET_RESP) - - return self._decode_bucket_props(resp.props) - - def set_bucket_props(self, bucket, props): - """ - Serialize set bucket property request and deserialize response - """ - req = riak_pb.RpbSetBucketReq() - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - - if not self.pb_all_bucket_props(): - for key in props: - if key not in ('n_val', 'allow_mult'): - raise NotImplementedError('Server only supports n_val and ' - 'allow_mult properties over PBC') - - self._encode_bucket_props(props, req) - - msg_code, resp = self._request(MSG_CODE_SET_BUCKET_REQ, req, - MSG_CODE_SET_BUCKET_RESP) - return True - - def clear_bucket_props(self, bucket): - """ - Clear bucket properties, resetting them to their defaults - """ - if not self.pb_clear_bucket_props(): - return False - - req = riak_pb.RpbResetBucketReq() - req.bucket = str_to_bytes(bucket.name) - self._add_bucket_type(req, bucket.bucket_type) - self._request(MSG_CODE_RESET_BUCKET_REQ, req, - MSG_CODE_RESET_BUCKET_RESP) - return True - - def get_bucket_type_props(self, bucket_type): - """ - Fetch bucket-type properties - """ - self._check_bucket_types(bucket_type) - - req = riak_pb.RpbGetBucketTypeReq() - req.type = str_to_bytes(bucket_type.name) - - msg_code, resp = self._request(MSG_CODE_GET_BUCKET_TYPE_REQ, req, - MSG_CODE_GET_BUCKET_RESP) - - return self._decode_bucket_props(resp.props) - - def set_bucket_type_props(self, bucket_type, props): - """ - Set bucket-type properties - """ - self._check_bucket_types(bucket_type) - - req = 
riak_pb.RpbSetBucketTypeReq() - req.type = str_to_bytes(bucket_type.name) - - self._encode_bucket_props(props, req) - - msg_code, resp = self._request(MSG_CODE_SET_BUCKET_TYPE_REQ, req, - MSG_CODE_SET_BUCKET_RESP) - return True - - def mapred(self, inputs, query, timeout=None): - # dictionary of phase results - each content should be an encoded array - # which is appended to the result for that phase. - result = {} - for phase, content in self.stream_mapred(inputs, query, timeout): - if phase in result: - result[phase] += content - else: - result[phase] = content - - # If a single result - return the same as the HTTP interface does - # otherwise return all the phase information - if not len(result): - return None - elif len(result) == 1: - return result[max(result.keys())] - else: - return result - - def stream_mapred(self, inputs, query, timeout=None): - # Construct the job, optionally set the timeout... - content = self._construct_mapred_json(inputs, query, timeout) - - req = riak_pb.RpbMapRedReq() - req.request = str_to_bytes(content) - req.content_type = str_to_bytes("application/json") - - self._send_msg(MSG_CODE_MAP_RED_REQ, req) - - return RiakPbcMapredStream(self) - - def get_index(self, bucket, index, startkey, endkey=None, - return_terms=None, max_results=None, continuation=None, - timeout=None, term_regex=None): - if not self.pb_indexes(): - return self._get_index_mapred_emu(bucket, index, startkey, endkey) - - if term_regex and not self.index_term_regex(): - raise NotImplementedError("Secondary index term_regex is not " - "supported") - - req = self._encode_index_req(bucket, index, startkey, endkey, - return_terms, max_results, continuation, - timeout, term_regex) - - msg_code, resp = self._request(MSG_CODE_INDEX_REQ, req, - MSG_CODE_INDEX_RESP) - - if return_terms and resp.results: - results = [(decode_index_value(index, pair.key), - bytes_to_str(pair.value)) - for pair in resp.results] - else: - results = resp.keys[:] - if PY3: - results = 
[bytes_to_str(key) for key in resp.keys] - - if max_results is not None and resp.HasField('continuation'): - return (results, bytes_to_str(resp.continuation)) - else: - return (results, None) - - def stream_index(self, bucket, index, startkey, endkey=None, - return_terms=None, max_results=None, continuation=None, - timeout=None, term_regex=None): - if not self.stream_indexes(): - raise NotImplementedError("Secondary index streaming is not " - "supported") - - if term_regex and not self.index_term_regex(): - raise NotImplementedError("Secondary index term_regex is not " - "supported") - - req = self._encode_index_req(bucket, index, startkey, endkey, - return_terms, max_results, continuation, - timeout, term_regex) - req.stream = True - - self._send_msg(MSG_CODE_INDEX_REQ, req) - - return RiakPbcIndexStream(self, index, return_terms) - - def create_search_index(self, index, schema=None, n_val=None): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - index = str_to_bytes(index) - idx = riak_pb.RpbYokozunaIndex(name=index) - if schema: - idx.schema = str_to_bytes(schema) - if n_val: - idx.n_val = n_val - req = riak_pb.RpbYokozunaIndexPutReq(index=idx) - - self._request(MSG_CODE_YOKOZUNA_INDEX_PUT_REQ, req, - MSG_CODE_PUT_RESP) - return True - - def get_search_index(self, index): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - req = riak_pb.RpbYokozunaIndexGetReq(name=str_to_bytes(index)) - - msg_code, resp = self._request(MSG_CODE_YOKOZUNA_INDEX_GET_REQ, req, - MSG_CODE_YOKOZUNA_INDEX_GET_RESP) - if len(resp.index) > 0: - return self._decode_search_index(resp.index[0]) - else: - raise RiakError('notfound') - - def list_search_indexes(self): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - req = riak_pb.RpbYokozunaIndexGetReq() - 
- msg_code, resp = self._request(MSG_CODE_YOKOZUNA_INDEX_GET_REQ, req, - MSG_CODE_YOKOZUNA_INDEX_GET_RESP) - - return [self._decode_search_index(index) for index in resp.index] - - def delete_search_index(self, index): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - req = riak_pb.RpbYokozunaIndexDeleteReq(name=str_to_bytes(index)) - - self._request(MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ, req, - MSG_CODE_DEL_RESP) - - return True - - def create_search_schema(self, schema, content): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - scma = riak_pb.RpbYokozunaSchema(name=str_to_bytes(schema), - content=str_to_bytes(content)) - req = riak_pb.RpbYokozunaSchemaPutReq(schema=scma) - - self._request(MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ, req, - MSG_CODE_PUT_RESP) - return True - - def get_search_schema(self, schema): - if not self.pb_search_admin(): - raise NotImplementedError("Search 2.0 administration is not " - "supported for this version") - req = riak_pb.RpbYokozunaSchemaGetReq(name=str_to_bytes(schema)) - - msg_code, resp = self._request(MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ, req, - MSG_CODE_YOKOZUNA_SCHEMA_GET_RESP) - result = {} - result['name'] = bytes_to_str(resp.schema.name) - result['content'] = bytes_to_str(resp.schema.content) - return result - - def search(self, index, query, **params): - if not self.pb_search(): - return self._search_mapred_emu(index, query) - - if PY2 and isinstance(query, unicode): - query = query.encode('utf8') - - req = riak_pb.RpbSearchQueryReq(index=str_to_bytes(index), - q=str_to_bytes(query)) - self._encode_search_query(req, params) - - msg_code, resp = self._request(MSG_CODE_SEARCH_QUERY_REQ, req, - MSG_CODE_SEARCH_QUERY_RESP) - - result = {} - if resp.HasField('max_score'): - result['max_score'] = resp.max_score - if resp.HasField('num_found'): - result['num_found'] = resp.num_found - 
result['docs'] = [self._decode_search_doc(doc) for doc in resp.docs] - return result - - def get_counter(self, bucket, key, **params): - if not bucket.bucket_type.is_default(): - raise NotImplementedError("Counters are not " - "supported with bucket-types, " - "use datatypes instead.") - - if not self.counters(): - raise NotImplementedError("Counters are not supported") - - req = riak_pb.RpbCounterGetReq() - req.bucket = str_to_bytes(bucket.name) - req.key = str_to_bytes(key) - if params.get('r') is not None: - req.r = self._encode_quorum(params['r']) - if params.get('pr') is not None: - req.pr = self._encode_quorum(params['pr']) - if params.get('basic_quorum') is not None: - req.basic_quorum = params['basic_quorum'] - if params.get('notfound_ok') is not None: - req.notfound_ok = params['notfound_ok'] - - msg_code, resp = self._request(MSG_CODE_COUNTER_GET_REQ, req, - MSG_CODE_COUNTER_GET_RESP) - if resp.HasField('value'): - return resp.value - else: - return None - - def update_counter(self, bucket, key, value, **params): - if not bucket.bucket_type.is_default(): - raise NotImplementedError("Counters are not " - "supported with bucket-types, " - "use datatypes instead.") - - if not self.counters(): - raise NotImplementedError("Counters are not supported") - - req = riak_pb.RpbCounterUpdateReq() - req.bucket = str_to_bytes(bucket.name) - req.key = str_to_bytes(key) - req.amount = value - if params.get('w') is not None: - req.w = self._encode_quorum(params['w']) - if params.get('dw') is not None: - req.dw = self._encode_quorum(params['dw']) - if params.get('pw') is not None: - req.pw = self._encode_quorum(params['pw']) - if params.get('returnvalue') is not None: - req.returnvalue = params['returnvalue'] - - msg_code, resp = self._request(MSG_CODE_COUNTER_UPDATE_REQ, req, - MSG_CODE_COUNTER_UPDATE_RESP) - if resp.HasField('value'): - return resp.value - else: - return True - - def fetch_datatype(self, bucket, key, **options): - - if bucket.bucket_type.is_default(): - 
raise NotImplementedError("Datatypes cannot be used in the default" - " bucket-type.") - - if not self.datatypes(): - raise NotImplementedError("Datatypes are not supported.") - - req = riak_pb.DtFetchReq() - req.type = str_to_bytes(bucket.bucket_type.name) - req.bucket = str_to_bytes(bucket.name) - req.key = str_to_bytes(key) - self._encode_dt_options(req, options) - - msg_code, resp = self._request(MSG_CODE_DT_FETCH_REQ, req, - MSG_CODE_DT_FETCH_RESP) - - return self._decode_dt_fetch(resp) - - def update_datatype(self, datatype, **options): - - if datatype.bucket.bucket_type.is_default(): - raise NotImplementedError("Datatypes cannot be used in the default" - " bucket-type.") - - if not self.datatypes(): - raise NotImplementedError("Datatypes are not supported.") - - op = datatype.to_op() - type_name = datatype.type_name - if not op: - raise ValueError("No operation to send on datatype {!r}". - format(datatype)) - - req = riak_pb.DtUpdateReq() - req.bucket = str_to_bytes(datatype.bucket.name) - req.type = str_to_bytes(datatype.bucket.bucket_type.name) - - if datatype.key: - req.key = str_to_bytes(datatype.key) - if datatype._context: - req.context = datatype._context - - self._encode_dt_options(req, options) - - self._encode_dt_op(type_name, req, op) - - msg_code, resp = self._request(MSG_CODE_DT_UPDATE_REQ, req, - MSG_CODE_DT_UPDATE_RESP) - if resp.HasField('key'): - datatype.key = resp.key[:] - if resp.HasField('context'): - datatype._context = resp.context[:] - - if options.get('return_body'): - datatype._set_value(self._decode_dt_value(type_name, resp)) - - return True diff --git a/riak/transports/pool.py b/riak/transports/pool.py index 4b21fd8e..38a87b43 100644 --- a/riak/transports/pool.py +++ b/riak/transports/pool.py @@ -1,34 +1,49 @@ -""" -Copyright 2012 Basho Technologies, Inc. +# Copyright 2010-present Basho Technologies, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 +from __future__ import print_function -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +import threading -from __future__ import print_function from contextlib import contextmanager -import threading -# This file is a rough port of the Innertube Ruby library class BadResource(Exception): """ Users of a :class:`Pool` should raise this error when the pool resource currently in-use is bad and should be removed from the pool. + + :param mid_stream: did this exception happen mid-streaming op? + :type mid_stream: boolean + """ + def __init__(self, ex, mid_stream=False): + super(BadResource, self).__init__(ex) + self.mid_stream = mid_stream + + +class ConnectionClosed(BadResource): + """ + Users of a :class:`Pool` should raise this error when the pool + resource currently in-use has been closed and should be removed + from the pool. + + :param mid_stream: did this exception happen mid-streaming op? 
+ :type mid_stream: boolean """ - pass + def __init__(self, ex, mid_stream=False): + super(ConnectionClosed, self).__init__(ex, mid_stream) class Resource(object): @@ -46,20 +61,26 @@ def __init__(self, obj, pool): :type obj: object """ - self.object = obj """The wrapped pool resource.""" + self.object = obj - self.claimed = False """Whether the resource is currently in use.""" + self.claimed = False - self.pool = pool """The pool that this resource belongs to.""" + self.pool = pool + + """True if this Resource errored.""" + self.errored = False def release(self): """ Releases this resource back to the pool it came from. """ - self.pool.release(self) + if self.errored: + self.pool.delete_resource(self) + else: + self.pool.release(self) class Pool(object): @@ -76,7 +97,7 @@ class Pool(object): Example:: - from riak.Pool import Pool, BadResource + from riak.transports.pool import Pool class ListPool(Pool): def create_resource(self): return [] @@ -90,7 +111,6 @@ def destroy_resource(self): resource.append(1) with pool.transaction() as resource2: print(repr(resource2)) # should be [1] - """ def __init__(self): @@ -154,7 +174,7 @@ def release(self, resource): self.releaser.notify_all() @contextmanager - def transaction(self, _filter=None, default=None): + def transaction(self, _filter=None, default=None, yield_resource=False): """ transaction(_filter=None, default=None) @@ -168,10 +188,18 @@ def transaction(self, _filter=None, default=None): :type _filter: callable :param default: a value that will be used instead of calling :meth:`create_resource` if a new resource needs to be created + :param yield_resource: set to True to yield the Resource object + itself + :type yield_resource: boolean """ resource = self.acquire(_filter=_filter, default=default) try: - yield resource.object + if yield_resource: + yield resource + else: + yield resource.object + if resource.errored: + self.delete_resource(resource) except BadResource: self.delete_resource(resource) raise diff --git 
a/riak/transports/security.py b/riak/transports/security.py index 8a098449..01cf6315 100644 --- a/riak/transports/security.py +++ b/riak/transports/security.py @@ -1,32 +1,27 @@ -""" -Copyright 2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import socket -from six import PY2 -if PY2: +from riak.security import SecurityError, USE_STDLIB_SSL +if USE_STDLIB_SSL: + import ssl +else: import OpenSSL.SSL try: from cStringIO import StringIO except ImportError: from StringIO import StringIO -else: - import ssl -from riak.security import SecurityError def verify_cb(conn, cert, errnum, depth, ok): @@ -39,42 +34,10 @@ def verify_cb(conn, cert, errnum, depth, ok): return ok -if PY2: - def configure_pyopenssl_context(credentials): - """ - Set various options on the SSL context for Python 2.x. 
- - :param credentials: Riak Security Credentials - :type credentials: :class:`~riak.security.SecurityCreds` - :rtype ssl_ctx: :class:`~OpenSSL.SSL.Context` - """ - - ssl_ctx = OpenSSL.SSL.Context(credentials.ssl_version) - if credentials._has_credential('pkey'): - ssl_ctx.use_privatekey(credentials.pkey) - if credentials._has_credential('cert'): - ssl_ctx.use_certificate(credentials.cert) - if credentials._has_credential('cacert'): - store = ssl_ctx.get_cert_store() - cacerts = credentials.cacert - if not isinstance(cacerts, list): - cacerts = [cacerts] - for cacert in cacerts: - store.add_cert(cacert) - else: - raise SecurityError("cacert_file is required in SecurityCreds") - ciphers = credentials.ciphers - if ciphers is not None: - ssl_ctx.set_cipher_list(ciphers) - # Demand a certificate - ssl_ctx.set_verify(OpenSSL.SSL.VERIFY_PEER | - OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, - verify_cb) - return ssl_ctx -else: +if USE_STDLIB_SSL: def configure_ssl_context(credentials): """ - Set various options on the SSL context for Python 3.x. + Set various options on the SSL context for Python >= 2.7.9 and 3.x. N.B. versions earlier than 3.4 may not support all security measures, e.g., hostname check. @@ -105,6 +68,7 @@ def configure_ssl_context(credentials): pkeyfile = certfile if certfile: ssl_ctx.load_cert_chain(certfile, pkeyfile) + # TODO https://bugs.python.org/issue8813 if credentials.crl_file is not None: ssl_ctx.load_verify_locations(credentials.crl_file) ssl_ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF @@ -121,49 +85,79 @@ def configure_ssl_context(credentials): return ssl_ctx - -# Inspired by -# https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py -class RiakWrappedSocket(socket.socket): - def __init__(self, connection, socket): +else: + def configure_pyopenssl_context(credentials): """ - API-compatibility wrapper for Python OpenSSL's Connection-class. + Set various options on the SSL context for Python <= 2.7.8. 
- :param connection: OpenSSL connection - :type connection: OpenSSL.SSL.Connection - :param socket: Underlying already connected socket - :type socket: socket + :param credentials: Riak Security Credentials + :type credentials: :class:`~riak.security.SecurityCreds` + :rtype ssl_ctx: :class:`~OpenSSL.SSL.Context` """ - self.connection = connection - self.socket = socket - - def fileno(self): - return self.socket.fileno() - - def makefile(self, mode, bufsize=-1): - return fileobject(self.connection, mode, bufsize) - - def settimeout(self, timeout): - return self.socket.settimeout(timeout) - - def sendall(self, data): - # SSL seems to need bytes, so force the data to byte encoding - return self.connection.sendall(bytes(data)) - - def close(self): - try: - return self.connection.shutdown() - except OpenSSL.SSL.Error as err: - if err.args == ([],): - return False - else: - raise err + ssl_ctx = OpenSSL.SSL.Context(credentials.ssl_version) + if credentials._has_credential('pkey'): + ssl_ctx.use_privatekey(credentials.pkey) + if credentials._has_credential('cert'): + ssl_ctx.use_certificate(credentials.cert) + if credentials._has_credential('cacert'): + store = ssl_ctx.get_cert_store() + cacerts = credentials.cacert + if not isinstance(cacerts, list): + cacerts = [cacerts] + for cacert in cacerts: + store.add_cert(cacert) + else: + raise SecurityError("cacert_file is required in SecurityCreds") + ciphers = credentials.ciphers + if ciphers is not None: + ssl_ctx.set_cipher_list(ciphers) + # Demand a certificate + ssl_ctx.set_verify(OpenSSL.SSL.VERIFY_PEER | + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT, + verify_cb) + return ssl_ctx -# Blatantly Stolen from -# https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py -# which is basically a port of the `socket._fileobject` class -if PY2: + # Inspired by + # https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py + class RiakWrappedSocket(socket.socket): + def __init__(self, connection, 
socket): + """ + API-compatibility wrapper for Python OpenSSL's Connection-class. + + :param connection: OpenSSL connection + :type connection: OpenSSL.SSL.Connection + :param socket: Underlying already connected socket + :type socket: socket + """ + self.connection = connection + self.socket = socket + + def fileno(self): + return self.socket.fileno() + + def makefile(self, mode, bufsize=-1): + return fileobject(self.connection, mode, bufsize) + + def settimeout(self, timeout): + return self.socket.settimeout(timeout) + + def sendall(self, data): + # SSL seems to need bytes, so force the data to byte encoding + return self.connection.sendall(bytes(data)) + + def close(self): + try: + return self.connection.shutdown() + except OpenSSL.SSL.Error as err: + if err.args == ([],): + return False + else: + raise err + + # Blatantly Stolen from + # https://github.com/shazow/urllib3/blob/master/urllib3/contrib/pyopenssl.py + # which is basically a port of the `socket._fileobject` class class fileobject(socket._fileobject): """ Extension of the socket module's fileobject to use PyOpenSSL. diff --git a/riak/transports/tcp/__init__.py b/riak/transports/tcp/__init__.py new file mode 100644 index 00000000..d58add2e --- /dev/null +++ b/riak/transports/tcp/__init__.py @@ -0,0 +1,75 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import errno +import socket + +from riak.transports.pool import Pool, ConnectionClosed +from riak.transports.tcp.transport import TcpTransport + + +class TcpPool(Pool): + """ + A resource pool of TCP transports. + """ + def __init__(self, client, **options): + super(TcpPool, self).__init__() + self._client = client + self._options = options + + def create_resource(self): + node = self._client._choose_node() + return TcpTransport(node=node, + client=self._client, + **self._options) + + def destroy_resource(self, tcp): + tcp.close() + + +# These are a specific set of socket errors +# that could be raised on send/recv that indicate +# that the socket is closed or reset, and is not +# usable. On seeing any of these errors, the socket +# should be closed, and the connection re-established. +CONN_CLOSED_ERRORS = ( + errno.EHOSTUNREACH, + errno.ECONNRESET, + errno.ECONNREFUSED, + errno.ECONNABORTED, + errno.ETIMEDOUT, + errno.EBADF, + errno.EPIPE +) + + +def is_retryable(err): + """ + Determines if the given exception is something that is + network/socket-related and should thus cause the TCP connection to + close and the operation retried on another node. + + :rtype: boolean + """ + if isinstance(err, ConnectionClosed): + # NB: only retryable if we're not mid-streaming + if err.mid_stream: + return False + else: + return True + elif isinstance(err, socket.error): + code = err.args[0] + return code in CONN_CLOSED_ERRORS + else: + return False diff --git a/riak/transports/tcp/connection.py b/riak/transports/tcp/connection.py new file mode 100644 index 00000000..13c02cf4 --- /dev/null +++ b/riak/transports/tcp/connection.py @@ -0,0 +1,283 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import errno +import logging +import socket +import struct +import six +import riak.pb.riak_pb2 +import riak.pb.messages + +from riak import RiakError +from riak.codecs.pbuf import PbufCodec +from riak.security import SecurityError, USE_STDLIB_SSL +from riak.transports.pool import BadResource, ConnectionClosed + +if USE_STDLIB_SSL: + import ssl + from riak.transports.security import configure_ssl_context +else: + from OpenSSL.SSL import Connection + from riak.transports.security import configure_pyopenssl_context + + +class TcpConnection(object): + def __init__(self): + self.bytes_required = False + + """ + Connection-related methods for TcpTransport. + """ + def _encode_msg(self, msg_code, data=None): + if data is None: + return struct.pack("!iB", 1, msg_code) + hdr = struct.pack("!iB", 1 + len(data), msg_code) + return hdr + data + + def _send_recv(self, msg_code, data=None): + self._send_msg(msg_code, data) + return self._recv_msg() + + def _non_connect_send_recv(self, msg_code, data=None): + """ + Similar to self._send_recv, but doesn't try to initiate a connection, + thus preventing an infinite loop. + """ + self._non_connect_send_msg(msg_code, data) + return self._recv_msg() + + def _non_connect_send_recv_msg(self, msg): + self._non_connect_send_msg(msg.msg_code, msg.data) + return self._recv_msg() + + def _non_connect_send_msg(self, msg_code, data): + """ + Similar to self._send, but doesn't try to initiate a connection, + thus preventing an infinite loop. 
+ """ + try: + self._socket.sendall(self._encode_msg(msg_code, data)) + except (IOError, socket.error) as e: + if e.errno == errno.EPIPE: + raise ConnectionClosed(e) + else: + raise + + def _send_msg(self, msg_code, data): + self._connect() + self._non_connect_send_msg(msg_code, data) + + def _init_security(self): + """ + Initialize a secure connection to the server. + """ + if not self._starttls(): + raise SecurityError("Could not start TLS connection") + # _ssh_handshake() will throw an exception upon failure + self._ssl_handshake() + if not self._auth(): + raise SecurityError("Could not authorize connection") + + def _starttls(self): + """ + Exchange a STARTTLS message with Riak to initiate secure communications + return True is Riak responds with a STARTTLS response, False otherwise + """ + resp_code, _ = self._non_connect_send_recv( + riak.pb.messages.MSG_CODE_START_TLS) + if resp_code == riak.pb.messages.MSG_CODE_START_TLS: + return True + else: + return False + + def _auth(self): + """ + Perform an authorization request against Riak + returns True upon success, False otherwise + Note: Riak will sleep for a short period of time upon a failed + auth request/response to prevent denial of service attacks + """ + codec = PbufCodec() + username = self._client._credentials.username + password = self._client._credentials.password + if not password: + password = '' + msg = codec.encode_auth(username, password) + resp_code, _ = self._non_connect_send_recv_msg(msg) + if resp_code == riak.pb.messages.MSG_CODE_AUTH_RESP: + return True + else: + return False + + if not USE_STDLIB_SSL: + def _ssl_handshake(self): + """ + Perform an SSL handshake w/ the server. + Precondition: a successful STARTTLS exchange has + taken place with Riak + returns True upon success, otherwise an exception is raised + """ + if self._client._credentials: + try: + ssl_ctx = configure_pyopenssl_context(self. 
+ _client._credentials) + # attempt to upgrade the socket to SSL + ssl_socket = Connection(ssl_ctx, self._socket) + ssl_socket.set_connect_state() + ssl_socket.do_handshake() + # ssl handshake successful + self._socket = ssl_socket + + self._client._credentials._check_revoked_cert(ssl_socket) + return True + except Exception as e: + # fail if *any* exceptions are thrown during SSL handshake + raise SecurityError(e) + else: + def _ssl_handshake(self): + """ + Perform an SSL handshake w/ the server. + Precondition: a successful STARTTLS exchange has + taken place with Riak + returns True upon success, otherwise an exception is raised + """ + credentials = self._client._credentials + if credentials: + try: + ssl_ctx = configure_ssl_context(credentials) + host = self._address[0] + ssl_socket = ssl.SSLSocket(sock=self._socket, + keyfile=credentials.pkey_file, + certfile=credentials.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=credentials. + cacert_file, + ciphers=credentials.ciphers, + server_hostname=host) + ssl_socket.context = ssl_ctx + # ssl handshake successful + ssl_socket.do_handshake() + self._socket = ssl_socket + return True + except ssl.SSLError as e: + raise SecurityError(e) + except Exception as e: + # fail if *any* exceptions are thrown during SSL handshake + raise SecurityError(e) + + def _recv_msg(self, mid_stream=False): + """ + :param mid_stream: are we receiving in a streaming operation? + :type mid_stream: boolean + """ + try: + msgbuf = self._recv_pkt() + except BadResource as e: + e.mid_stream = mid_stream + raise + except socket.timeout as e: + # A timeout can leave the socket in an inconsistent state because + # it might still receive the data later and mix up with a + # subsequent request. 
+ # https://github.com/basho/riak-python-client/issues/425 + raise BadResource(e, mid_stream) + mv = memoryview(msgbuf) + mcb = mv[0:1] + if self.bytes_required: + mcb = mcb.tobytes() + try: + msg_code, = struct.unpack("B", mcb) + except struct.error: + # NB: Python 2.7.3 requires this + # http://bugs.python.org/issue10212 + msg_code, = struct.unpack("B", mv[0:1].tobytes()) + self.bytes_required = True + data = mv[1:].tobytes() + return (msg_code, data) + + def _recv_pkt(self): + # TODO FUTURE re-use buffer + msglen_buf = self._recv(4) + # NB: msg length is an unsigned int + if self.bytes_required: + msglen_buf = bytes(msglen_buf) + try: + msglen, = struct.unpack('!I', msglen_buf) + except struct.error: + # NB: Python 2.7.3 requires this + # http://bugs.python.org/issue10212 + msglen, = struct.unpack('!I', bytes(msglen_buf)) + self.bytes_required = True + return self._recv(msglen) + + def _recv(self, msglen): + # TODO FUTURE re-use buffer + # http://stackoverflow.com/a/15964489 + msgbuf = bytearray(msglen) + view = memoryview(msgbuf) + nread = 0 + toread = msglen + while toread: + nbytes = self._socket.recv_into(view, toread) + # https://docs.python.org/2/howto/sockets.html#using-a-socket + # https://github.com/basho/riak-python-client/issues/399 + if nbytes == 0: + msg = 'socket recv returned zero bytes unexpectedly, ' \ + 'expected {}'.format(toread) + ex = RiakError(msg) + raise ConnectionClosed(ex) + view = view[nbytes:] # slicing views is cheap + toread -= nbytes + nread += nbytes + if nread != msglen: + raise RiakError("Socket returned short packet %d - expected %d" + % (nread, msglen)) + return msgbuf + + def _connect(self): + if not self._socket: + if self._timeout: + self._socket = socket.create_connection(self._address, + self._timeout) + else: + self._socket = socket.create_connection(self._address) + if self._socket_tcp_options: + ka_opts = self._socket_tcp_options + for k, v in six.iteritems(ka_opts): + self._socket.setsockopt(socket.SOL_TCP, k, v) + 
if self._socket_keepalive: + self._socket.setsockopt( + socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + if self._client._credentials: + self._init_security() + + def close(self): + """ + Closes the underlying socket of the PB connection. + """ + if self._socket: + if USE_STDLIB_SSL: + # NB: Python 2.7.8 and earlier does not have a compatible + # shutdown() method due to the SSL lib + try: + self._socket.shutdown(socket.SHUT_RDWR) + except EnvironmentError: + # NB: sometimes these exceptions are raised if the initial + # connection didn't succeed correctly, or if shutdown() is + # called after the connection dies + logging.debug('Exception occurred while shutting ' + 'down socket.', exc_info=True) + self._socket.close() + del self._socket diff --git a/riak/transports/tcp/stream.py b/riak/transports/tcp/stream.py new file mode 100644 index 00000000..95436825 --- /dev/null +++ b/riak/transports/tcp/stream.py @@ -0,0 +1,214 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import riak.pb.messages + +from riak.util import decode_index_value, bytes_to_str +from riak.client.index_page import CONTINUATION +from riak.codecs.ttb import TtbCodec +from six import PY2 + + +class PbufStream(object): + """ + Used internally by TcpTransport to implement streaming + operations. Implements the iterator interface. 
+ """ + + _expect = None + + def __init__(self, transport, codec): + self.finished = False + self.transport = transport + self.codec = codec + self.resource = None + self._mid_stream = False + + def __iter__(self): + return self + + def next(self): + if self.finished: + raise StopIteration + + try: + resp_code, data = self.transport._recv_msg( + mid_stream=self._mid_stream) + self.codec.maybe_riak_error(resp_code, data) + expect = self._expect + self.codec.maybe_incorrect_code(resp_code, expect) + resp = self.codec.parse_msg(expect, data) + except: + self.finished = True + raise + finally: + self._mid_stream = True + + if self._is_done(resp): + self.finished = True + + return resp + + def __next__(self): + # Python 3.x Version + return self.next() + + def _is_done(self, response): + # This could break if new messages don't name the field the + # same thing. + return response.done + + def attach(self, resource): + self.resource = resource + + def close(self): + # We have to drain the socket to make sure that we don't get + # weird responses when some other request comes after a + # failed/prematurely-terminated one. + try: + while self.next(): + pass + except StopIteration: + pass + self.resource.release() + + +class PbufKeyStream(PbufStream): + """ + Used internally by TcpTransport to implement key-list streams. + """ + + _expect = riak.pb.messages.MSG_CODE_LIST_KEYS_RESP + + def next(self): + response = super(PbufKeyStream, self).next() + + if response.done and len(response.keys) is 0: + raise StopIteration + + return response.keys + + def __next__(self): + # Python 3.x Version + return self.next() + + +class PbufMapredStream(PbufStream): + """ + Used internally by TcpTransport to implement MapReduce + streams. 
+ """ + + _expect = riak.pb.messages.MSG_CODE_MAP_RED_RESP + + def next(self): + response = super(PbufMapredStream, self).next() + + if response.done and not response.HasField('response'): + raise StopIteration + + return response.phase, json.loads(bytes_to_str(response.response)) + + def __next__(self): + # Python 3.x Version + return self.next() + + +class PbufBucketStream(PbufStream): + """ + Used internally by TcpTransport to implement key-list streams. + """ + + _expect = riak.pb.messages.MSG_CODE_LIST_BUCKETS_RESP + + def next(self): + response = super(PbufBucketStream, self).next() + + if response.done and len(response.buckets) is 0: + raise StopIteration + + return response.buckets + + def __next__(self): + # Python 3.x Version + return self.next() + + +class PbufIndexStream(PbufStream): + """ + Used internally by TcpTransport to implement Secondary Index + streams. + """ + + _expect = riak.pb.messages.MSG_CODE_INDEX_RESP + + def __init__(self, transport, codec, index, return_terms=False): + super(PbufIndexStream, self).__init__(transport, codec) + self.index = index + self.return_terms = return_terms + + def next(self): + response = super(PbufIndexStream, self).next() + + if response.done and not (response.keys or + response.results or + response.continuation): + raise StopIteration + + if self.return_terms and response.results: + return [(decode_index_value(self.index, r.key), + bytes_to_str(r.value)) + for r in response.results] + elif response.keys: + if PY2: + return response.keys[:] + else: + return [bytes_to_str(key) for key in response.keys] + elif response.continuation: + return CONTINUATION(bytes_to_str(response.continuation)) + + def __next__(self): + # Python 3.x Version + return self.next() + + +class PbufTsKeyStream(PbufStream, TtbCodec): + """ + Used internally by TcpTransport to implement TS key-list streams. 
+ """ + + _expect = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_RESP + + def __init__(self, transport, codec, convert_timestamp=False): + super(PbufTsKeyStream, self).__init__(transport, codec) + self._convert_timestamp = convert_timestamp + + def next(self): + response = super(PbufTsKeyStream, self).next() + + if response.done and len(response.keys) is 0: + raise StopIteration + + keys = [] + for tsrow in response.keys: + keys.append(self.codec.decode_timeseries_row(tsrow, + convert_timestamp=self._convert_timestamp)) + + return keys + + def __next__(self): + # Python 3.x Version + return self.next() diff --git a/riak/transports/tcp/transport.py b/riak/transports/tcp/transport.py new file mode 100644 index 00000000..5d3a1599 --- /dev/null +++ b/riak/transports/tcp/transport.py @@ -0,0 +1,574 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import six + +import riak.pb.messages + +from riak import RiakError +from riak.codecs import Codec, Msg +from riak.codecs.pbuf import PbufCodec +from riak.codecs.ttb import TtbCodec +from riak.pb.messages import MSG_CODE_TS_TTB_MSG +from riak.transports.pool import BadResource +from riak.transports.transport import Transport +from riak.ts_object import TsObject + +from riak.transports.tcp.connection import TcpConnection +from riak.transports.tcp.stream import (PbufKeyStream, + PbufMapredStream, + PbufBucketStream, + PbufIndexStream, + PbufTsKeyStream) + + +class TcpTransport(Transport, TcpConnection): + """ + The TcpTransport object holds a connection to the TCP + socket on the Riak server. + """ + def __init__(self, + node=None, + client=None, + timeout=None, + **kwargs): + super(TcpTransport, self).__init__() + + self._client = client + self._node = node + self._address = (node.host, node.pb_port) + self._timeout = timeout + self._socket = None + self._pbuf_c = None + self._ttb_c = None + self._socket_tcp_options = \ + kwargs.get('socket_tcp_options', {}) + self._socket_keepalive = \ + kwargs.get('socket_keepalive', False) + self._ts_convert_timestamp = \ + kwargs.get('ts_convert_timestamp', False) + self._use_ttb = \ + kwargs.get('use_ttb', True) + + def _get_pbuf_codec(self): + if not self._pbuf_c: + self._pbuf_c = PbufCodec( + self.client_timeouts(), self.quorum_controls(), + self.tombstone_vclocks(), self.bucket_types()) + return self._pbuf_c + + def _get_ttb_codec(self): + if self._use_ttb: + if not self._ttb_c: + self._ttb_c = TtbCodec() + codec = self._ttb_c + else: + codec = self._get_pbuf_codec() + return codec + + def _get_codec(self, msg_code): + if msg_code == MSG_CODE_TS_TTB_MSG: + codec = self._get_ttb_codec() + elif msg_code == riak.pb.messages.MSG_CODE_TS_GET_REQ: + codec = self._get_ttb_codec() + elif msg_code == riak.pb.messages.MSG_CODE_TS_PUT_REQ: + codec = self._get_ttb_codec() + elif msg_code == riak.pb.messages.MSG_CODE_TS_QUERY_REQ: + 
codec = self._get_ttb_codec() + else: + codec = self._get_pbuf_codec() + return codec + + # FeatureDetection API + def _server_version(self): + server_info = self.get_server_info() + ver = server_info['server_version'] + (maj, min, patch) = [int(v) for v in ver.split('.')] + if maj == 0: + import datetime + now = datetime.datetime.now() + if now.year == 2016: + # GH-471 As of 20160509 Riak TS OSS 1.3.0 returns '0.8.0' as + # the version string. + return '2.1.1' + return ver + + def ping(self): + """ + Ping the remote server + """ + msg_code = riak.pb.messages.MSG_CODE_PING_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_ping() + resp_code, _ = self._request(msg, codec) + if resp_code == riak.pb.messages.MSG_CODE_PING_RESP: + return True + else: + return False + + def get_server_info(self): + """ + Get information about the server + """ + # NB: can't do it this way due to recursion + # codec = self._get_codec(ttb_supported=False) + codec = PbufCodec() + msg = Msg(riak.pb.messages.MSG_CODE_GET_SERVER_INFO_REQ, None, + riak.pb.messages.MSG_CODE_GET_SERVER_INFO_RESP) + resp_code, resp = self._request(msg, codec) + return codec.decode_get_server_info(resp) + + def _get_client_id(self): + msg_code = riak.pb.messages.MSG_CODE_GET_CLIENT_ID_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_client_id() + resp_code, resp = self._request(msg, codec) + return codec.decode_get_client_id(resp) + + def _set_client_id(self, client_id): + msg_code = riak.pb.messages.MSG_CODE_SET_CLIENT_ID_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_set_client_id(client_id) + resp_code, resp = self._request(msg, codec) + self._client_id = client_id + + client_id = property(_get_client_id, _set_client_id, + doc="""the client ID for this connection""") + + def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, + notfound_ok=None, head_only=False): + """ + Serialize get request and deserialize response + """ + msg_code = 
riak.pb.messages.MSG_CODE_GET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get(robj, r, pr, + timeout, basic_quorum, + notfound_ok, head_only) + resp_code, resp = self._request(msg, codec) + return codec.decode_get(robj, resp) + + def put(self, robj, w=None, dw=None, pw=None, return_body=True, + if_none_match=False, timeout=None): + msg_code = riak.pb.messages.MSG_CODE_PUT_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_put(robj, w, dw, pw, return_body, + if_none_match, timeout) + resp_code, resp = self._request(msg, codec) + return codec.decode_put(robj, resp) + + def ts_describe(self, table): + query = 'DESCRIBE {table}'.format(table=table.name) + return self.ts_query(table, query) + + def ts_get(self, table, key): + msg_code = MSG_CODE_TS_TTB_MSG + codec = self._get_codec(msg_code) + msg = codec.encode_timeseries_keyreq(table, key) + resp_code, resp = self._request(msg, codec) + tsobj = TsObject(self._client, table) + codec.decode_timeseries(resp, tsobj, + self._ts_convert_timestamp) + return tsobj + + def ts_put(self, tsobj): + msg_code = MSG_CODE_TS_TTB_MSG + codec = self._get_codec(msg_code) + msg = codec.encode_timeseries_put(tsobj) + resp_code, resp = self._request(msg, codec) + return codec.validate_timeseries_put_resp(resp_code, resp) + + def ts_delete(self, table, key): + msg_code = riak.pb.messages.MSG_CODE_TS_DEL_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_timeseries_keyreq(table, key, is_delete=True) + resp_code, resp = self._request(msg, codec) + if resp is not None: + return True + else: + raise RiakError("missing response object") + + def ts_query(self, table, query, interpolations=None): + msg_code = riak.pb.messages.MSG_CODE_TS_QUERY_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_timeseries_query(table, query, interpolations) + resp_code, resp = self._request(msg, codec) + tsobj = TsObject(self._client, table) + codec.decode_timeseries(resp, tsobj, + self._ts_convert_timestamp) + return 
tsobj + + def ts_stream_keys(self, table, timeout=None): + """ + Streams keys from a timeseries table, returning an iterator that + yields lists of keys. + """ + msg_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_timeseries_listkeysreq(table, timeout) + self._send_msg(msg.msg_code, msg.data) + return PbufTsKeyStream(self, codec, self._ts_convert_timestamp) + + def delete(self, robj, rw=None, r=None, w=None, dw=None, + pr=None, pw=None, timeout=None): + msg_code = riak.pb.messages.MSG_CODE_DEL_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_delete(robj, rw, r, w, dw, pr, pw, timeout) + resp_code, resp = self._request(msg, codec) + return self + + def get_keys(self, bucket, timeout=None): + """ + Lists all keys within a bucket. + """ + msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ + codec = self._get_codec(msg_code) + stream = self.stream_keys(bucket, timeout=timeout) + return codec.decode_get_keys(stream) + + def stream_keys(self, bucket, timeout=None): + """ + Streams keys from a bucket, returning an iterator that yields + lists of keys. 
+ """ + msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_stream_keys(bucket, timeout) + self._send_msg(msg.msg_code, msg.data) + return PbufKeyStream(self, codec) + + def get_buckets(self, bucket_type=None, timeout=None): + """ + Serialize bucket listing request and deserialize response + """ + msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_buckets(bucket_type, + timeout, streaming=False) + resp_code, resp = self._request(msg, codec) + return resp.buckets + + def stream_buckets(self, bucket_type=None, timeout=None): + """ + Stream list of buckets through an iterator + """ + if not self.bucket_stream(): + raise NotImplementedError('Streaming list-buckets is not ' + 'supported') + msg_code = riak.pb.messages.MSG_CODE_LIST_BUCKETS_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_buckets(bucket_type, + timeout, streaming=True) + self._send_msg(msg.msg_code, msg.data) + return PbufBucketStream(self, codec) + + def get_bucket_props(self, bucket): + """ + Serialize bucket property request and deserialize response + """ + msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_bucket_props(bucket) + resp_code, resp = self._request(msg, codec) + return codec.decode_bucket_props(resp.props) + + def set_bucket_props(self, bucket, props): + """ + Serialize set bucket property request and deserialize response + """ + if not self.pb_all_bucket_props(): + for key in props: + if key not in ('n_val', 'allow_mult'): + raise NotImplementedError('Server only supports n_val and ' + 'allow_mult properties over PBC') + msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_set_bucket_props(bucket, props) + resp_code, resp = self._request(msg, codec) + return True + + def clear_bucket_props(self, bucket): + """ + Clear bucket properties, 
resetting them to their defaults + """ + if not self.pb_clear_bucket_props(): + return False + msg_code = riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_clear_bucket_props(bucket) + self._request(msg, codec) + return True + + def get_bucket_type_props(self, bucket_type): + """ + Fetch bucket-type properties + """ + self._check_bucket_types(bucket_type) + msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_TYPE_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_bucket_type_props(bucket_type) + resp_code, resp = self._request(msg, codec) + return codec.decode_bucket_props(resp.props) + + def set_bucket_type_props(self, bucket_type, props): + """ + Set bucket-type properties + """ + self._check_bucket_types(bucket_type) + msg_code = riak.pb.messages.MSG_CODE_SET_BUCKET_TYPE_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_set_bucket_type_props(bucket_type, props) + resp_code, resp = self._request(msg, codec) + return True + + def mapred(self, inputs, query, timeout=None): + # dictionary of phase results - each content should be an encoded array + # which is appended to the result for that phase. + result = {} + for phase, content in self.stream_mapred(inputs, query, timeout): + if phase in result: + result[phase] += content + else: + result[phase] = content + # If a single result - return the same as the HTTP interface does + # otherwise return all the phase information + if not len(result): + return None + elif len(result) == 1: + return result[max(result.keys())] + else: + return result + + def stream_mapred(self, inputs, query, timeout=None): + # Construct the job, optionally set the timeout... 
+ msg_code = riak.pb.messages.MSG_CODE_MAP_RED_REQ + codec = self._get_codec(msg_code) + content = self._construct_mapred_json(inputs, query, timeout) + msg = codec.encode_stream_mapred(content) + self._send_msg(msg.msg_code, msg.data) + return PbufMapredStream(self, codec) + + def get_index(self, bucket, index, startkey, endkey=None, + return_terms=None, max_results=None, continuation=None, + timeout=None, term_regex=None): + # TODO FUTURE NUKE THIS MAPRED + if not self.pb_indexes(): + return self._get_index_mapred_emu(bucket, index, startkey, endkey) + + if term_regex and not self.index_term_regex(): + raise NotImplementedError("Secondary index term_regex is not " + "supported") + + msg_code = riak.pb.messages.MSG_CODE_INDEX_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_index_req(bucket, index, startkey, endkey, + return_terms, max_results, + continuation, timeout, + term_regex, streaming=False) + resp_code, resp = self._request(msg, codec) + return codec.decode_index_req(resp, index, + return_terms, max_results) + + def stream_index(self, bucket, index, startkey, endkey=None, + return_terms=None, max_results=None, continuation=None, + timeout=None, term_regex=None): + if not self.stream_indexes(): + raise NotImplementedError("Secondary index streaming is not " + "supported") + if term_regex and not self.index_term_regex(): + raise NotImplementedError("Secondary index term_regex is not " + "supported") + msg_code = riak.pb.messages.MSG_CODE_INDEX_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_index_req(bucket, index, startkey, endkey, + return_terms, max_results, + continuation, timeout, + term_regex, streaming=True) + self._send_msg(msg.msg_code, msg.data) + return PbufIndexStream(self, codec, index, return_terms) + + def create_search_index(self, index, schema=None, n_val=None, + timeout=None): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this version") + msg_code 
= riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_PUT_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_create_search_index(index, schema, n_val, timeout) + self._request(msg, codec) + return True + + def get_search_index(self, index): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this version") + msg_code = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_search_index(index) + resp_code, resp = self._request(msg, codec) + if len(resp.index) > 0: + return codec.decode_search_index(resp.index[0]) + else: + raise RiakError('notfound') + + def list_search_indexes(self): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this version") + msg_code = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_GET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_list_search_indexes() + resp_code, resp = self._request(msg, codec) + return [codec.decode_search_index(index) for index in resp.index] + + def delete_search_index(self, index): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this version") + msg_code = riak.pb.messages.MSG_CODE_YOKOZUNA_INDEX_DELETE_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_delete_search_index(index) + self._request(msg, codec) + return True + + def create_search_schema(self, schema, content): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this version") + msg_code = riak.pb.messages.MSG_CODE_YOKOZUNA_SCHEMA_PUT_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_create_search_schema(schema, content) + self._request(msg, codec) + return True + + def get_search_schema(self, schema): + if not self.pb_search_admin(): + raise NotImplementedError("Search 2.0 administration is not " + "supported for this 
version") + msg_code = riak.pb.messages.MSG_CODE_YOKOZUNA_SCHEMA_GET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_search_schema(schema) + resp_code, resp = self._request(msg, codec) + return codec.decode_get_search_schema(resp) + + def search(self, index, query, **kwargs): + # TODO FUTURE NUKE THIS MAPRED + if not self.pb_search(): + return self._search_mapred_emu(index, query) + if six.PY2 and isinstance(query, unicode): # noqa + query = query.encode('utf8') + msg_code = riak.pb.messages.MSG_CODE_SEARCH_QUERY_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_search(index, query, **kwargs) + resp_code, resp = self._request(msg, codec) + return codec.decode_search(resp) + + def get_counter(self, bucket, key, **kwargs): + if not bucket.bucket_type.is_default(): + raise NotImplementedError("Counters are not " + "supported with bucket-types, " + "use datatypes instead.") + if not self.counters(): + raise NotImplementedError("Counters are not supported") + msg_code = riak.pb.messages.MSG_CODE_COUNTER_GET_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_counter(bucket, key, **kwargs) + resp_code, resp = self._request(msg, codec) + if resp.HasField('value'): + return resp.value + else: + return None + + def update_counter(self, bucket, key, value, **kwargs): + if not bucket.bucket_type.is_default(): + raise NotImplementedError("Counters are not " + "supported with bucket-types, " + "use datatypes instead.") + if not self.counters(): + raise NotImplementedError("Counters are not supported") + msg_code = riak.pb.messages.MSG_CODE_COUNTER_UPDATE_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_update_counter(bucket, key, value, **kwargs) + resp_code, resp = self._request(msg, codec) + if resp.HasField('value'): + return resp.value + else: + return True + + def fetch_datatype(self, bucket, key, **kwargs): + if bucket.bucket_type.is_default(): + raise NotImplementedError("Datatypes cannot be used in the default" + " 
bucket-type.") + if not self.datatypes(): + raise NotImplementedError("Datatypes are not supported.") + msg_code = riak.pb.messages.MSG_CODE_DT_FETCH_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_fetch_datatype(bucket, key, **kwargs) + resp_code, resp = self._request(msg, codec) + return codec.decode_dt_fetch(resp) + + def update_datatype(self, datatype, **kwargs): + if datatype.bucket.bucket_type.is_default(): + raise NotImplementedError("Datatypes cannot be used in the default" + " bucket-type.") + if not self.datatypes(): + raise NotImplementedError("Datatypes are not supported.") + msg_code = riak.pb.messages.MSG_CODE_DT_UPDATE_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_update_datatype(datatype, **kwargs) + resp_code, resp = self._request(msg, codec) + codec.decode_update_datatype(datatype, resp, **kwargs) + return True + + def get_preflist(self, bucket, key): + """ + Get the preflist for a bucket/key + + :param bucket: Riak Bucket + :type bucket: :class:`~riak.bucket.RiakBucket` + :param key: Riak Key + :type key: string + :rtype: list of dicts + """ + if not self.preflists(): + raise NotImplementedError("fetching preflists is not supported.") + msg_code = riak.pb.messages.MSG_CODE_GET_BUCKET_KEY_PREFLIST_REQ + codec = self._get_codec(msg_code) + msg = codec.encode_get_preflist(bucket, key) + resp_code, resp = self._request(msg, codec) + return [codec.decode_preflist(item) for item in resp.preflist] + + def _request(self, msg, codec=None): + if isinstance(msg, Msg): + msg_code = msg.msg_code + data = msg.data + expect = msg.resp_code + else: + raise ValueError('expected a Msg argument') + + if not isinstance(codec, Codec): + raise ValueError('expected a Codec argument') + + resp_code, data = self._send_recv(msg_code, data) + # NB: decodes errors with msg code 0 + codec.maybe_riak_error(resp_code, data) + codec.maybe_incorrect_code(resp_code, expect) + if resp_code == MSG_CODE_TS_TTB_MSG or \ + resp_code in 
riak.pb.messages.MESSAGE_CLASSES: + msg = codec.parse_msg(resp_code, data) + else: + # NB: raise a BadResource to ensure this connection is + # closed and not re-used + raise BadResource('unknown msg code {}'.format(resp_code)) + return resp_code, msg diff --git a/riak/transports/transport.py b/riak/transports/transport.py index 85dcae43..258d24e8 100644 --- a/riak/transports/transport.py +++ b/riak/transports/transport.py @@ -1,33 +1,29 @@ -""" -Copyright 2010 Rusty Klophaus -Copyright 2010 Justin Sheehy -Copyright 2009 Jay Baird - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import base64 import random import threading import os import json import platform + from six import PY2 from riak.transports.feature_detect import FeatureDetection -class RiakTransport(FeatureDetection): +class Transport(FeatureDetection): """ Class to encapsulate transport details and methods. All protocol transports are subclasses of this class. @@ -72,7 +68,7 @@ def ping(self): raise NotImplementedError def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None, - notfound_ok=None): + notfound_ok=None, head_only=False): """ Fetches an object. """ @@ -92,6 +88,42 @@ def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, """ raise NotImplementedError + def ts_describe(self, table): + """ + Retrieves a timeseries table description. + """ + raise NotImplementedError + + def ts_get(self, table, key): + """ + Retrieves a timeseries object. + """ + raise NotImplementedError + + def ts_put(self, tsobj): + """ + Stores a timeseries object. + """ + raise NotImplementedError + + def ts_delete(self, table, key): + """ + Deletes a timeseries object. + """ + raise NotImplementedError + + def ts_query(self, table, query, interpolations=None): + """ + Query timeseries data. + """ + raise NotImplementedError + + def ts_stream_keys(self, table, timeout=None): + """ + Streams the list of keys for the table through an iterator. + """ + raise NotImplementedError + def get_buckets(self, bucket_type=None, timeout=None): """ Gets the list of buckets as strings. @@ -172,7 +204,8 @@ def get_client_id(self): """ raise NotImplementedError - def create_search_index(self, index, schema=None, n_val=None): + def create_search_index(self, index, schema=None, n_val=None, + timeout=None): """ Creates a yokozuna search index. """ @@ -270,6 +303,13 @@ def update_datatype(self, datatype, w=None, dw=None, pw=None, """ raise NotImplementedError + def get_preflist(self, bucket, key): + """ + Fetches the preflist for a bucket/key. 
+ """ + raise NotImplementedError + + # TODO FUTURE NUKE THIS MAPRED def _search_mapred_emu(self, index, query): """ Emulates a search request via MapReduce. Used in the case @@ -295,6 +335,7 @@ def _search_mapred_emu(self, index, query): result['docs'].append({u'id': key}) return result + # TODO FUTURE NUKE THIS MAPRED def _get_index_mapred_emu(self, bucket, index, startkey, endkey=None): """ Emulates a secondary index request via MapReduce. Used in the @@ -335,6 +376,5 @@ def _construct_mapred_json(self, inputs, query, timeout=None): def _check_bucket_types(self, bucket_type): if not self.bucket_types(): raise NotImplementedError('Server does not support bucket-types') - if bucket_type.is_default(): raise ValueError('Cannot manipulate the default bucket-type') diff --git a/riak/ts_object.py b/riak/ts_object.py new file mode 100644 index 00000000..2c7fddf5 --- /dev/null +++ b/riak/ts_object.py @@ -0,0 +1,65 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections + +from riak import RiakError +from riak.table import Table + +TsColumns = collections.namedtuple('TsColumns', ['names', 'types']) + + +class TsObject(object): + """ + The TsObject holds information about Timeseries data, plus the data + itself. + """ + def __init__(self, client, table, rows=None, columns=None): + """ + Construct a new TsObject. + + :param client: A RiakClient object. 
+ :type client: :class:`RiakClient ` + :param table: The table for the timeseries data as a Table object. + :type table: :class:`Table` + :param rows: An list of lists with timeseries data + :type rows: list + :param columns: A TsColumns tuple. Optional + :type columns: :class:`TsColumns` + """ + + if not isinstance(table, Table): + raise ValueError('table must be an instance of Table.') + + self.client = client + self.table = table + + if rows is not None and not isinstance(rows, list): + raise RiakError("TsObject rows parameter must be a list.") + else: + self.rows = rows + + if columns is not None and \ + not isinstance(columns, TsColumns): + raise RiakError( + "TsObject columns parameter must be a TsColumns instance") + else: + self.columns = columns + + def store(self): + """ + Store the timeseries data in Riak. + :rtype: boolean + """ + return self.client.ts_put(self) diff --git a/riak/tz.py b/riak/tz.py new file mode 100644 index 00000000..fc44e32d --- /dev/null +++ b/riak/tz.py @@ -0,0 +1,33 @@ +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import tzinfo, timedelta + +ZERO = timedelta(0) + + +class UTC(tzinfo): + """UTC""" + + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO + + +utc = UTC() diff --git a/riak/util.py b/riak/util.py index f083a053..9101275b 100644 --- a/riak/util.py +++ b/riak/util.py @@ -1,26 +1,59 @@ -""" -Copyright 2014 Basho Technologies, Inc. - -This file is provided to you under the Apache License, -Version 2.0 (the "License"); you may not use this file -except in compliance with the License. You may obtain -a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -""" +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import print_function + +import datetime +import sys import warnings + from collections import Mapping from six import string_types, PY2 +epoch = datetime.datetime.utcfromtimestamp(0) +try: + import pytz + epoch_tz = pytz.utc.localize(epoch) +except ImportError: + from riak.tz import utc + epoch_tz = datetime.datetime.fromtimestamp(0, tz=utc) + + +def unix_time_millis(dt): + if dt.tzinfo: + td = dt - epoch_tz + else: + td = dt - epoch + tdms = ((td.days * 24 * 3600) + td.seconds) * 1000 + ms = td.microseconds // 1000 + return tdms + ms + + +def datetime_from_unix_time_millis(ut): + if isinstance(ut, float): + raise ValueError('unix timestamp must not be a float, ' + 'it must be total milliseconds since ' + 'epoch as an integer') + utms = ut / 1000.0 + return datetime.datetime.utcfromtimestamp(utms) + + +def is_timeseries_supported(v=None): + if v is None: + v = sys.version_info + return v < (3,) or (v[:3] >= (3, 4, 4) and v[:3] != (3, 5, 0)) + def quacks_like_dict(object): """Check if object is dict-like""" @@ -48,8 +81,8 @@ def deep_merge(a, b): if key not in current_dst: current_dst[key] = current_src[key] else: - if (quacks_like_dict(current_src[key]) - and quacks_like_dict(current_dst[key])): + if (quacks_like_dict(current_src[key]) and + quacks_like_dict(current_dst[key])): stack.append((current_dst[key], current_src[key])) else: current_dst[key] = current_src[key] @@ -69,7 +102,6 @@ class lazy_property(object): memoization of an object attribute. The property should represent immutable data, as it replaces itself on first access. 
''' - def __init__(self, fget): self.fget = fget self.func_name = fget.__name__ @@ -113,6 +145,6 @@ def str_to_long(value, base=10): if value is None: return None elif PY2: - return long(value, base) + return long(value, base) # noqa else: return int(value, base) diff --git a/riak_pb b/riak_pb new file mode 160000 index 00000000..cb15cc47 --- /dev/null +++ b/riak_pb @@ -0,0 +1 @@ +Subproject commit cb15cc4770f3748289ba56245d62b1c0d07c33f7 diff --git a/setup.py b/setup.py index 0935057e..37eb8da0 100755 --- a/setup.py +++ b/setup.py @@ -1,23 +1,36 @@ #!/usr/bin/env python -import platform + +import codecs +import sys + from setuptools import setup, find_packages from version import get_version -from commands import preconfigure, configure, create_bucket_types, \ - setup_security, enable_security, disable_security +from commands import setup_timeseries, build_messages + +install_requires = ['six >= 1.8.0', 'basho_erlastic >= 2.1.1'] +requires = ['six(>=1.8.0)', 'basho_erlastic(>= 2.1.1)'] -install_requires = ['six >= 1.8.0'] -requires = ['six(>=1.8.0)'] -if platform.python_version() < '3.0': +if sys.version_info[:3] <= (2, 7, 9): install_requires.append("pyOpenSSL >= 0.14") requires.append("pyOpenSSL(>=0.14)") - install_requires.append("riak_pb >=2.0.0") - requires.append("riak_pb(>=2.0.0)") + +if sys.version_info[:3] <= (3, 0, 0): + install_requires.append('protobuf >=2.4.1, <2.7.0') + requires.append('protobuf(>=2.4.1, <2.7.0)') else: - install_requires.append("python3_riak_pb >=2.0.0") - requires.append("python3_riak_pb(>=2.0.0)") -tests_require = [] -if platform.python_version() < '2.7': - tests_require.append("unittest2") + install_requires.append('python3_protobuf >=2.4.1, <2.6.0') + requires.append('python3_protobuf(>=2.4.1, <2.6.0)') + +with codecs.open('README.md', 'r', 'utf-8') as f: + readme_md = f.read() + +try: + import pypandoc + long_description = pypandoc.convert('README.md', 'rst') + with codecs.open('README.rst', 'w', 'utf-8') as f: + 
f.write(long_description) +except(IOError, ImportError): + long_description = readme_md setup( name='riak', @@ -25,9 +38,9 @@ packages=find_packages(), requires=requires, install_requires=install_requires, - tests_require=tests_require, package_data={'riak': ['erl_src/*']}, description='Python client for Riak', + long_description=long_description, zip_safe=True, options={'easy_install': {'allow_hosts': 'pypi.python.org'}}, include_package_data=True, @@ -37,14 +50,16 @@ author_email='clients@basho.com', test_suite='riak.tests.suite', url='https://github.com/basho/riak-python-client', - cmdclass={'create_bucket_types': create_bucket_types, - 'setup_security': setup_security, - 'preconfigure': preconfigure, - 'configure': configure, - 'enable_security': enable_security, - 'disable_security': disable_security}, + cmdclass={ + 'build_messages': build_messages, + 'setup_timeseries': setup_timeseries + }, classifiers=['License :: OSI Approved :: Apache Software License', 'Intended Audience :: Developers', 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Topic :: Database'] ) diff --git a/tools b/tools new file mode 160000 index 00000000..1f54803c --- /dev/null +++ b/tools @@ -0,0 +1 @@ +Subproject commit 1f54803ca7912a41a0ec47c0028c259b97475e1f diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..f411b799 --- /dev/null +++ b/tox.ini @@ -0,0 +1,14 @@ +# Tox (http://tox.testrun.org/) is a tool for running tests +# in multiple virtualenvs. This configuration file will run the +# test suite on all supported python versions. 
+ +[tox] +envlist = py2, py3 + +[testenv] +install_command = pip install --upgrade {packages} +commands = {envpython} setup.py test +deps = + pip + pytz +passenv = RUN_* SKIP_* RIAK_* diff --git a/version.py b/version.py index 90f856a0..ca6a019c 100644 --- a/version.py +++ b/version.py @@ -1,4 +1,16 @@ -# This program is placed into the public domain. +# Copyright 2010-present Basho Technologies, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Gets the current version number. @@ -16,9 +28,6 @@ """ from __future__ import print_function - -__all__ = ['get_version'] - from os.path import dirname, isdir, join import re from subprocess import CalledProcessError, Popen, PIPE @@ -62,6 +71,8 @@ def check_output(*popenargs, **kwargs): version_re = re.compile('^Version: (.+)$', re.M) +__all__ = ['get_version'] + def get_version(): d = dirname(__file__) @@ -81,7 +92,8 @@ def get_version(): else: # Extract the version from the PKG-INFO file. - with open(join(d, 'PKG-INFO')) as f: + import codecs + with codecs.open(join(d, 'PKG-INFO'), 'r', 'utf-8') as f: version = version_re.search(f.read()).group(1) return version