diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..7af8a4f3d --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "maven" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml new file mode 100644 index 000000000..8571e5738 --- /dev/null +++ b/.github/workflows/master.yml @@ -0,0 +1,49 @@ +name: master + +on: + push: + branches: + - master + +jobs: + build: + runs-on: ubuntu-20.04 + + strategy: + matrix: + jdk: [3-openjdk-17-slim, 3-jdk-14, 3-jdk-8-slim] + influxdb: ['1.1', '1.6', '1.8', '2.3', '2.4', '2.5'] + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Build project + env: + MAVEN_JAVA_VERSION: "${{ matrix.jdk }}" + INFLUXDB_VERSION: "${{ matrix.influxdb }}" + run: ./compile-and-test.sh + + - name: codecov + run: | + sudo apt-get update + sudo apt-get install gpg libdigest-sha-perl -y + curl -Os https://uploader.codecov.io/latest/linux/codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM + curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig + curl -s https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import + gpgv codecov.SHA256SUM.sig codecov.SHA256SUM + shasum -a 256 -c codecov.SHA256SUM + chmod +x ./codecov + ./codecov + if: matrix.influxdb != '2.3' && matrix.influxdb != '2.4' && matrix.influxdb != '2.5' + + + # deploy: + # runs-on: ubuntu-20.04 + + # steps: + # - name: deploy 
snapshot + # env: + # secure: dAJK41xM2dN3q3xJMqAOP6uvrOvpjjUzmHr8mYNyepER8Lpms9/GqVUxqJv12wzCBqv1XZk/CXxrv3iBc2XjlxlrzIJGQChTinwDEigv0BMl/Gh0821ja7gwzMEUmg9f79m5tJxIFQ306cWz1gyRDqM3fLzskvM2ayzvynsNc/w= + # run: ./deploy-snapshot.sh diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 000000000..2a7b043e7 --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,43 @@ +name: Build from pull request + +on: + pull_request: + branches: + - master + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + matrix: + jdk: [3-openjdk-17-slim, 3-jdk-14, 3-jdk-8-slim] + influxdb: ['1.1', '1.6', '1.8', '2.3', '2.4', '2.5'] + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Figure out if running fork PR + id: fork + run: '["${{ secrets.DOCKER_REGISTRY_TOKEN }}" == ""] && echo "::set-output name=is_fork_pr::true" || echo "::set-output name=is_fork_pr::false"' + + - name: Build project + env: + MAVEN_JAVA_VERSION: "${{ matrix.jdk }}" + INFLUXDB_VERSION: "${{ matrix.influxdb }}" + run: ./compile-and-test.sh + + - name: codecov + run: | + sudo apt-get update + sudo apt-get install gpg libdigest-sha-perl -y + curl -Os https://uploader.codecov.io/latest/linux/codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM + curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig + curl -s https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import + gpgv codecov.SHA256SUM.sig codecov.SHA256SUM + shasum -a 256 -c codecov.SHA256SUM + chmod +x ./codecov + ./codecov + if: matrix.influxdb != '2.3' && matrix.influxdb != '2.4' && matrix.influxdb != '2.5' diff --git a/.gitignore b/.gitignore index 4bfa435bc..eac1a868b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,5 @@ target/ test-output/ .idea/ *iml +.m2/ +.checkstyle diff --git a/.maven-settings.xml b/.maven-settings.xml new file mode 100644 index 000000000..864a9ec04 --- /dev/null +++ 
b/.maven-settings.xml @@ -0,0 +1,27 @@ + + + + + ossrh + ${env.SONATYPE_USERNAME} + ${env.SONATYPE_PASSWORD} + + + + + + ossrh + + true + + + ${env.GPG_EXECUTABLE} + ${env.GPG_PASSPHRASE} + + + + + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 434bf711d..000000000 --- a/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: java -sudo: required - -jdk: - - oraclejdk8 - -addons: - apt: - packages: - - oracle-java8-installer # Updates JDK 8 to the latest available. - -services: - - docker -script: ./compile-and-test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) - -after_failure: - - cat target/surefire-reports/*.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index e593f0a49..0cdc03965 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,83 +1,287 @@ -## v2.6 [unreleased] +# Changelog -#### Features +## 2.25 [2025-03-26] - - Switch to Java 1.8 - - Support chunking - - Add a databaseExists method to InfluxDB interface - - [Issue #289] (https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`. - - Add a listener to notify asynchronous errors during batch flushes (https://github.com/influxdata/influxdb-java/pull/318). +### Improvements +- Add support for parameter binding to built queries [PR #1010](https://github.com/influxdata/influxdb-java/pull/1010) -#### Fixes +## 2.24 [2023-12-14] - - [Issue #263] (https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface. 
+### Improvements +- `allFields` mode to Measurement annotation [PR #972](https://github.com/influxdata/influxdb-java/pull/972) +- Support generic POJO super classes [PR #980](https://github.com/influxdata/influxdb-java/pull/980) -#### Improvements +## 2.23 [2022-07-07] - - Update retrofit from 2.1 to 2.2 - - Update slf4j from 1.7.22 to 1.7.24 - - Update okhttp3 from 3.5 to 3.6 - - automatically adjust batch processor capacity [PR #282] +### Improvements +- Add implementation information to `Jar` manifest [PR #847](https://github.com/influxdata/influxdb-java/pull/847) + +### Fixes +- Only the request to /write endpoint should be compressed by GZIP [PR #851](https://github.com/influxdata/influxdb-java/pull/851) + +## 2.22 [2021-09-17] + +### Improvements + +- `POST` query variants serializes `'q'` parameter into HTTP body [PR #765](https://github.com/influxdata/influxdb-java/pull/765) + +## 2.21 [2020-12-04] + +### Fixes + +- Binary compatibility with old version [PR #692](https://github.com/influxdata/influxdb-java/pull/692) +- Wrong statement in manual [PR #695](https://github.com/influxdata/influxdb-java/pull/695) + +## 2.20 [2020-08-14] + +### Features +- Add an option in `BatchOption` to prevent `InfluxDB#write` from blocking when actions queue is exhausted. [Issue #688](https://github.com/influxdata/influxdb-java/issues/688) +- Added new signature to InfluxDBMapper.query() with params final Query query, final Class clazz, final String measurementName to leverage InfluxDBResultMapper.toPojo method with identical signature. + +### Improvements + +- Test: Added test for new InfluxDBMapper.query() signature, as well as test for existing InfluxDBMapper.query(Class clazz) signature (previously only InfluxDBMapper.query(Query query, Class clazz) was tested). 
+ +## 2.19 [2020-05-18] + +## 2.18 [2020-04-17] + +### Fixes + +- Update to okhttp 4.x [PR #644](https://github.com/influxdata/influxdb-java/pull/644) + +## 2.17 [2019-12-06] + +### Fixes + +- Fixed runtime exception propagation in chunked query [Issue #639](https://github.com/influxdata/influxdb-java/issues/639) + +## 2.16 [2019-10-25] + +### Fixes + +- Add new annotation called TimeColumn for timestamp field in POJO bean, this can set Point time and precision field correctly, also avoid UnableToParseException when flush Point to influx. +- Skip fields with NaN and infinity values when writing to InfluxDB + [Issue #614](https://github.com/influxdata/influxdb-java/issues/614) + +## 2.15 [2019-02-22] + +### Fixes + +- Close underlying OkHttpClient when closing [Issue #359](https://github.com/influxdata/influxdb-java/issues/359) +- Update OkHttp to 3.13.1 which disables TLSv1 and TLSv1.1 by default, if still required you can enable them: + +```java +OkHttpClient client = new OkHttpClient.Builder() + .connectionSpecs(Arrays.asList(ConnectionSpec.COMPATIBLE_TLS)) + .build(); +``` + +### Features + +- Query and BatchPoints do not mandate a database name, in which case the InfluxDB database + would be used [Issue #548](https://github.com/influxdata/influxdb-java/issues/548) +- Add BatchPoints.Builder.points(Collection) + [Issue #451](https://github.com/influxdata/influxdb-java/issues/451) +- @Column supports class inheritance + [Issue #367](https://github.com/influxdata/influxdb-java/issues/367) +- BatchOptions to have .precision() + [Issue #532](https://github.com/influxdata/influxdb-java/issues/532) +- Point.Builder.addFieldsFromPOJO to add Column fields from super class + [Issue #613](https://github.com/influxdata/influxdb-java/issues/613) + +## 2.14 [2018-10-12] + +### Fixes + +- Fixed chunked query exception handling [Issue #523](https://github.com/influxdata/influxdb-java/issues/523) +- Memory leak in StringBuilder cache for Point.lineprotocol() [Issue 
#526](https://github.com/influxdata/influxdb-java/issues/526) + +## 2.13 [2018-09-12] + +### Fixes +- MessagePack queries: Exception during parsing InfluxDB version [macOS] [PR #487](https://github.com/influxdata/influxdb-java/issues/487) +- The InfluxDBResultMapper is able to handle results with a different time precision [PR #501](https://github.com/influxdata/influxdb-java/pull/501) +- UDP target host address is cached [PR #502](https://github.com/influxdata/influxdb-java/issues/502) +- Error messages from server not parsed correctly when using msgpack [PR #506](https://github.com/influxdata/influxdb-java/issues/506) +- Response body must be closed properly in case of JSON response [PR #514](https://github.com/influxdata/influxdb-java/issues/514) +- Time is serialized not consistently in MsgPack and Json, missing millis and nanos in MsgPack [PR #517](https://github.com/influxdata/influxdb-java/issues/517) + +### Features + +- Support for Basic Authentication [PR #492](https://github.com/influxdata/influxdb-java/pull/492) +- Added possibility to reuse client as a core part of [influxdb-java-reactive](https://github.com/bonitoo-io/influxdb-java-reactive) client [PR #493](https://github.com/influxdata/influxdb-java/pull/493) +- Retry capability for writing of BatchPoints [PR #503](https://github.com/influxdata/influxdb-java/issues/503) +- Added `BiConsumer` with capability to discontinue a streaming query [Issue #515](https://github.com/influxdata/influxdb-java/issues/515) +- Added `onComplete` action that is invoked after successfully end of streaming query [Issue #515](https://github.com/influxdata/influxdb-java/issues/515) + +## 2.12 [2018-07-31] + +### Fixes + +- Remove code which checks for unsupported influxdb versions [PR #474](https://github.com/influxdata/influxdb-java/pull/474) +- Unpredictable errors when OkHttpClient.Builder instance is reused [PR #478](https://github.com/influxdata/influxdb-java/pull/478) + +### Features + +- Support for MessagePack [PR 
#471](https://github.com/influxdata/influxdb-java/pull/471) +- Cache version per influxdb instance and reduce ping() calls for every query call [PR #472](https://github.com/influxdata/influxdb-java/pull/472) +- FAQ list for influxdb-java [PR #475](https://github.com/influxdata/influxdb-java/pull/475) + +### Improvements + +- Test: Unit test to ensure tags should be sorted by key in line protocol (to reduce db server overheads) [PR #476](https://github.com/influxdata/influxdb-java/pull/476) + +## 2.11 [2018-07-02] + +### Features + +- Allow write precision of TimeUnit other than Nanoseconds [PR #321](https://github.com/influxdata/influxdb-java/pull/321) +- Support dynamic measurement name in InfluxDBResultMapper [PR #423](https://github.com/influxdata/influxdb-java/pull/423) +- Debug mode which allows HTTP requests being sent to the database to be logged [PR #450](https://github.com/influxdata/influxdb-java/pull/450) +- Fix problem of connecting to the influx api with URL which does not points to the url root (e.g. 
localhost:80/influx-api/) [PR #400](https://github.com/influxdata/influxdb-java/pull/400) + +## 2.10 [2018-04-26] + +### Fixes +- Fix IllegalAccessException on setting value to POJOs, InfluxDBResultMapper is now more thread-safe [PR #432](https://github.com/influxdata/influxdb-java/pull/432) + +### Features + +- Support for parameter binding in queries ("prepared statements") [PR #429](https://github.com/influxdata/influxdb-java/pull/429) +- Allow to figure out whether the Point.Builder has any field or not [PR #434](https://github.com/influxdata/influxdb-java/pull/434) + +### Improvements + +- Performance: use chained StringBuilder calls instead of single calls [PR #426](https://github.com/influxdata/influxdb-java/pull/426) +- Performance: Escape fields and keys more efficiently [PR #424](https://github.com/influxdata/influxdb-java/pull/424) +- Build: Speed up travis build [PR #435](https://github.com/influxdata/influxdb-java/pull/435) +- Test: Update junit from 5.1.0 to 5.1.1 [PR #441](https://github.com/influxdata/influxdb-java/pull/441) + +## 2.9 [2018-02-27] + +### Features + +- New extensible API to configure batching properties. [PR #409] +- New configuration property 'jitter interval' to avoid multiple clients hit the server periodically at the same time. [PR #409] +- New strategy on handling errors, client performs retries writes when server gets overloaded [PR #410] +- New exceptions give the client user easier way to classify errors reported by the server. 
[PR #410] + +## 2.8 [2017-12-06] + +### Fixes + +- InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345) +- InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350) + +### Features + +- API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351) +- API: add InfluxDB#query that uses callbacks + +### Improvements + +- Build: all unit and integration test are now running with jdk8 and jdk9. +- Test: migration to junit5 + +## v2.7 [2017-06-26] + +### Features + +- Simplify write() methods for use cases writing all points to same database and retention policy [PR #327](https://github.com/influxdata/influxdb-java/pull/327) +- QueryResult to Object mapper added [PR #341](https://github.com/influxdata/influxdb-java/pull/341) + +### Fixes + +- Replace RuntimeException with InfluxDBException [Issue #323](https://github.com/influxdata/influxdb-java/issues/323) + +### Improvements + +- Significant (~35%) performance improvements for write speed with less memory footprint. [PR #330](https://github.com/influxdata/influxdb-java/pull/330) +- Drop guava runtime dependency which reduces jar size from 1MB -> 49KB [PR #322](https://github.com/influxdata/influxdb-java/pull/322) + +## v2.6 [2017-06-08] + +### Features + +- Switch to Java 1.8 +- Support chunking +- Add a databaseExists method to InfluxDB interface +- [Issue #289](https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`. +- Add a listener to notify asynchronous errors during batch flushes [PR #318](https://github.com/influxdata/influxdb-java/pull/318). 
+ +### Fixes + +- [Issue #263](https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface. + +### Improvements + +- Update retrofit from 2.1 to 2.2 +- Update slf4j from 1.7.22 to 1.7.24 +- Update okhttp3 from 3.5 to 3.6 +- automatically adjust batch processor capacity [PR #282](https://github.com/influxdata/influxdb-java/pull/282) ## v2.5 [2016-12-05] -#### Features +### Features - - Support writing by UDP protocal. - - Support gzip compress for http request body. - - Support setting thread factory for batch processor. - - Support chunking +- Support writing by UDP protocal. +- Support gzip compress for http request body. +- Support setting thread factory for batch processor. +- Support chunking -#### Fixes +### Fixes - - [Issue #162] (https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp. - - [Issue #214] (https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result. - - Write can't be always async if batch is enabled. +- [Issue #162](https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp. +- [Issue #214](https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result. +- Write can't be always async if batch is enabled. -#### Improvements +### Improvements - - Remove the limit for database name: not contain '-'. - - Support creating influxdb instance without username and password. - - Add time related util methods for converting influxdb timestamp or unix epoch time. - - correct exception type when disable batch twice. +- Remove the limit for database name: not contain '-'. +- Support creating influxdb instance without username and password. +- Add time related util methods for converting influxdb timestamp or unix epoch time. 
+- correct exception type when disable batch twice. ## v2.4 [2016-10-24] -#### Features - - now uses okhttp3 and retrofit2. As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp. - - in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET). +### Features + +- now uses okhttp3 and retrofit2. As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp. +- in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET). ## v2.2 [2016-04-11] -#### Features +### Features + +- Allow writing of pre-constructed line protocol strings - - Allow writing of pre-constructed line protocol strings +### Fixes -#### Fixes +- Correct escaping of database names for create and delete database actions +- Many bug fixes / improvements in general - - Correct escaping of database names for create and delete database actions - - Many bug fixes / improvements in general +### Other -#### Other - - Deprecated `field()` method in preference for `addField()` methods. +- Deprecated `field()` method in preference for `addField()` methods. ## v2.1 [2015-12-05] -#### Features +### Features - - Extensions to fluent builder classes - - Convenience methods for building Points - - Allow integer types as field values +- Extensions to fluent builder classes +- Convenience methods for building Points +- Allow integer types as field values -#### Fixes +### Fixes - - Fixed escaping of tag and field values - - Always uses nanosecond precision for time - - Uses NumberFormat class for safer formatting of large numbers. +- Fixed escaping of tag and field values +- Always uses nanosecond precision for time +- Uses NumberFormat class for safer formatting of large numbers. 
## v2.0 [2015-07-17] -#### Features +### Features - Compatible with InfluxDB version 0.9+ - Support for lineprotocol @@ -89,7 +293,7 @@ No major functional changes or improvements. Mainly library updates and code str ## v1.3 [2014-10-22] -#### Features +### Features - Compatible with InfluxDB Version up to 0.8 - API: add a InfluxDB#createDatabase(DatabaseConfiguration) to be able to create a new Database with ShardSpaces defined. @@ -102,26 +306,26 @@ No major functional changes or improvements. Mainly library updates and code str ## v1.2 [2014-06-28] -#### Features +### Features -- [Issue #2] (https://github.com/influxdb/influxdb-java/issues/2) Implement the last missing api calls ( interfaces, sync, forceCompaction, servers, shards) +- [Issue #2](https://github.com/influxdb/influxdb-java/issues/2) Implement the last missing api calls ( interfaces, sync, forceCompaction, servers, shards) - use (http://square.github.io/okhttp/, okhttp) instead of java builtin httpconnection to get failover for the http endpoint. -#### Tasks +### Tasks -- [Issue #8] (https://github.com/influxdb/influxdb-java/issues/8) Use com.github.docker-java which replaces com.kpelykh for Integration tests. -- [Issue #6] (https://github.com/influxdb/influxdb-java/issues/6) Update Retrofit to 1.6.0 -- [Issue #7] (https://github.com/influxdb/influxdb-java/issues/7) Update Guava to 17.0 +- [Issue #8](https://github.com/influxdb/influxdb-java/issues/8) Use com.github.docker-java which replaces com.kpelykh for Integration tests. +- [Issue #6](https://github.com/influxdb/influxdb-java/issues/6) Update Retrofit to 1.6.0 +- [Issue #7](https://github.com/influxdb/influxdb-java/issues/7) Update Guava to 17.0 - fix dependency to guava. ## v1.1 [2014-05-31] -#### Features +### Features - Add InfluxDB#version() to get the InfluxDB Server version information. -- Changed InfluxDB#createDatabase() to match (https://github.com/influxdb/influxdb/issues/489) without replicationFactor. 
+- Changed InfluxDB#createDatabase() to match [Issue #489](https://github.com/influxdb/influxdb/issues/489) without replicationFactor. - Updated Retrofit from 1.5.0 -> 1.5.1 ## v1.0 [2014-05-6] - * Initial Release +- Initial Release diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 000000000..621c1526a --- /dev/null +++ b/FAQ.md @@ -0,0 +1,173 @@ +# Frequently Asked Questions + +## Functionality + +- [Frequently Asked Questions](#frequently-asked-questions) + - [Functionality](#functionality) + - [Security](#security) + - [Is the batch part of the client thread safe](#is-the-batch-part-of-the-client-thread-safe) + - [If multiple threads are accessing it, are they all adding Points to the same batch ?](#if-multiple-threads-are-accessing-it-are-they-all-adding-points-to-the-same-batch) + - [And if so, is there a single thread in the background that is emptying batch to the server ?](#and-if-so-is-there-a-single-thread-in-the-background-that-is-emptying-batch-to-the-server) + - [If there is an error during this background process, is it propagated to the rest of the client ?](#if-there-is-an-error-during-this-background-process-is-it-propagated-to-the-rest-of-the-client) + - [How the client responds to concurrent write backpressure from server ?](#how-the-client-responds-to-concurrent-write-backpressure-from-server) + - [Is there a way to tell that all query chunks have arrived ?](#is-there-a-way-to-tell-that-all-query-chunks-have-arrived) + - [How to handle exceptions while using async chunked queries ?](#how-to-handle-exceptions-while-using-async-chunked-queries) + - [Is there a way to tell the system to stop sending more chunks once I've found what I'm looking for ?](#is-there-a-way-to-tell-the-system-to-stop-sending-more-chunks-once-ive-found-what-im-looking-for) + - [Is default config security setup TLS 1.2 ?](#is-default-config-security-setup-tls-12) + - [How to use SSL client certificate authentication](#how-to-use-ssl-client-certificate-authentication) + 
+## Security + +- [Is default config security setup TLS 1.2 ?](#is-default-config-security-setup-tls-12-) +- [How to use SSL client certificate authentication](#how-to-use-ssl-client-certificate-authentication-) + +## Is the batch part of the client thread safe + +Yes, the __BatchProcessor__ uses a __BlockingQueue__ and the __RetryCapableBatchWriter__ is synchronized on its __write__ method + +```java +org.influxdb.impl.RetryCapableBatchWriter.write(Collection) + +``` + +## If multiple threads are accessing it, are they all adding Points to the same batch ? + +If they share the same InfluxDbImpl instance, so the answer is Yes (all writing points are put to the __BlockingQueue__) + +## And if so, is there a single thread in the background that is emptying batch to the server ? + +Yes, there is one worker thread that is scheduled to periodically flush the __BlockingQueue__ + +## If there is an error during this background process, is it propagated to the rest of the client ? + +Yes, on initializing BatchOptions, you can pass an exceptionHandler, this handler is used to handle any batch writing that causes a non-recoverable exception or when a batch is evicted due to a retry buffer capacity +(please refer to __BatchOptions.bufferLimit(int)__ for more details) +(list of non-recoverable error : [Handling-errors-of-InfluxDB-under-high-load](https://github.com/influxdata/influxdb-java/wiki/Handling-errors-of-InfluxDB-under-high-load)) + +## How the client responds to concurrent write backpressure from server ? 
+ +Concurrent WRITE throttling at server side is controlled by the trio (__max-concurrent-write-limit__, __max-enqueued-write-limit__, __enqueued-write-timeout__) +for example, you can have these in influxdb.conf + +```properties +max-concurrent-write-limit = 2 +max-enqueued-write-limit = 1 +enqueued-write-timeout = 1000 + +``` + +(more info at this [PR #9888 HTTP Write Throttle](https://github.com/influxdata/influxdb/pull/9888/files)) + +If the number of concurrent writes reaches the threshold, then any further write will be immediately returned with + +```bash +org.influxdb.InfluxDBIOException: java.net.SocketException: Connection reset by peer: socket write error + at org.influxdb.impl.InfluxDBImpl.execute(InfluxDBImpl.java:692) + at org.influxdb.impl.InfluxDBImpl.write(InfluxDBImpl.java:428) + +``` + +From version 2.9, influxdb-java introduces new error handling feature, the client will try to back off and rewrite failed writes on some recoverable errors (list of recoverable error : [Handling-errors-of-InfluxDB-under-high-load](https://github.com/influxdata/influxdb-java/wiki/Handling-errors-of-InfluxDB-under-high-load)) + +So in case the number of write requests exceeds Concurrent write setting at server side, influxdb-java can try to make sure no writing points get lost (due to rejection from server) + +## Is there a way to tell that all query chunks have arrived ? + +Yes, there is __onComplete__ action that is invoked after the successful end of stream. + +```java +influxDB.query(new Query("SELECT * FROM disk", "telegraf"), 10_000, + queryResult -> { + System.out.println("result = " + queryResult); + }, + () -> { + System.out.println("The query successfully finished."); + }); +``` + +## How to handle exceptions while using async chunked queries ? + +Exception handling for chunked queries can be handled by __onFailure__ error +consumer. 
+ +```java + +influxDB.query(query, chunksize, + //onNext result consumer + (cancellable, queryResult) -> { + System.out.println("Process queryResult - " + queryResult.toString()); + } + //onComplete executable + , () -> { + System.out.println("On Complete - the query finished successfully."); + }, + //onFailure error handler + throwable -> { + System.out.println("On Failure - " + throwable.getLocalizedMessage()); + }); +``` + +## Is there a way to tell the system to stop sending more chunks once I've found what I'm looking for ? + +Yes, there is __onNext__ bi-consumer with capability to discontinue a streaming query. + +```java +influxDB.query(new Query("SELECT * FROM disk", "telegraf"), 10_000, (cancellable, queryResult) -> { + + // found what I'm looking for ? + if (foundRequest(queryResult)) { + // yes => cancel query + cancellable.cancel(); + } + + // no => process next result + processResult(queryResult); +}); +``` + +## Is default config security setup TLS 1.2 ? + +(answer need to be verified) + +To construct an InfluxDBImpl you will need to pass a OkHttpClient.Builder instance. +At this point you are able to set your custom SSLSocketFactory via method OkHttpClient.Builder.sslSocketFactory(…) + +In case you don’t set it, OkHttp will use the system default (Java platform dependent), I tested in Java 8 (influxdb-java has CI test in Java 8 and 10) and see the default SSLContext support these protocols +SSLv3/TLSv1/TLSv1.1/TLSv1.2 + +So if the server supports TLS1.2, the communication should be encrypted by TLS 1.2 (during the handshake the client will provide the list of accepted security protocols and the server will pick one, so this case the server would pick TLS 1.2) + +## How to use SSL client certificate authentication + +To use SSL certificate authentication you need to setup `SslSocketFactory` on OkHttpClient.Builder. 
+ +Here is the example, how to create InfluxDB client with the new SSLContext with custom identity keystore (p12) and truststore (jks): + +```java +KeyStore keyStore = KeyStore.getInstance("PKCS12"); +keyStore.load(new FileInputStream("conf/keystore.p12"), "changeme".toCharArray()); + +KeyStore trustStore = KeyStore.getInstance("JKS"); +trustStore.load(new FileInputStream("conf/trustStore.jks"), "changeme".toCharArray()); + +SSLContext sslContext = SSLContext.getInstance("SSL"); + +KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); +keyManagerFactory.init(keyStore, "changeme".toCharArray()); + +TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); +trustManagerFactory.init(trustStore); + +TrustManager[] trustManagers = trustManagerFactory.getTrustManagers(); + +sslContext.init(keyManagerFactory.getKeyManagers(), trustManagers, new SecureRandom()); +sslContext.getDefaultSSLParameters().setNeedClientAuth(true); + +OkHttpClient.Builder okhttpClientBuilder = new OkHttpClient.Builder(); +okhttpClientBuilder.sslSocketFactory(sslContext.getSocketFactory(), (X509TrustManager) trustManagers[0]); + +InfluxDB influxDB = InfluxDBFactory.connect("https://proxy_host:9086", okhttpClientBuilder); +``` + +InfluxDB (v1.6.2) does not have built-in support for client certificate ssl authentication. +SSL must be handled by http proxy such as Haproxy, nginx... diff --git a/INFLUXDB_MAPPER.md b/INFLUXDB_MAPPER.md new file mode 100644 index 000000000..6540f389d --- /dev/null +++ b/INFLUXDB_MAPPER.md @@ -0,0 +1,50 @@ +### InfluxDBMapper + +In case you want to use models only, you can use the InfluxDBMapper to save and load measurements. +You can create models that specify the database the measurement and the retention policy. 
+ +```Java +@Measurement(name = "cpu",database="servers", retentionPolicy="autogen",timeUnit = TimeUnit.MILLISECONDS) +public class Cpu { + @Column(name = "time") + private Instant time; + @Column(name = "host", tag = true) + private String hostname; + @Column(name = "region", tag = true) + private String region; + @Column(name = "idle") + private Double idle; + @Column(name = "happydevop") + private Boolean happydevop; + @Column(name = "uptimesecs") + private Long uptimeSecs; + // getters (and setters if you need) +} +``` + +Save operation using a model. + +```Java +Cpu cpu = .., create the cpu measure +influxDBMapper.save(cpu); +``` + +Load data using a model. + +```java +Cpu persistedCpu = influxDBMapper.query(Cpu.class).get(0); +``` + +Load data using a query and specify the model for mapping. + +```java +Query query = ... create your query +List persistedMeasure = influxDBMapper.query(query,Cpu.class); +``` + +#### InfluxDBMapper limitations + +Tags are automatically converted to strings, since tags are strings to influxdb +Supported values for fields are boolean, int, long, double, Boolean, Integer, Long, Double. +The time field should be of type instant. +If you do not specify the time or set a value then the current system time shall be used with the timeunit specified. diff --git a/LICENSE b/LICENSE index 766a0a595..f21351ced 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) {{{year}}} {{{fullname}}} +Copyright (c) 2014-2017 Stefan Majer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MANUAL.md b/MANUAL.md new file mode 100644 index 000000000..99e1248b5 --- /dev/null +++ b/MANUAL.md @@ -0,0 +1,460 @@ +# Manual + +## Quick start + +The code below is similar to the one found on the README.md file but with comments removed and rows numbered for better reference. 
+ +```Java +final String serverURL = "http://127.0.0.1:8086", username = "root", password = "root"; +final InfluxDB influxDB = InfluxDBFactory.connect(serverURL, username, password); // (1) + +String databaseName = "NOAA_water_database"; +influxDB.query(new Query("CREATE DATABASE " + databaseName)); +influxDB.setDatabase(databaseName); // (2) + +String retentionPolicyName = "one_day_only"; +influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName + + " ON " + databaseName + " DURATION 1d REPLICATION 1 DEFAULT")); +influxDB.setRetentionPolicy(retentionPolicyName); // (3) + +influxDB.enableBatch( + BatchOptions.DEFAULTS + .threadFactory(runnable -> { + Thread thread = new Thread(runnable); + thread.setDaemon(true); + return thread; + }) +); // (4) + +Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close)); // (5) + +influxDB.write(Point.measurement("h2o_feet") // (6) + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .tag("location", "santa_monica") + .addField("level description", "below 3 feet") + .addField("water_level", 2.064d) + .build()); + +influxDB.write(Point.measurement("h2o_feet") // (6) + .tag("location", "coyote_creek") + .addField("level description", "between 6 and 9 feet") + .addField("water_level", 8.12d) + .build()); + +Thread.sleep(5_000L); + +QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet")); // (7) + +System.out.println(queryResult); +// It will print something like: +// QueryResult [results=[Result [series=[Series [name=h2o_feet, tags=null, +// columns=[time, level description, location, water_level], +// values=[ +// [2020-03-22T20:50:12.929Z, below 3 feet, santa_monica, 2.064], +// [2020-03-22T20:50:12.929Z, between 6 and 9 feet, coyote_creek, 8.12] +// ]]], error=null]], error=null] +``` + +### Connecting to InfluxDB + +(1) The `InfluxDB` client is thread-safe and our recommendation is to have a single instance per application and reuse it, when possible. 
Every `InfluxDB` instance keeps multiple data structures, including those used to manage different pools like HTTP clients for reads and writes. + +It's possible to have just one client even when reading or writing to multiple InfluxDB databases and this will be shown later here. + +### Setting a default database (optional) + +(2) If you are not querying different databases with a single `InfluxDB` client, it's possible to set a default database name and all queries (reads and writes) from this `InfluxDB` client will be executed against the default database. + +If we only comment out the line (2) then all reads and writes queries would fail. To avoid this, we need to pass the database name as parameter to `BatchPoints` (writes) and to `Query` (reads). For example: + +```Java +// ... +String databaseName = "NOAA_water_database"; +// influxDB.setDatabase() won't be called... +String retentionPolicyName = "one_day_only"; +// ... + +BatchPoints batchPoints = BatchPoints.database(databaseName).retentionPolicy(retentionPolicyName).build(); + +batchPoints.point(Point.measurement("h2o_feet") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .tag("location", "santa_monica") + .addField("level description", "below 3 feet") + .addField("water_level", 2.064d) + .build()); + +// ... +influxDB.write(batchPoints); +// ... +QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet", databaseName)); +// ... +influxDB.close(); +``` + +It's possible to use both approaches at the same time: set a default database using `influxDB.setDatabase` and read/write passing a `databaseName` as parameter. On this case, the `databaseName` passed as parameter will be used. + +### Setting a default retention policy (optional) + +(3) TODO: like setting a default database, explain here how it works with RP. 
+
+### Enabling batch writes
+
+(4) TODO: explanation about BatchOption parameters:
+
+```Java
+  // default values here are consistent with Telegraf
+  public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000;
+  public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000;
+  public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0;
+  public static final int DEFAULT_BUFFER_LIMIT = 10000;
+  public static final TimeUnit DEFAULT_PRECISION = TimeUnit.NANOSECONDS;
+  public static final boolean DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION = false;
+```
+#### Configuring behaviour of batch writes when the action queue exhausts
+With batching enabled, the client provides two options on how to deal with **action queue** (where the points are accumulated as a batch) exhaustion.
+1. When `dropActionsOnQueueExhaustion` is `false` (default value), `InfluxDB#write` will be blocked till the space is created in the action queue.
+2. When `dropActionsOnQueueExhaustion` is `true`, new writes using `InfluxDB#write` will be dropped and `droppedActionHandler` will be called.
+   Example usage:
+   ```Java
+    influxDB.enableBatch(BatchOptions.DEFAULTS.dropActionsOnQueueExhaustion(true)
+       .droppedActionHandler((point) -> log.error("Point dropped due to action queue exhaustion.")));
+   ```
+
+
+#### Configuring the jitter interval for batch writes
+
+When using a large number of influxdb-java clients against a single server it may happen that all the clients
+will submit their buffered points at the same time and possibly overload the server. This usually happens
+when all the clients are started at once - for instance as members of cloud hosted large cluster networks.
+If all the clients have the same flushDuration set this situation will repeat periodically.
+
+To solve this situation the influxdb-java offers an option to offset the flushDuration by a random interval so that
+the clients will flush their buffers in different intervals:
+
+```Java
+influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(500));
+```
+
+#### Error handling with batch writes
+
+With batching enabled the client provides two strategies for dealing with errors thrown by the InfluxDB server.
+
+ 1. 'One shot' write - on failed write request to InfluxDB server an error is reported to the client using the means mentioned above.
+ 2. 'Retry on error' write (used by default) - on failed write the request by the client is repeated after batchInterval elapses (if there is a chance the write will succeed - the error was caused by overloading the server, a network error etc.)
+ When new data points are written before the previous (failed) points are successfully written, those are queued inside the client and wait until older data points are successfully written.
+ Size of this queue is limited and configured by `BatchOptions.bufferLimit` property. When the limit is reached, the oldest points in the queue are dropped. 'Retry on error' strategy is used when individual write batch size defined by `BatchOptions.actions` is lower than `BatchOptions.bufferLimit`.
+
+#### Ensure application exit when batching is enabled
+`BatchOptions.DEFAULTS` creates a non-daemon thread pool which prevents the JVM from initiating shutdown in the case of
+exceptions or successful completion of the main thread. This will prevent shutdown hooks (many frameworks and plain JVM
+applications use these to close/clean up resources) from running, preventing graceful termination of the application.
+
+Thus, configuring batch options with a daemon thread pool will solve this issue and will for example ensure that the registered
+(5) shutdown hook is run to close the `InfluxDB` client properly (flushing and closing of resources will happen).
+ +### Close InfluxDB Client on JVM Termination +(5) In order to ensure that in-flight points are flushed and resources are released properly, it is essential to call +`influxDB.close()` the client when it is no longer required. + +Registering a shutdown hook is a good way to ensure that this is done on application termination regardless of exceptions +that are thrown in the main thread of the code. Note that if you are using a framework, do check the documentation for its +way of configuring shutdown lifecycle hooks or if it might already be calling `close` automatically. + + +### Writing to InfluxDB + +(6) ... + +`----8<----BEGIN DRAFT----8<----` + +Any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level. +If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer, Throwable>` using the overloaded `enableBatch` method: + +```Java +influxDB.enableBatch(BatchOptions.DEFAULTS.exceptionHandler( + (failedPoints, throwable) -> { /* custom error handling here */ }) +); +``` + +`----8<----END DRAFT----8<----` + +#### Writing synchronously to InfluxDB (not recommended) + +If you want to write the data points synchronously to InfluxDB and handle the errors (as they may happen) with every write: + +`----8<----BEGIN DRAFT----8<----` + +```Java +InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); +String dbName = "aTimeSeries"; +influxDB.query(new Query("CREATE DATABASE " + dbName)); +String rpName = "aRetentionPolicy"; +influxDB.query(new Query("CREATE RETENTION POLICY " + rpName + " ON " + dbName + " DURATION 30h REPLICATION 2 DEFAULT")); + +BatchPoints batchPoints = BatchPoints + .database(dbName) + .tag("async", "true") + .retentionPolicy(rpName) + .consistency(ConsistencyLevel.ALL) + .build(); +Point point1 = Point.measurement("cpu") + 
.time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("idle", 90L) + .addField("user", 9L) + .addField("system", 1L) + .build(); +Point point2 = Point.measurement("disk") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .addField("used", 80L) + .addField("free", 1L) + .build(); +batchPoints.point(point1); +batchPoints.point(point2); +influxDB.write(batchPoints); +Query query = new Query("SELECT idle FROM cpu", dbName); +influxDB.query(query); +influxDB.query(new Query("DROP RETENTION POLICY " + rpName + " ON " + dbName)); +influxDB.query(new Query("DROP DATABASE " + dbName)); +``` + +`----8<----END DRAFT----8<----` + +### Reading from InfluxDB + +(7) ... + +#### Query using Callbacks + +influxdb-java now supports returning results of a query via callbacks. Only one +of the following consumers are going to be called once : + +```Java +this.influxDB.query(new Query("SELECT idle FROM cpu", dbName), queryResult -> { + // Do something with the result... +}, throwable -> { + // Do something with the error... +}); +``` + +#### Query using parameter binding (a.k.a. "prepared statements") + +If your Query is based on user input, it is good practice to use parameter binding to avoid [injection attacks](https://en.wikipedia.org/wiki/SQL_injection). +You can create queries with parameter binding with the help of the QueryBuilder: + +```Java +Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE idle > $idle AND system > $system") + .forDatabase(dbName) + .bind("idle", 90) + .bind("system", 5) + .create(); +QueryResult results = influxDB.query(query); +``` + +The values of the bind() calls are bound to the placeholders in the query ($idle, $system). + +## Advanced Usage + +### Gzip's support + +influxdb-java client doesn't enable gzip compress for http request body by default. 
If you want to enable gzip to reduce transfer data's size , you can call: + +```Java +influxDB.enableGzip() +``` + +### UDP's support + +influxdb-java client support udp protocol now. you can call following methods directly to write through UDP. + +```Java +public void write(final int udpPort, final String records); +public void write(final int udpPort, final List records); +public void write(final int udpPort, final Point point); +``` + +Note: make sure write content's total size should not > UDP protocol's limit(64K), or you should use http instead of udp. + +### Chunking support + +influxdb-java client now supports influxdb chunking. The following example uses a chunkSize of 20 and invokes the specified Consumer (e.g. System.out.println) for each received QueryResult + +```Java +Query query = new Query("SELECT idle FROM cpu", dbName); +influxDB.query(query, 20, queryResult -> System.out.println(queryResult)); +``` + +### QueryResult mapper to POJO + +An alternative way to handle the QueryResult object is now available. +Supposing that you have a measurement _CPU_: + +```sql +> INSERT cpu,host=serverA,region=us_west idle=0.64,happydevop=false,uptimesecs=123456789i +> +> select * from cpu +name: cpu +time happydevop host idle region uptimesecs +---- ---------- ---- ---- ------ ---------- +2017-06-20T15:32:46.202829088Z false serverA 0.64 us_west 123456789 +``` + +And the following tag keys: + +```sql +> show tag keys from cpu +name: cpu +tagKey +------ +host +region +``` + +1. Create a POJO to represent your measurement. For example: + +```Java +public class Cpu { + private Instant time; + private String hostname; + private String region; + private Double idle; + private Boolean happydevop; + private Long uptimeSecs; + // getters (and setters if you need) +} +``` + +2. 
Add @Measurement, @TimeColumn and @Column annotations (column names default to field names unless otherwise specified): + +```Java +@Measurement(name = "cpu") +public class Cpu { + @TimeColumn + @Column + private Instant time; + @Column(name = "host", tag = true) + private String hostname; + @Column(tag = true) + private String region; + @Column + private Double idle; + @Column + private Boolean happydevop; + @Column(name = "uptimesecs") + private Long uptimeSecs; + // getters (and setters if you need) +} +``` + +Alternatively, you can use: + +```Java +@Measurement(name = "cpu", allFields = true) +public class Cpu { + @TimeColumn + private Instant time; + @Column(name = "host", tag = true) + private String hostname; + @Column(tag = true) + private String region; + private Double idle; + private Boolean happydevop; + @Column(name = "uptimesecs") + private Long uptimeSecs; + // getters (and setters if you need) +} +``` + +Or (if you're on JDK14+ and/or [Android SDK34+](https://android-developers.googleblog.com/2023/06/records-in-android-studio-flamingo.html)): + +```Java +@Measurement(name = "cpu", allFields = true) +public record Cpu( + @TimeColumn + Instant time, + @Column(name = "host", tag = true) + String hostname, + @Column(tag = true) + String region, + Double idle, + Boolean happydevop, + @Column(name = "uptimesecs") + Long uptimeSecs +) {} +``` + +3. Call _InfluxDBResultMapper.toPOJO(...)_ to map the QueryResult to your POJO: + +```java +InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "root"); +String dbName = "myTimeseries"; +QueryResult queryResult = influxDB.query(new Query("SELECT * FROM cpu", dbName)); + +InfluxDBResultMapper resultMapper = new InfluxDBResultMapper(); // thread-safe - can be reused +List cpuList = resultMapper.toPOJO(queryResult, Cpu.class); +``` + +### Writing using POJO + +The same way we use `annotations` to transform data to POJO, we can write data as POJO. 
+Having the same POJO class Cpu + +```java +String dbName = "myTimeseries"; +String rpName = "aRetentionPolicy"; +// Cpu has annotations @Measurement,@TimeColumn and @Column +Cpu cpu = new Cpu(); +// ... setting data + +Point point = Point.measurementByPOJO(cpu.getClass()).addFieldsFromPOJO(cpu).build(); + +influxDB.write(dbName, rpName, point); +``` + +#### QueryResult mapper limitations + +* If your InfluxDB query contains multiple SELECT clauses, you will have to call InfluxResultMapper#toPOJO() multiple times to map every measurement returned by QueryResult to the respective POJO; +* If your InfluxDB query contains multiple SELECT clauses **for the same measurement**, InfluxResultMapper will process all results because there is no way to distinguish which one should be mapped to your POJO. It may result in an invalid collection being returned; +* A Class field annotated with _@Column(..., tag = true)_ (i.e. a [InfluxDB Tag](https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value)) must be declared as _String_. + +#### QueryBuilder + +An alternative way to create InfluxDB queries is available. By using the [QueryBuilder](QUERY_BUILDER.md) you can create queries using java instead of providing the influxdb queries as strings. + +#### Generic POJO super classes + +POJO classes can have generic super classes, for cases where multiple measurements have a similar structure, and differ by type(s), as in: + +```java +public class SuperMeasurement { + @Column + @TimeColumn + private Instant time; + @Column + T value; + // Other common columns and tags +} + +public class SubMeasurement extends SuperMeasurement { + // Any specific columns and tags +} +``` + +### InfluxDBMapper + +In case you want to save and load data using models you can use the [InfluxDBMapper](INFLUXDB_MAPPER.md). 
+ +### Other Usages + +For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") + +### Publishing + +This is a +[link](https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide) +to the sonatype oss guide to publishing. I'll update this section once +the [jira ticket](https://issues.sonatype.org/browse/OSSRH-9728) is +closed and I'm able to upload artifacts to the sonatype repositories. + +### Frequently Asked Questions + +This is a [FAQ](FAQ.md) list for influxdb-java. diff --git a/QUERY_BUILDER.md b/QUERY_BUILDER.md new file mode 100644 index 000000000..d84e6d255 --- /dev/null +++ b/QUERY_BUILDER.md @@ -0,0 +1,606 @@ +# QueryBuilder + +Supposing that you have a measurement _h2o_feet_: + +```sqlite-psql +> SELECT * FROM "h2o_feet" + +name: h2o_feet +-------------- +time level description location water_level +2015-08-18T00:00:00Z below 3 feet santa_monica 2.064 +2015-08-18T00:00:00Z between 6 and 9 feet coyote_creek 8.12 +[...] 
+2015-09-18T21:36:00Z between 3 and 6 feet santa_monica 5.066 +2015-09-18T21:42:00Z between 3 and 6 feet santa_monica 4.938 +``` + +## The basic SELECT statement + +Issue simple select statements + +```java +Query query = select().from(DATABASE,"h2o_feet"); +``` + +```sqlite-psql +SELECT * FROM "h2o_feet" +``` + +Select specific tags and fields from a single measurement + +```java +Query query = select("level description","location","water_level").from(DATABASE,"h2o_feet"); +``` + +```sqlite-psql +SELECT "level description",location,water_level FROM h2o_feet; +``` + +Select specific tags and fields from a single measurement, and provide their identifier type + +```java +Query query = select().column("\"level description\"::field").column("\"location\"::tag").column("\"water_level\"::field").from(DATABASE,"h2o_feet"); +``` + +```sqlite-psql +SELECT "level description"::field,"location"::tag,"water_level"::field FROM h2o_feet; +``` + +Select all fields from a single measurement + +```java +Query query = select().raw("*::field").from(DATABASE,"h2o_feet"); +``` + +```sqlite-psql +SELECT *::field FROM h2o_feet; +``` + +Select a specific field from a measurement and perform basic arithmetic + +```java +Query query = select().op(op(cop("water_level",MUL,2),"+",4)).from(DATABASE,"h2o_feet"); +``` + +```sqlite-psql +SELECT (water_level * 2) + 4 FROM h2o_feet; +``` + +Select all data from more than one measurement + +```java +Query query = select().from(DATABASE,"\"h2o_feet\",\"h2o_pH\""); +``` + +```sqlite-psql +SELECT * FROM "h2o_feet","h2o_pH"; +``` + +Select all data from a fully qualified measurement + +```java +Query query = select().from(DATABASE,"\"NOAA_water_database\".\"autogen\".\"h2o_feet\""); +``` + +```sqlite-psql +SELECT * FROM "NOAA_water_database"."autogen"."h2o_feet"; +``` + +Select data that have specific field key-values + +```java +Query query = select().from(DATABASE,"h2o_feet").where(gt("water_level",8)); +``` + +```sqlite-psql +SELECT * FROM h2o_feet 
WHERE water_level > 8; +``` + +Select data that have a specific string field key-value + +```java +Query query = select().from(DATABASE,"h2o_feet").where(eq("level description","below 3 feet")); +``` + +```sqlite-psql +SELECT * FROM h2o_feet WHERE "level description" = 'below 3 feet'; +``` + +Select data that have a specific field key-value and perform basic arithmetic + +```java +Query query = select().from(DATABASE,"h2o_feet").where(gt(cop("water_level",ADD,2),11.9)); +``` + +```sqlite-psql +SELECT * FROM h2o_feet WHERE (water_level + 2) > 11.9; +``` + +Select data that have a specific tag key-value + +```java +Query query = select().column("water_level").from(DATABASE,"h2o_feet").where(eq("location","santa_monica")); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica'; +``` + +Select data that have specific field key-values and tag key-values + +```java +Query query = select().column("water_level").from(DATABASE,"h2o_feet") + .where(neq("location","santa_monica")) + .andNested() + .and(lt("water_level",-0.59)) + .or(gt("water_level",9.95)) + .close(); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location <> 'santa_monica' AND (water_level < -0.59 OR water_level > 9.95); +``` + +Select data that have specific timestamps + +```java +Query query = select().from(DATABASE,"h2o_feet") + .where(gt("time",subTime(7,DAY))); +``` + +```sqlite-psql +SELECT * FROM h2o_feet WHERE time > now() - 7d; +``` + +## The GROUP BY clause + +Group query results by a single tag + +```java +Query query = select().mean("water_level").from(DATABASE,"h2o_feet") .groupBy("location"); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet GROUP BY location; +``` + +Group query results by more than one tag + +```java +Query query = select().mean("index").from(DATABASE,"h2o_feet") + .groupBy("location","randtag"); +``` + +```sqlite-psql +SELECT MEAN(index) FROM h2o_feet GROUP BY location,randtag; +``` + +Group query results by all 
tags + +```java +Query query = select().mean("index").from(DATABASE,"h2o_feet") + .groupBy(raw("*")); +``` + +```sqlite-psql +SELECT MEAN(index) FROM h2o_feet GROUP BY *; +``` + +## GROUP BY time interval + +Group query results into 12 minute intervals + +```java +Query query = select().count("water_level").from(DATABASE,"h2o_feet") + .where(eq("location","coyote_creek")) + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:30:00Z'")) + .groupBy(time(12l,MINUTE)); +``` + +```sqlite-psql +SELECT COUNT(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z'' GROUP BY time(12m); +``` + +Group query results into 12 minutes intervals and by a tag key + +```java + Query query = select().count("water_level").from(DATABASE,"h2o_feet") + .where() + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:30:00Z'")) + .groupBy(time(12l,MINUTE),"location"); +``` + +```sqlite-psql +SELECT COUNT(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z'' GROUP BY time(12m),location; +``` + +## Advanced GROUP BY time() syntax + +Group query results into 18 minute intervals and shift the preset time boundaries forward + +```java +Query query = select().mean("water_level").from(DATABASE,"h2o_feet") + .where(eq("location","coyote_creek")) + .and(gte("time","2015-08-18T00:06:00Z")) + .and(lte("time","2015-08-18T00:54:00Z")) + .groupBy(time(18l,MINUTE,6l,MINUTE)); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:06:00Z' AND time <= '2015-08-18T00:54:00Z' GROUP BY time(18m,6m); +``` + +Group query results into 12 minute intervals and shift the preset time boundaries back + +```java +Query query = select().mean("water_level").from(DATABASE,"h2o_feet") + .where(eq("location","coyote_creek")) + .and(gte("time","2015-08-18T00:06:00Z")) + 
.and(lte("time","2015-08-18T00:54:00Z")) + .groupBy(time(18l,MINUTE,-12l,MINUTE)); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:06:00Z' AND time <= '2015-08-18T00:54:00Z' GROUP BY time(18m,-12m); +``` + +## GROUP BY time intervals and fill() + +```java +Query select = select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op(ti(24043524l, MINUTE), SUB, ti(6l, MINUTE)))) + .groupBy("water_level") + .fill(100); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE time > 24043524m - 6m GROUP BY water_level fill(100);" +``` + +## The INTO clause + +Rename a database + +```java +Query select = select() + .into("\"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT") + .from(DATABASE, "\"NOAA_water_database\".\"autogen\"./.*/") + .groupBy(new RawText("*")); +``` + +```sqlite-psql +SELECT * INTO "copy_NOAA_water_database"."autogen".:MEASUREMENT FROM "NOAA_water_database"."autogen"./.*/ GROUP BY *; +``` + +Write the results of a query to a measurement + +```java +Query select = select().column("water_level").into("h2o_feet_copy_1").from(DATABASE,"h2o_feet").where(eq("location","coyote_creek")); +``` + +```sqlite-psql +SELECT water_level INTO h2o_feet_copy_1 FROM h2o_feet WHERE location = 'coyote_creek'; +``` + +Write aggregated results to a measurement + +```java +Query select = select() + .mean("water_level") + .into("all_my_averages") + .from(DATABASE,"h2o_feet") + .where(eq("location","coyote_creek")) + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:30:00Z")) + .groupBy(time(12l,MINUTE)); +``` + +```sqlite-psql +SELECT MEAN(water_level) INTO all_my_averages FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z' GROUP BY time(12m); +``` + +Write aggregated results for more than one measurement to a different database (downsampling with backreferencing) + +```java +Query 
select = select() + .mean(raw("*")) + .into("\"where_else\".\"autogen\".:MEASUREMENT") + .fromRaw(DATABASE, "/.*/") + .where(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:06:00Z")) + .groupBy(time(12l,MINUTE)); +``` + +```sqlite-psql +SELECT MEAN(*) INTO "where_else"."autogen".:MEASUREMENT FROM /.*/ WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:06:00Z' GROUP BY time(12m); +``` + +## ORDER BY time DESC + +Return the newest points first + +```java +Query select = select().from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + .orderBy(desc()); +``` + +```sqlite-psql +SELECT * FROM h2o_feet WHERE location = 'santa_monica' ORDER BY time DESC; +``` + +Return the newest points first and include a GROUP BY time() clause + +```java +Query select = select().mean("water_level") + .from(DATABASE,"h2o_feet") + .where(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:42:00Z")) + .groupBy(time(12l,MINUTE)) + .orderBy(desc()); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY time(12m) ORDER BY time DESC; +``` + +## The LIMIT clause + +Limit the number of points returned + +```java +Query select = select("water_level","location") + .from(DATABASE,"h2o_feet").limit(3); +``` + +```sqlite-psql +SELECT water_level,location FROM h2o_feet LIMIT 3; +``` + +Limit the number points returned and include a GROUP BY clause + +```java +Query select = select().mean("water_level") + .from(DATABASE,"h2o_feet") + .where() + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:42:00Z")) + .groupBy(raw("*"),time(12l,MINUTE)) + .limit(2); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) LIMIT 2; +``` + +## The SLIMIT clause + +Limit the number of series returned + +```java +Query select = 
select().column("water_level") + .from(DATABASE,"h2o_fleet") + .groupBy(raw("*")) + .sLimit(1); +``` + +```sqlite-psql +SELECT water_level FROM "h2o_feet" GROUP BY * SLIMIT 1 +``` + +Limit the number of series returned and include a GROUP BY time() clause + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where() + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:42:00Z")) + .groupBy(raw("*"),time(12l,MINUTE)) + .sLimit(1); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) SLIMIT 1; +``` + +## The OFFSET clause + +Paginate points + +```java +Query select = select("water_level","location").from(DATABASE,"h2o_feet").limit(3,3); +``` + +```sqlite-psql +SELECT water_level,location FROM h2o_feet LIMIT 3 OFFSET 3; +``` + +## The SOFFSET clause + +Paginate series and include all clauses + +```java +Query select = select().mean("water_level") + .from(DATABASE,"h2o_feet") + .where() + .and(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:42:00Z")) + .groupBy(raw("*"),time(12l,MINUTE)) + .orderBy(desc()) + .limit(2,2) + .sLimit(1,1); +``` + +```sqlite-psql +SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) ORDER BY time DESC LIMIT 2 OFFSET 2 SLIMIT 1 SOFFSET 1; +``` + +## The Time Zone clause + +Return the UTC offset for Chicago’s time zone + +```java +Query select = select() + .column("test1") + .from(DATABASE, "h2o_feet") + .groupBy("test2", "test3") + .sLimit(1) + .tz("America/Chicago"); +``` + +```sqlite-psql +SELECT test1 FROM foobar GROUP BY test2,test3 SLIMIT 1 tz('America/Chicago'); +``` + +## Time Syntax + +Specify a time range with RFC3339 date-time strings + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + 
.and(gte("time","2015-08-18T00:00:00.000000000Z")) + .and(lte("time","2015-08-18T00:12:00Z")); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= '2015-08-18T00:00:00.000000000Z' AND time <= '2015-08-18T00:12:00Z'; +``` + +Specify a time range with second-precision epoch timestamps + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + .and(gte("time",ti(1439856000l,SECOND))) + .and(lte("time",ti(1439856720l,SECOND))); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= 1439856000s AND time <= 1439856720s; +``` + +Perform basic arithmetic on an RFC3339-like date-time string + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + .and(gte("time",op("2015-09-18T21:24:00Z",SUB,ti(6l,MINUTE)))); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= '2015-09-18T21:24:00Z' - 6m; +``` + +Perform basic arithmetic on an epoch timestamp + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + .and(gte("time",op(ti(24043524l,MINUTE),SUB,ti(6l,MINUTE)))); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= 24043524m - 6m; +``` + +Specify a time range with relative time + +```java +Query select = select().column("water_level") + .from(DATABASE,"h2o_feet") + .where(eq("location","santa_monica")) + .and(gte("time",subTime(1l,HOUR))); +``` + +```sqlite-psql +SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= now() - 1h; +``` + +## Regular expressions + +Use a regular expression to specify field keys and tag keys in the SELECT clause + +```java +Query select = select().regex("l").from(DATABASE,"h2o_feet").limit(1); +``` + +```sqlite-psql +SELECT /l/ 
FROM h2o_feet LIMIT 1; +``` + +Use a regular expression to specify field keys with a function in the SELECT clause + +```java +Query select = select().regex("l").distinct().from(DATABASE,"h2o_feet").limit(1); +``` + +```sqlite-psql +SELECT DISTINCT /l/ FROM h2o_feet LIMIT 1; +``` + +Use a regular expression to specify measurements in the FROM clause + +```java +Query select = select().mean("degrees").fromRaw(DATABASE,"/temperature/"); +``` + +```sqlite-psql +SELECT MEAN(degrees) FROM /temperature/; +``` + +Use a regular expression to specify a field value in the WHERE clause + +```java +Query select = select().regex("/l/").from(DATABASE,"h2o_feet").where(regex("level description","/between/")).limit(1); +``` + +```sqlite-psql +SELECT /l/ FROM h2o_feet WHERE "level description" =~ /between/ LIMIT 1; +``` + +Use a regular expression to specify tag keys in the GROUP BY clause + +```java +Query select = select().regex("/l/").from(DATABASE,"h2o_feet").where(regex("level description","/between/")).groupBy(raw("/l/")).limit(1); +``` + +```sqlite-psql +SELECT /l/ FROM h2o_feet WHERE "level description" =~ /between/ GROUP BY /l/ LIMIT 1; +``` + +Function with no direct implementation can be supported by raw expressions + +```java +Query select = select().raw("an expression on select").from(dbName, "cpu").where("an expression as condition"); +``` + +```sqlite-psql +SELECT an expression on select FROM h2o_feet WHERE an expression as condition; +``` + +Binding parameters + +If your Query is based on user input, it is good practice to use parameter binding to avoid [injection attacks](https://en.wikipedia.org/wiki/SQL_injection). 
+You can create queries with parameter binding: + +```java +Query query = select().from(DATABASE,"h2o_feet").where(gt("water_level", FunctionFactory.placeholder("level"))) + .bindParameter("level", 8); +``` + +```sqlite-psql +SELECT * FROM h2o_feet WHERE water_level > $level; +``` + +The values of bindParameter() calls are bound to the placeholders in the query (`level`). diff --git a/README.md b/README.md index 94e1eb2e2..74f6a5ba1 100644 --- a/README.md +++ b/README.md @@ -1,172 +1,174 @@ -influxdb-java -============= +# influxdb-java -[![Build Status](https://travis-ci.org/influxdata/influxdb-java.svg?branch=master)](https://travis-ci.org/influxdata/influxdb-java) +[![Build Status](https://github.com/influxdata/influxdb-java/workflows/master/badge.svg)](https://github.com/influxdata/influxdb-java/actions) [![codecov.io](http://codecov.io/github/influxdata/influxdb-java/coverage.svg?branch=master)](http://codecov.io/github/influxdata/influxdb-java?branch=master) [![Issue Count](https://codeclimate.com/github/influxdata/influxdb-java/badges/issue_count.svg)](https://codeclimate.com/github/influxdata/influxdb-java) -This is the Java Client library which is only compatible with InfluxDB 0.9 and higher. Maintained by [@majst01](https://github.com/majst01). - -To connect to InfluxDB 0.8.x you need to use influxdb-java version 1.6. - -This implementation is meant as a Java rewrite of the influxdb-go package. -All low level REST Api calls are available. 
- -## Usages - -### Basic Usages: - -```java -InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); -String dbName = "aTimeSeries"; -influxDB.createDatabase(dbName); - -BatchPoints batchPoints = BatchPoints - .database(dbName) - .tag("async", "true") - .retentionPolicy("autogen") - .consistency(ConsistencyLevel.ALL) - .build(); -Point point1 = Point.measurement("cpu") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("idle", 90L) - .addField("user", 9L) - .addField("system", 1L) - .build(); -Point point2 = Point.measurement("disk") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("used", 80L) - .addField("free", 1L) - .build(); -batchPoints.point(point1); -batchPoints.point(point2); -influxDB.write(batchPoints); -Query query = new Query("SELECT idle FROM cpu", dbName); -influxDB.query(query); -influxDB.deleteDatabase(dbName); -``` -Note : If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen' - -If your application produces only single Points, you can enable the batching functionality of influxdb-java: - -```java -InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root"); -String dbName = "aTimeSeries"; -influxDB.createDatabase(dbName); - -// Flush every 2000 Points, at least every 100ms -influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); - -Point point1 = Point.measurement("cpu") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("idle", 90L) - .addField("user", 9L) - .addField("system", 1L) - .build(); -Point point2 = Point.measurement("disk") - .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) - .addField("used", 80L) - .addField("free", 1L) - .build(); - -influxDB.write(dbName, "autogen", point1); -influxDB.write(dbName, "autogen", point2); -Query query = new Query("SELECT idle FROM cpu", dbName); -influxDB.query(query); -influxDB.deleteDatabase(dbName); -``` -Note that the batching functionality 
creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()``` +This is the official (and community-maintained) Java client library for [InfluxDB](https://www.influxdata.com/products/influxdb-overview/) (1.x), the open source time series database that is part of the TICK (Telegraf, InfluxDB, Chronograf, Kapacitor) stack. -Also note that any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level. +For InfluxDB 3.0 users, this library is succeeded by the lightweight [v3 client library](https://github.com/InfluxCommunity/influxdb3-java). -If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer, Throwable>` using the overloaded `enableBatch` method: - -```java -// Flush every 2000 Points, at least every 100ms -influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS, Executors.defaultThreadFactory(), (failedPoints, throwable) -> { /* custom error handling here */ }); -``` +_Note: This library is for use with InfluxDB 1.x and [2.x compatibility API](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/). For full supports of InfluxDB 2.x features, please use the [influxdb-client-java](https://github.com/influxdata/influxdb-client-java) client._ -### Advanced Usages: +## Adding the library to your project -#### Gzip's support (version 2.5+ required): +The library artifact is published in Maven central, available at [https://search.maven.org/artifact/org.influxdb/influxdb-java](https://search.maven.org/artifact/org.influxdb/influxdb-java). -influxdb-java client doesn't enable gzip compress for http request body by default. 
If you want to enable gzip to reduce transfer data's size , you can call: -```java -influxDB.enableGzip() -``` +### Release versions -#### UDP's support (version 2.5+ required): +Maven dependency: -influxdb-java client support udp protocol now. you can call followed methods directly to write through UDP. -```java -public void write(final int udpPort, final String records); -public void write(final int udpPort, final List records); -public void write(final int udpPort, final Point point); +```xml + + org.influxdb + influxdb-java + ${influxdbClient.version} + ``` -note: make sure write content's total size should not > UDP protocol's limit(64K), or you should use http instead of udp. +Gradle dependency: -#### chunking support (version 2.6+ required, unreleased): - -influxdb-java client now supports influxdb chunking. The following example uses a chunkSize of 20 and invokes the specified Consumer (e.g. System.out.println) for each received QueryResult -```java -Query query = new Query("SELECT idle FROM cpu", dbName); -influxDB.query(query, 20, queryResult -> System.out.println(queryResult)); +```bash +compile group: 'org.influxdb', name: 'influxdb-java', version: "${influxdbClientVersion}" ``` +## Features + +* Querying data using: + * [Influx Query Language (InfluxQL)](https://docs.influxdata.com/influxdb/v1.7/query_language/), with support for [bind parameters](https://docs.influxdata.com/influxdb/v1.7/tools/api/#bind-parameters) (similar to [JDBC PreparedStatement parameters](https://docs.oracle.com/javase/tutorial/jdbc/basics/prepared.html#supply_values_ps)); + * it's own [QueryBuilder](https://github.com/influxdata/influxdb-java/blob/master/QUERY_BUILDER.md), as you would do with e.g. EclipseLink or Hibernate; + * Message Pack (requires InfluxDB [1.4+](https://www.influxdata.com/blog/whats-new-influxdb-oss-1-4/)); +* Writing data using: + * Data Point (an object provided by this library that represents a ... 
data point); + * Your own POJO (you need to add a few Java Annotations); + * [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/) (for the braves only); + * UDP, as [supported by InfluxDB](https://docs.influxdata.com/influxdb/v1.7/supported_protocols/udp/); +* Support synchronous and asynchronous writes; +* Batch support configurable with `jitter` interval, `buffer` size and `flush` interval. + +## Quick start + +```Java +// Create an object to handle the communication with InfluxDB. +// (best practice tip: reuse the 'influxDB' instance when possible) +final String serverURL = "http://127.0.0.1:8086", username = "root", password = "root"; +final InfluxDB influxDB = InfluxDBFactory.connect(serverURL, username, password); + +// Create a database... +// https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/ +String databaseName = "NOAA_water_database"; +influxDB.query(new Query("CREATE DATABASE " + databaseName)); +influxDB.setDatabase(databaseName); + +// ... and a retention policy, if necessary. +// https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/ +String retentionPolicyName = "one_day_only"; +influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName + + " ON " + databaseName + " DURATION 1d REPLICATION 1 DEFAULT")); +influxDB.setRetentionPolicy(retentionPolicyName); + +// Enable batch writes to get better performance. +influxDB.enableBatch( + BatchOptions.DEFAULTS + .threadFactory(runnable -> { + Thread thread = new Thread(runnable); + thread.setDaemon(true); + return thread; + }) +); + +// Close it if your application is terminating or you are not using it anymore. +Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close)); + +// Write points to InfluxDB. 
+influxDB.write(Point.measurement("h2o_feet") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .tag("location", "santa_monica") + .addField("level description", "below 3 feet") + .addField("water_level", 2.064d) + .build()); + +influxDB.write(Point.measurement("h2o_feet") + .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) + .tag("location", "coyote_creek") + .addField("level description", "between 6 and 9 feet") + .addField("water_level", 8.12d) + .build()); + +// Wait a few seconds in order to let the InfluxDB client +// write your points asynchronously (note: you can adjust the +// internal time interval if you need via 'enableBatch' call). +Thread.sleep(5_000L); + +// Query your data using InfluxQL. +// https://docs.influxdata.com/influxdb/v1.7/query_language/data_exploration/#the-basic-select-statement +QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet")); + +System.out.println(queryResult); +// It will print something like: +// QueryResult [results=[Result [series=[Series [name=h2o_feet, tags=null, +// columns=[time, level description, location, water_level], +// values=[ +// [2020-03-22T20:50:12.929Z, below 3 feet, santa_monica, 2.064], +// [2020-03-22T20:50:12.929Z, between 6 and 9 feet, coyote_creek, 8.12] +// ]]], error=null]], error=null] +``` -### Other Usages: -For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java") - -## Version +## Contribute -The latest version for maven dependence: -```xml - - org.influxdb - influxdb-java - 2.5 - -``` For version change history have a look at [ChangeLog](https://github.com/influxdata/influxdb-java/blob/master/CHANGELOG.md). 
- ### Build Requirements * Java 1.8+ -* Maven 3.0+ -* Docker daemon running +* Maven 3.5+ +* Docker (for Unit testing) Then you can build influxdb-java with all tests with: ```bash -$ mvn clean install -``` +$> export INFLUXDB_IP=127.0.0.1 -If you don't have Docker running locally, you can skip tests with -DskipTests flag set to true: +$> mvn clean install -```bash -$ mvn clean install -DskipTests=true ``` -If you have Docker running, but it is not at localhost (e.g. you are on a Mac and using `docker-machine`) you can set an optional environment variable `INFLUXDB_IP` to point to the correct IP address: +There is a shell script running InfluxDB and Maven from inside a Docker container and you can execute it by running: ```bash -$ export INFLUXDB_IP=192.168.99.100 -$ mvn test +$> ./compile-and-test.sh ``` -For convenience we provide a small shell script which starts a influxdb server locally and executes `mvn clean install` with all tests inside docker containers. +## Useful links -```bash -$ ./compile-and-test.sh -``` +* [Manual](MANUAL.md) (main documentation); +* [InfluxDB Object Mapper](INFLUXDB_MAPPER.md); +* [Query Builder](QUERY_BUILDER.md); +* [FAQ](FAQ.md); +* [Changelog](CHANGELOG.md). + +## License +```license +The MIT License (MIT) -### Publishing +Copyright (c) 2014 Stefan Majer -This is a -[link](https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide) -to the sonatype oss guide to publishing. I'll update this section once -the [jira ticket](https://issues.sonatype.org/browse/OSSRH-9728) is -closed and I'm able to upload artifacts to the sonatype repositories. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+``` diff --git a/checkstyle.xml b/checkstyle.xml index d27f8b51d..921bab224 100644 --- a/checkstyle.xml +++ b/checkstyle.xml @@ -27,9 +27,13 @@ - - + + + + + + @@ -61,17 +65,10 @@ - - - - - - - diff --git a/compile-and-test.sh b/compile-and-test.sh index 660181a21..633ff51e2 100755 --- a/compile-and-test.sh +++ b/compile-and-test.sh @@ -4,29 +4,72 @@ # set -e -INFLUXDB_VERSIONS="1.2 1.1" - -for version in ${INFLUXDB_VERSIONS} -do - echo "Tesing againts influxdb ${version}" - docker kill influxdb || true - docker rm influxdb || true - docker pull influxdb:${version}-alpine || true - docker run \ - --detach \ - --name influxdb \ - --publish 8086:8086 \ - --publish 8089:8089/udp \ - --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \ - influxdb:${version}-alpine - - docker run -it --rm \ - --volume $PWD:/usr/src/mymaven \ - --volume $PWD/.m2:/root/.m2 \ - --workdir /usr/src/mymaven \ - --link=influxdb \ - --env INFLUXDB_IP=influxdb \ - maven:alpine mvn clean install - - docker kill influxdb || true -done +DEFAULT_INFLUXDB_VERSION="1.8" +DEFAULT_MAVEN_JAVA_VERSION="3-openjdk-17-slim" + +INFLUXDB_VERSION="${INFLUXDB_VERSION:-$DEFAULT_INFLUXDB_VERSION}" +MAVEN_JAVA_VERSION="${MAVEN_JAVA_VERSION:-$DEFAULT_MAVEN_JAVA_VERSION}" + +echo "Run tests with maven:${MAVEN_JAVA_VERSION} on influxdb-${INFLUXDB_VERSION}" +docker kill influxdb || true +docker rm influxdb || true +docker pull influxdb:${INFLUXDB_VERSION}-alpine || true +docker run \ + --detach \ + --name influxdb \ + --publish 8086:8086 \ + --publish 8089:8089/udp \ + --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \ + --env DOCKER_INFLUXDB_INIT_MODE=setup \ + --env DOCKER_INFLUXDB_INIT_USERNAME=my-user \ + --env DOCKER_INFLUXDB_INIT_PASSWORD=my-password \ + --env DOCKER_INFLUXDB_INIT_ORG=my-org \ + --env DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \ + --env DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-token \ + influxdb:${INFLUXDB_VERSION}-alpine + +echo "Starting Nginx" +docker kill nginx || true +docker rm nginx 
|| true + +docker run \ + --detach \ + --name nginx \ + --publish 8080:8080 \ + --publish 8080:8080/udp \ + --volume ${PWD}/src/test/nginx/nginx.conf:/etc/nginx/nginx.conf:ro \ + --link influxdb:influxdb \ + nginx:stable-alpine nginx '-g' 'daemon off;' + +echo "Running tests" +PROXY_API_URL=http://nginx:8080/influx-api/ +PROXY_UDP_PORT=8080 +if [[ "$INFLUXDB_VERSION" == "2."* ]] +then + TEST_EXPRESSION="InfluxDB2Test" + # Wait to start InfluxDB + docker run --link influxdb:influxdb ubuntu:20.04 bash -c "apt-get update \ + && apt-get install wget --yes \ + && wget -S --spider --tries=20 --retry-connrefused --waitretry=5 http://influxdb:8086/ping" + # Create DBRP Mapping + BUCKET_ID=$(docker exec influxdb bash -c "influx bucket list -o my-org -n my-bucket | grep my-bucket | xargs | cut -d ' ' -f 0") + docker exec influxdb bash -c "influx v1 dbrp create -o my-org --db mydb --rp autogen --default --bucket-id ${BUCKET_ID}" + docker exec influxdb bash -c "influx v1 auth create -o my-org --username my-user --password my-password --read-bucket ${BUCKET_ID} --write-bucket ${BUCKET_ID}" +else + TEST_EXPRESSION="*" +fi + +docker run --rm \ + --volume ${PWD}:/usr/src/mymaven \ + --volume ${PWD}/.m2:/root/.m2 \ + --workdir /usr/src/mymaven \ + --link=influxdb \ + --link=nginx \ + --env INFLUXDB_VERSION=${INFLUXDB_VERSION} \ + --env INFLUXDB_IP=influxdb \ + --env PROXY_API_URL=${PROXY_API_URL} \ + --env PROXY_UDP_PORT=${PROXY_UDP_PORT} \ + maven:${MAVEN_JAVA_VERSION} mvn clean install -Dtest="${TEST_EXPRESSION}" + +docker kill influxdb || true +docker kill nginx || true diff --git a/deploy-snapshot.sh b/deploy-snapshot.sh new file mode 100755 index 000000000..60a454ec2 --- /dev/null +++ b/deploy-snapshot.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + +# Parse project version from pom.xml +PROJECT_VERSION=$(xmllint --xpath "//*[local-name()='project']/*[local-name()='version']/text()" pom.xml) +export PROJECT_VERSION +echo "Project version: $PROJECT_VERSION" + +# Skip if 
not *SNAPSHOT +if [[ $PROJECT_VERSION != *SNAPSHOT ]]; then + echo "$PROJECT_VERSION is not SNAPSHOT - skip deploy."; + exit; +fi + + +DEFAULT_MAVEN_JAVA_VERSION="3-jdk-8-slim" +MAVEN_JAVA_VERSION="${MAVEN_JAVA_VERSION:-$DEFAULT_MAVEN_JAVA_VERSION}" +echo "Deploy snapshot with maven:${MAVEN_JAVA_VERSION}" + +docker run --rm \ + --volume ${PWD}:/usr/src/mymaven \ + --volume ${PWD}/.m2:/root/.m2 \ + --workdir /usr/src/mymaven \ + --env SONATYPE_USERNAME=${SONATYPE_USERNAME} \ + --env SONATYPE_PASSWORD=${SONATYPE_PASSWORD} \ + maven:${MAVEN_JAVA_VERSION} mvn -s .maven-settings.xml -DskipTests=true clean package deploy diff --git a/format-sources.sh b/format-sources.sh new file mode 100755 index 000000000..98d0bc92f --- /dev/null +++ b/format-sources.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +wget https://github.com/google/google-java-format/releases/download/google-java-format-1.4/google-java-format-1.4-all-deps.jar + +JAVA_FILES=$(find src/ -name "*.java") + +for JAVA_FILE in ${JAVA_FILES} +do + echo "formatting ${JAVA_FILE}" + docker run -it --rm \ + -v $PWD:/mnt \ + openjdk java -jar /mnt/google-java-format-1.4-all-deps.jar -r /mnt/${JAVA_FILE} +done diff --git a/mvn.sh b/mvn.sh index 6711aec46..cd99af3e6 100755 --- a/mvn.sh +++ b/mvn.sh @@ -8,4 +8,4 @@ docker run -it --rm \ -v $PWD:/usr/src/mymaven \ -v $PWD/.m2:/root/.m2 \ -w /usr/src/mymaven \ - maven:alpine mvn clean "$@" + maven:3-openjdk-17-slim mvn clean "$@" diff --git a/pom.xml b/pom.xml index 2c04b3516..21ec65cf4 100644 --- a/pom.xml +++ b/pom.xml @@ -1,24 +1,14 @@ - - org.sonatype.oss - oss-parent - 7 - - 4.0.0 org.influxdb influxdb-java jar - 2.5-SNAPSHOT + 2.26-SNAPSHOT influxdb java bindings Java API to access the InfluxDB REST API http://www.influxdb.org - - 3.2.1 - - The MIT License (MIT) @@ -34,6 +24,7 @@ scm:git:git@github.com:influxdata/influxdb-java.git scm:git:git@github.com:influxdata/influxdb-java.git git@github.com:influxdata/influxdb-java.git + influxdb-java-2.25 @@ -48,7 +39,7 @@ 
org.codehaus.mojo findbugs-maven-plugin - 3.0.4 + 3.0.5 true @@ -57,35 +48,163 @@ + + + + ossrh + https://central.sonatype.com/repository/maven-snapshots + + + ossrh + https://ossrh-staging-api.central.sonatype.com/service/local/staging/deploy/maven2/ + + + + + + + src/main/resources + + docker-compose.yml + + + + + org.codehaus.mojo + versions-maven-plugin + 2.16.2 + org.apache.maven.plugins maven-compiler-plugin - 3.6.1 + 3.12.1 1.8 1.8 + + + -parameters + org.apache.maven.plugins maven-surefire-plugin - 2.20 + 3.2.5 org.apache.maven.plugins maven-site-plugin - 3.6 + 3.12.1 + + + org.apache.maven.plugins + maven-clean-plugin + 3.3.2 + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.1 + + + org.apache.maven.plugins + maven-install-plugin + 3.1.1 + + + org.apache.maven.plugins + maven-jar-plugin + 3.3.0 + + + + true + + + + + + org.apache.maven.plugins + maven-resources-plugin + 3.3.1 + + + org.apache.maven.plugins + maven-release-plugin + 3.0.1 + + org.apache.maven.plugins + maven-enforcer-plugin + 3.4.1 + + + enforce-maven + + enforce + + + + + 3.3.9 + + + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.13 + true + + ossrh + https://ossrh-staging-api.central.sonatype.com/ + true + 15 + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.6.3 + + 8 + + + + attach-javadocs + + jar + + + + org.jacoco jacoco-maven-plugin - 0.7.9 + 0.8.11 @@ -104,7 +223,14 @@ org.apache.maven.plugins maven-checkstyle-plugin - 2.17 + 3.3.1 + + + com.puppycrawl.tools + checkstyle + 9.3 + + true checkstyle.xml @@ -121,99 +247,213 @@ org.apache.maven.plugins - maven-shade-plugin - 3.0.0 - - - package - - shade - - - + maven-release-plugin - true - - - com.google.guava:guava - - - - - com.google.common - org.influxdb.com.google.guava - - - - - *:* - - META-INF/license/** - META-INF/* - META-INF/maven/** - LICENSE - NOTICE - /*.txt - build.properties - - - 
+ + release - - junit - junit - 4.12 + org.junit.jupiter + junit-jupiter-engine + 5.9.3 test - org.assertj - assertj-core - 3.7.0 + org.junit.platform + junit-platform-runner + 1.9.3 test - org.mockito - mockito-core - 2.8.9 + org.hamcrest + hamcrest-all + 1.3 test - org.slf4j - slf4j-simple - 1.7.25 + org.assertj + assertj-core + 3.27.7 test - com.google.guava - guava - 21.0 + org.mockito + mockito-core + 4.10.0 + test com.squareup.retrofit2 retrofit - 2.3.0 + 2.9.0 + + + com.squareup.okhttp3 + okhttp + + com.squareup.retrofit2 converter-moshi - 2.3.0 + 2.9.0 + + + com.squareup.okio + okio + + + + + org.msgpack + msgpack-core + 0.9.11 + of the influxdb server address resolves to all influxdb server ips. --> com.squareup.okhttp3 okhttp - 3.8.0 + 4.12.0 com.squareup.okhttp3 logging-interceptor - 3.8.0 + 4.12.0 + + + release + + influxdb:alpine + + + + + maven-resources-plugin + 3.3.1 + + + copy-resources + + validate + + copy-resources + + + ${project.build.directory} + + + src/main/resources + true + + docker-compose.yml + + + + + + + + + com.dkanejs.maven.plugins + docker-compose-maven-plugin + 4.0.0 + + + up + process-test-resources + + up + + + ${project.build.directory}/docker-compose.yml + true + + + + down + post-integration-test + + down + + + ${project.build.directory}/docker-compose.yml + true + + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 3.1.0 + + + sign-artifacts + verify + + sign + + + + + + + + + + java17 + + 17 + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.3.0 + + + add-test-source + generate-test-sources + + add-test-source + + + + src/test-jdk17/java + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + true + + + 17 + 17 + + + -parameters + --add-opens=java.base/java.lang=ALL-UNNAMED + --add-opens=java.base/java.util=ALL-UNNAMED + + + + + + + diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java new file mode 100644 index 000000000..af1302a7e --- /dev/null +++ 
b/src/main/java/org/influxdb/BatchOptions.java @@ -0,0 +1,250 @@ +package org.influxdb; + +import org.influxdb.dto.Point; + +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * BatchOptions are used to configure batching of individual data point writes + * into InfluxDB. See {@link InfluxDB#enableBatch(BatchOptions)} + */ +public final class BatchOptions implements Cloneable { + + // default values here are consistent with Telegraf + public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000; + public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000; + public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0; + public static final int DEFAULT_BUFFER_LIMIT = 10000; + public static final TimeUnit DEFAULT_PRECISION = TimeUnit.NANOSECONDS; + public static final boolean DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION = false; + + + /** + * Default batch options. This class is immutable, each configuration + * is built by taking the DEFAULTS and setting specific configuration + * properties. 
+ */ + public static final BatchOptions DEFAULTS = new BatchOptions(); + + private int actions = DEFAULT_BATCH_ACTIONS_LIMIT; + private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION; + private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION; + private int bufferLimit = DEFAULT_BUFFER_LIMIT; + private TimeUnit precision = DEFAULT_PRECISION; + private boolean dropActionsOnQueueExhaustion = DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION; + private Consumer droppedActionHandler = (point) -> { + }; + + private ThreadFactory threadFactory = Executors.defaultThreadFactory(); + BiConsumer, Throwable> exceptionHandler = (points, throwable) -> { + }; + + private InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE; + + private BatchOptions() { + } + + /** + * @param actions the number of actions to collect + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions actions(final int actions) { + BatchOptions clone = getClone(); + clone.actions = actions; + return clone; + } + + /** + * @param flushDuration the time to wait at most (milliseconds). + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions flushDuration(final int flushDuration) { + BatchOptions clone = getClone(); + clone.flushDuration = flushDuration; + return clone; + } + + /** + * Jitters the batch flush interval by a random amount. This is primarily to avoid + * large write spikes for users running a large number of client instances. + * ie, a jitter of 5s and flush duration 10s means flushes will happen every 10-15s. + * + * @param jitterDuration (milliseconds) + * @return the BatchOptions instance to be able to use it in a fluent manner. 
+ */ + public BatchOptions jitterDuration(final int jitterDuration) { + BatchOptions clone = getClone(); + clone.jitterDuration = jitterDuration; + return clone; + } + + /** + * The client maintains a buffer for failed writes so that the writes will be retried later on. This may + * help to overcome temporary network problems or InfluxDB load spikes. + * When the buffer is full and new points are written, oldest entries in the buffer are lost. + * + * To disable this feature set buffer limit to a value smaller than {@link BatchOptions#getActions} + * + * @param bufferLimit maximum number of points stored in the retry buffer + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions bufferLimit(final int bufferLimit) { + BatchOptions clone = getClone(); + clone.bufferLimit = bufferLimit; + return clone; + } + + /** + * @param threadFactory a ThreadFactory instance to be used + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions threadFactory(final ThreadFactory threadFactory) { + BatchOptions clone = getClone(); + clone.threadFactory = threadFactory; + return clone; + } + + /** + * @param exceptionHandler a consumer function to handle asynchronous errors + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions exceptionHandler(final BiConsumer, Throwable> exceptionHandler) { + BatchOptions clone = getClone(); + clone.exceptionHandler = exceptionHandler; + return clone; + } + + /** + * @param consistency cluster consistency setting (how many nodes have to store data points + * to treat a write as a success) + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) { + BatchOptions clone = getClone(); + clone.consistency = consistency; + return clone; + } + + /** + * Set the time precision to use for the whole batch. 
If unspecified, will default to {@link TimeUnit#NANOSECONDS}. + * @param precision sets the precision to use + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions precision(final TimeUnit precision) { + BatchOptions clone = getClone(); + clone.precision = precision; + return clone; + } + + /** + * Set to define the behaviour when the action queue exhausts. If unspecified, will default to false which means + * that the {@link InfluxDB#write(Point)} will be blocked till the space in the queue is created. + * true means that the newer actions being written to the queue will be dropped and + * {@link BatchOptions#droppedActionHandler} will be called. + * @param dropActionsOnQueueExhaustion sets the behavior + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions dropActionsOnQueueExhaustion(final boolean dropActionsOnQueueExhaustion) { + BatchOptions clone = getClone(); + clone.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion; + return clone; + } + + /** + * Handler to handle dropped actions due to queue actions. This is only valid when + * {@link BatchOptions#dropActionsOnQueueExhaustion} is set to true. + * @param droppedActionHandler to handle action drops on action queue exhaustion. + * @return the BatchOptions instance to be able to use it in a fluent manner. + */ + public BatchOptions droppedActionHandler(final Consumer droppedActionHandler) { + BatchOptions clone = getClone(); + clone.droppedActionHandler = droppedActionHandler; + return clone; + } + + + /** + * @return actions the number of actions to collect + */ + public int getActions() { + return actions; + } + + /** + * @return flushDuration the time to wait at most (milliseconds). 
+ */ + public int getFlushDuration() { + return flushDuration; + } + + /** + * @return batch flush interval jitter value (milliseconds) + */ + public int getJitterDuration() { + return jitterDuration; + } + + /** + * @return Maximum number of points stored in the retry buffer, see {@link BatchOptions#bufferLimit(int)} + */ + public int getBufferLimit() { + return bufferLimit; + } + + /** + * @return a ThreadFactory instance to be used + */ + public ThreadFactory getThreadFactory() { + return threadFactory; + } + + /** + * @return a consumer function to handle asynchronous errors + */ + public BiConsumer, Throwable> getExceptionHandler() { + return exceptionHandler; + } + + /** + * @return cluster consistency setting (how many nodes have to store data points + * to treat a write as a success) + */ + public InfluxDB.ConsistencyLevel getConsistency() { + return consistency; + } + + /** + * @return the time precision + */ + public TimeUnit getPrecision() { + return precision; + } + + + /** + * @return a boolean determining whether to drop actions on action queue exhaustion. + */ + public boolean isDropActionsOnQueueExhaustion() { + return dropActionsOnQueueExhaustion; + } + + /** + * @return a consumer function to handle actions drops on action queue exhaustion. + */ + public Consumer getDroppedActionHandler() { + return droppedActionHandler; + } + + private BatchOptions getClone() { + try { + return (BatchOptions) this.clone(); + } catch (CloneNotSupportedException e) { + throw new RuntimeException(e); + } + } + +} diff --git a/src/main/java/org/influxdb/BuilderException.java b/src/main/java/org/influxdb/BuilderException.java new file mode 100644 index 000000000..34ad6ca0d --- /dev/null +++ b/src/main/java/org/influxdb/BuilderException.java @@ -0,0 +1,18 @@ +package org.influxdb; + +/** + * Class for exceptions when using Point Builder. + * + * @author mirza99 + */ +public class BuilderException extends RuntimeException { + + /** + * Generated serial version UID. 
+ */ + private static final long serialVersionUID = 4178882805281378918L; + + public BuilderException(final String message) { + super(message); + } +} diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java index cad445c05..56f842f35 100644 --- a/src/main/java/org/influxdb/InfluxDB.java +++ b/src/main/java/org/influxdb/InfluxDB.java @@ -5,6 +5,7 @@ import org.influxdb.dto.Pong; import org.influxdb.dto.Query; import org.influxdb.dto.QueryResult; +import retrofit2.Call; import java.util.List; import java.util.concurrent.ThreadFactory; @@ -24,7 +25,13 @@ * @author stefan.majer [at] gmail.com * */ -public interface InfluxDB { +public interface InfluxDB extends AutoCloseable { + + /** + * The system property key to set the http logging level across the JVM. + * @see LogLevel for available values + */ + public static final String LOG_LEVEL_PROPERTY = "org.influxdb.InfluxDB.logLevel"; /** Controls the level of logging of the REST layer. */ public enum LogLevel { @@ -40,6 +47,24 @@ public enum LogLevel { * Note: This requires that the entire request and response body be buffered in memory! */ FULL; + /** + * Parses the string argument as a LogLevel constant. + * @param value a {@code String} containing the {@code LogLevel constant} + * representation to be parsed + * @return the LogLevel constant representation of the param + * or {@code NONE} for null or any invalid String representation. + */ + public static LogLevel parseLogLevel(final String value) { + LogLevel logLevel = NONE; + if (value != null) { + try { + logLevel = valueOf(value.toUpperCase()); + } catch (IllegalArgumentException e) { + } + } + + return logLevel; + } } /** @@ -70,6 +95,37 @@ public String value() { } } + /** + * Format of HTTP Response body from InfluxDB server. + */ + public enum ResponseFormat { + /** application/json format. */ + JSON, + /** application/x-msgpack format. */ + MSGPACK + } + + /** + * A cancelable allows to discontinue a streaming query. 
+ */ + public interface Cancellable { + + /** + * Cancel the streaming query call. + * + * @see Call#cancel() + */ + void cancel(); + + /** + * Return {@code true} if the {@link Cancellable#cancel()} was called. + * + * @return {@code true} if the {@link Cancellable#cancel()} was called + * @see Call#isCanceled() + */ + boolean isCanceled(); + } + /** * Set the loglevel which is used for REST related actions. * @@ -81,36 +137,102 @@ public String value() { /** * Enable Gzip compress for http request body. + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableGzip(); /** * Disable Gzip compress for http request body. + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB disableGzip(); /** * Returns whether Gzip compress for http request body is enabled. + * @return true if gzip is enabled. */ public boolean isGzipEnabled(); + /** + * Enable batching of single Point writes to speed up writes significantly. This is the same as calling + * InfluxDB.enableBatch(BatchOptions.DEFAULTS) + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB enableBatch(); + + /** + * Enable batching of single Point writes to speed up writes significantly. If either number of points written or + * flushDuration time limit is reached, a batch write is issued. + * Note that batch processing needs to be explicitly stopped before the application is shutdown. + * To do so call disableBatch(). + * + * @param batchOptions + * the options to set for batching the writes. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB enableBatch(final BatchOptions batchOptions); + /** * Enable batching of single Point writes as {@link #enableBatch(int, int, TimeUnit, ThreadFactory)}} * using {@linkplain java.util.concurrent.Executors#defaultThreadFactory() default thread factory}. 
* + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. + * * @see #enableBatch(int, int, TimeUnit, ThreadFactory) + * + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit); /** * Enable batching of single Point writes as - * {@link #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer, Throwable>)} + * {@link #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer)} * using with a exceptionHandler that does nothing. * - * @see #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer, Throwable>) + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. + * @param threadFactory + * a ThreadFactory instance to be used. + * + * @see #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer) + * + * @return the InfluxDB instance to be able to use it in a fluent manner. */ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory); + /** + * Enable batching of single Point writes with consistency set for an entire batch + * flushDurations is reached first, a batch write is issued. + * Note that batch processing needs to be explicitly stopped before the application is shutdown. + * To do so call disableBatch(). Default consistency is ONE. + * + * @param actions + * the number of actions to collect + * @param flushDuration + * the time to wait at most. + * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. + * @param threadFactory + * a ThreadFactory instance to be used. 
+ * @param exceptionHandler + * a consumer function to handle asynchronous errors + * @param consistency + * a consistency setting for batch writes. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + + InfluxDB enableBatch(int actions, int flushDuration, TimeUnit flushDurationTimeUnit, + ThreadFactory threadFactory, BiConsumer, Throwable> exceptionHandler, + ConsistencyLevel consistency); /** * Enable batching of single Point writes to speed up writes significant. If either actions or @@ -123,7 +245,9 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti * @param flushDuration * the time to wait at most. * @param flushDurationTimeUnit + * the TimeUnit for the given flushDuration. * @param threadFactory + * a ThreadFactory instance to be used. * @param exceptionHandler * a consumer function to handle asynchronous errors * @return the InfluxDB instance to be able to use it in a fluent manner. @@ -139,6 +263,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti /** * Returns whether Batching is enabled. + * @return true if batch is enabled. */ public boolean isBatchEnabled(); @@ -156,6 +281,30 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti */ public String version(); + /** + * Write a single Point to the default database. + * + * @param point + * The point to write + */ + public void write(final Point point); + + /** + * Write a set of Points to the default database with the string records. + * + * @param records + * the points in the correct lineprotocol. + */ + public void write(final String records); + + /** + * Write a set of Points to the default database with the list of string records. + * + * @param records + * the List of points in the correct lineprotocol. + */ + public void write(final List records); + /** * Write a single Point to the database. 
* @@ -179,39 +328,111 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti public void write(final int udpPort, final Point point); /** - * Write a set of Points to the influxdb database with the new (>= 0.9.0rc32) lineprotocol. + * Write a set of Points to the influxdb database with the new (>= 0.9.0rc32) lineprotocol. * - * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"} + * @see 2696 * * @param batchPoints + * the points to write in BatchPoints. */ public void write(final BatchPoints batchPoints); + /** + * Write a set of Points to the influxdb database with the new (>= 0.9.0rc32) lineprotocol. + * + * If batching is enabled with appropriate {@code BatchOptions} settings + * ({@code BatchOptions.bufferLimit} greater than {@code BatchOptions.actions}) + * This method will try to retry in case of some recoverable errors. + * Otherwise it just works as {@link #write(BatchPoints)} + * + * @see 2696 + * @see + * Retry worth errors + * + * @param batchPoints + * the points to write in BatchPoints. + */ + public void writeWithRetry(final BatchPoints batchPoints); + /** * Write a set of Points to the influxdb database with the string records. * - * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"} + * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use * @param records + * the points in the correct lineprotocol. */ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final String records); + /** + * Write a set of Points to the influxdb database with the string records. 
+ * + * @see 2696 + * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use + * @param precision + * the time precision to use + * @param records + * the points in the correct lineprotocol. + */ + public void write(final String database, final String retentionPolicy, + final ConsistencyLevel consistency, final TimeUnit precision, final String records); + /** * Write a set of Points to the influxdb database with the list of string records. * - * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"} + * @see 2696 * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use * @param records + * the List of points in the correct lineprotocol. */ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final List records); + /** + * Write a set of Points to the influxdb database with the list of string records. + * + * @see 2696 + * + * @param database + * the name of the database to write + * @param retentionPolicy + * the retentionPolicy to use + * @param consistency + * the ConsistencyLevel to use + * @param precision + * the time precision to use + * @param records + * the List of points in the correct lineprotocol. + */ + public void write(final String database, final String retentionPolicy, + final ConsistencyLevel consistency, final TimeUnit precision, final List records); + /** * Write a set of Points to the influxdb database with the string records through UDP. * * @param udpPort - * @param records the content will be encoded by UTF-8 before sent. + * the udpPort where influxdb is listening + * @param records + * the content will be encoded by UTF-8 before sent. 
*/ public void write(final int udpPort, final String records); @@ -219,7 +440,9 @@ public void write(final String database, final String retentionPolicy, * Write a set of Points to the influxdb database with the list of string records through UDP. * * @param udpPort - * @param records list of record, the content will be encoded by UTF-8 before sent. + * the udpPort where influxdb is listening + * @param records + * list of record, the content will be encoded by UTF-8 before sent. */ public void write(final int udpPort, final List records); @@ -232,6 +455,44 @@ public void write(final String database, final String retentionPolicy, */ public QueryResult query(final Query query); + /** + * Execute a query against a database. + * + * One of the consumers will be executed. + * + * @param query + * the query to execute. + * @param onSuccess + * the consumer to invoke when result is received + * @param onFailure + * the consumer to invoke when error is thrown + */ + public void query(final Query query, final Consumer onSuccess, final Consumer onFailure); + + /** + * Execute a streaming query against a database. + * + * @param query + * the query to execute. + * @param chunkSize + * the number of QueryResults to process in one chunk. + * @param onNext + * the consumer to invoke for each received QueryResult + */ + public void query(Query query, int chunkSize, Consumer onNext); + + /** + * Execute a streaming query against a database. + * + * @param query + * the query to execute. + * @param chunkSize + * the number of QueryResults to process in one chunk. + * @param onNext + * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query + */ + public void query(Query query, int chunkSize, BiConsumer onNext); + /** * Execute a streaming query against a database. * @@ -239,10 +500,43 @@ public void write(final String database, final String retentionPolicy, * the query to execute. 
* @param chunkSize * the number of QueryResults to process in one chunk. - * @param consumer + * @param onNext * the consumer to invoke for each received QueryResult + * @param onComplete + * the onComplete to invoke for successfully end of stream + */ + public void query(Query query, int chunkSize, Consumer onNext, Runnable onComplete); + + /** + * Execute a streaming query against a database. + * + * @param query + * the query to execute. + * @param chunkSize + * the number of QueryResults to process in one chunk. + * @param onNext + * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query + * @param onComplete + * the onComplete to invoke for successfully end of stream + */ + public void query(Query query, int chunkSize, BiConsumer onNext, Runnable onComplete); + + /** + * Execute a streaming query against a database. + * + * @param query + * the query to execute. + * @param chunkSize + * the number of QueryResults to process in one chunk. + * @param onNext + * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query + * @param onComplete + * the onComplete to invoke for successfully end of stream + * @param onFailure + * the consumer for error handling */ - public void query(Query query, int chunkSize, Consumer consumer); + public void query(Query query, int chunkSize, BiConsumer onNext, Runnable onComplete, + Consumer onFailure); /** * Execute a query against a database. @@ -259,7 +553,10 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the new database. + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a parameterized CREATE DATABASE query. */ + @Deprecated public void createDatabase(final String name); /** @@ -267,14 +564,20 @@ public void write(final String database, final String retentionPolicy, * * @param name * the name of the database to delete. 
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a DROP DATABASE query. */ + @Deprecated public void deleteDatabase(final String name); /** * Describe all available databases. * * @return a List of all Database names. + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a SHOW DATABASES query. */ + @Deprecated public List describeDatabases(); /** @@ -284,7 +587,10 @@ public void write(final String database, final String retentionPolicy, * the name of the database to search. * * @return true if the database exists or false if it doesn't exist + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a SHOW DATABASES query and inspect the result. */ + @Deprecated public boolean databaseExists(final String name); /** @@ -300,4 +606,83 @@ public void write(final String database, final String retentionPolicy, */ public void close(); + /** + * Set the consistency level which is used for writing points. + * + * @param consistency + * the consistency level to set. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB setConsistency(final ConsistencyLevel consistency); + + /** + * Set the database which is used for writing points. + * + * @param database + * the database to set. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB setDatabase(final String database); + + /** + * Set the retention policy which is used for writing points. + * + * @param retentionPolicy + * the retention policy to set. + * @return the InfluxDB instance to be able to use it in a fluent manner. + */ + public InfluxDB setRetentionPolicy(final String retentionPolicy); + + /** + * Creates a retentionPolicy. 
+ * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param shardDuration the shardDuration + * @param replicationFactor the replicationFactor of the rp + * @param isDefault if the rp is the default rp for the database or not + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a parameterized CREATE RETENTION POLICY query. + */ + @Deprecated + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor, final boolean isDefault); + + /** + * Creates a retentionPolicy. (optional shardDuration) + * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param replicationFactor the replicationFactor of the rp + * @param isDefault if the rp is the default rp for the database or not + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a parameterized CREATE RETENTION POLICY query. + */ + @Deprecated + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final int replicationFactor, final boolean isDefault); + + /** + * Creates a retentionPolicy. (optional shardDuration and isDefault) + * @param rpName the name of the retentionPolicy(rp) + * @param database the name of the database + * @param duration the duration of the rp + * @param shardDuration the shardDuration + * @param replicationFactor the replicationFactor of the rp + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a parameterized CREATE RETENTION POLICY query. 
+ */ + @Deprecated + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor); + + /** + * Drops a retentionPolicy in a database. + * @param rpName the name of the retentionPolicy + * @param database the name of the database + * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query) + * to execute a DROP RETENTION POLICY query. + */ + @Deprecated + public void dropRetentionPolicy(final String rpName, final String database); } diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java new file mode 100644 index 000000000..9f826f8eb --- /dev/null +++ b/src/main/java/org/influxdb/InfluxDBException.java @@ -0,0 +1,195 @@ +package org.influxdb; + +import java.io.InputStream; + +import org.msgpack.core.MessagePack; +import org.msgpack.core.MessageUnpacker; +import org.msgpack.value.ImmutableMapValue; +import org.msgpack.value.impl.ImmutableStringValueImpl; + +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.Moshi; + +/** + * A wrapper for various exceptions caused while interacting with InfluxDB. + * + * @author Simon Legner + */ +public class InfluxDBException extends RuntimeException { + + public InfluxDBException(final String message) { + super(message); + } + + public InfluxDBException(final String message, final Throwable cause) { + super(message, cause); + } + + public InfluxDBException(final Throwable cause) { + super(cause); + } + + /** + * @return true if the operation may succeed if repeated, false otherwise. 
+ */ + public boolean isRetryWorth() { + return true; + } + + /* See https://github.com/influxdata/influxdb/blob/master/tsdb/shard.go */ + static final String FIELD_TYPE_CONFLICT_ERROR = "field type conflict"; + /* See https://github.com/influxdata/influxdb/blob/master/coordinator/points_writer.go */ + static final String POINTS_BEYOND_RETENTION_POLICY_ERROR = "points beyond retention policy"; + /* See https://github.com/influxdata/influxdb/blob/master/models/points.go */ + static final String UNABLE_TO_PARSE_ERROR = "unable to parse"; + /* See https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb/influxdb.go */ + static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR = "hinted handoff queue not empty"; + /* See https://github.com/influxdata/influxdb/blob/master/tsdb/engine/tsm1/cache.go */ + static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR = "cache-max-memory-size exceeded"; + /* For all messages below see https://github.com/influxdata/influxdb/blob/master/services/httpd/handler.go */ + static final String DATABASE_NOT_FOUND_ERROR = "database not found"; + static final String USER_REQUIRED_ERROR = "user is required to write to database"; + static final String USER_NOT_AUTHORIZED_ERROR = "user is not authorized to write to database"; + static final String AUTHORIZATION_FAILED_ERROR = "authorization failed"; + static final String USERNAME_REQUIRED_ERROR = "username required"; + + public static final class DatabaseNotFoundException extends InfluxDBException { + private DatabaseNotFoundException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class HintedHandOffQueueNotEmptyException extends InfluxDBException { + private HintedHandOffQueueNotEmptyException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class UnableToParseException extends InfluxDBException { + private 
UnableToParseException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class FieldTypeConflictException extends InfluxDBException { + private FieldTypeConflictException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class PointsBeyondRetentionPolicyException extends InfluxDBException { + private PointsBeyondRetentionPolicyException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class CacheMaxMemorySizeExceededException extends InfluxDBException { + private CacheMaxMemorySizeExceededException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return true; + } + } + + public static final class RetryBufferOverrunException extends InfluxDBException { + public RetryBufferOverrunException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + public static final class AuthorizationFailedException extends InfluxDBException { + public AuthorizationFailedException(final String message) { + super(message); + } + + public boolean isRetryWorth() { + return false; + } + } + + private static InfluxDBException buildExceptionFromErrorMessage(final String errorMessage) { + if (errorMessage.contains(DATABASE_NOT_FOUND_ERROR)) { + return new DatabaseNotFoundException(errorMessage); + } + if (errorMessage.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) { + return new PointsBeyondRetentionPolicyException(errorMessage); + } + if (errorMessage.contains(FIELD_TYPE_CONFLICT_ERROR)) { + return new FieldTypeConflictException(errorMessage); + } + if (errorMessage.contains(UNABLE_TO_PARSE_ERROR)) { + return new UnableToParseException(errorMessage); + } + if (errorMessage.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) { + return new 
HintedHandOffQueueNotEmptyException(errorMessage); + } + if (errorMessage.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) { + return new CacheMaxMemorySizeExceededException(errorMessage); + } + if (errorMessage.contains(USER_REQUIRED_ERROR) + || errorMessage.contains(USER_NOT_AUTHORIZED_ERROR) + || errorMessage.contains(AUTHORIZATION_FAILED_ERROR) + || errorMessage.contains(USERNAME_REQUIRED_ERROR)) { + return new AuthorizationFailedException(errorMessage); + } + return new InfluxDBException(errorMessage); + } + + private static class ErrorMessage { + public String error; + } + + public static InfluxDBException buildExceptionForErrorState(final String errorBody) { + try { + Moshi moshi = new Moshi.Builder().build(); + JsonAdapter adapter = moshi.adapter(ErrorMessage.class).lenient(); + ErrorMessage errorMessage = adapter.fromJson(errorBody); + return InfluxDBException.buildExceptionFromErrorMessage(errorMessage.error); + } catch (Exception e) { + return new InfluxDBException(errorBody); + } + } + + /** + * Create corresponding InfluxDBException from the message pack error body. 
+ * @param messagePackErrorBody + * the error body if any + * @return the Exception + */ + public static InfluxDBException buildExceptionForErrorState(final InputStream messagePackErrorBody) { + try { + MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(messagePackErrorBody); + ImmutableMapValue mapVal = (ImmutableMapValue) unpacker.unpackValue(); + return InfluxDBException.buildExceptionFromErrorMessage( + mapVal.map().get(new ImmutableStringValueImpl("error")).toString()); + } catch (Exception e) { + return new InfluxDBException(e); + } + } +} diff --git a/src/main/java/org/influxdb/InfluxDBFactory.java b/src/main/java/org/influxdb/InfluxDBFactory.java index 01c3281fa..aee28d73a 100644 --- a/src/main/java/org/influxdb/InfluxDBFactory.java +++ b/src/main/java/org/influxdb/InfluxDBFactory.java @@ -1,11 +1,12 @@ package org.influxdb; +import org.influxdb.InfluxDB.ResponseFormat; import org.influxdb.impl.InfluxDBImpl; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; - import okhttp3.OkHttpClient; +import org.influxdb.impl.Preconditions; + +import java.util.Objects; /** @@ -25,7 +26,7 @@ public enum InfluxDBFactory { * @return a InfluxDB adapter suitable to access a InfluxDB. */ public static InfluxDB connect(final String url) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty."); + Preconditions.checkNonEmptyString(url, "url"); return new InfluxDBImpl(url, null, null, new OkHttpClient.Builder()); } @@ -42,8 +43,8 @@ public static InfluxDB connect(final String url) { * @return a InfluxDB adapter suitable to access a InfluxDB. 
*/ public static InfluxDB connect(final String url, final String username, final String password) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty."); - Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "The username may not be null or empty."); + Preconditions.checkNonEmptyString(url, "url"); + Preconditions.checkNonEmptyString(username, "username"); return new InfluxDBImpl(url, username, password, new OkHttpClient.Builder()); } @@ -57,8 +58,8 @@ public static InfluxDB connect(final String url, final String username, final St * @return a InfluxDB adapter suitable to access a InfluxDB. */ public static InfluxDB connect(final String url, final OkHttpClient.Builder client) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty."); - Preconditions.checkNotNull(client, "The client may not be null."); + Preconditions.checkNonEmptyString(url, "url"); + Objects.requireNonNull(client, "client"); return new InfluxDBImpl(url, null, null, client); } @@ -78,9 +79,30 @@ public static InfluxDB connect(final String url, final OkHttpClient.Builder clie */ public static InfluxDB connect(final String url, final String username, final String password, final OkHttpClient.Builder client) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty."); - Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "The username may not be null or empty."); - Preconditions.checkNotNull(client, "The client may not be null."); - return new InfluxDBImpl(url, username, password, client); + return connect(url, username, password, client, ResponseFormat.JSON); + } + + /** + * Create a connection to a InfluxDB. + * + * @param url + * the url to connect to. + * @param username + * the username which is used to authorize against the influxDB instance. + * @param password + * the password for the username which is used to authorize against the influxDB + * instance. 
+ * @param client + * the HTTP client to use + * @param responseFormat + * The {@code ResponseFormat} to use for response from InfluxDB server + * @return a InfluxDB adapter suitable to access a InfluxDB. + */ + public static InfluxDB connect(final String url, final String username, final String password, + final OkHttpClient.Builder client, final ResponseFormat responseFormat) { + Preconditions.checkNonEmptyString(url, "url"); + Preconditions.checkNonEmptyString(username, "username"); + Objects.requireNonNull(client, "client"); + return new InfluxDBImpl(url, username, password, client, responseFormat); } } diff --git a/src/main/java/org/influxdb/InfluxDBIOException.java b/src/main/java/org/influxdb/InfluxDBIOException.java new file mode 100644 index 000000000..0a6858c76 --- /dev/null +++ b/src/main/java/org/influxdb/InfluxDBIOException.java @@ -0,0 +1,15 @@ +package org.influxdb; + +import java.io.IOException; + +/** + * A wrapper for {@link IOException} caused while interacting with InfluxDB. 
+ * + * @author Simon Legner + */ +public class InfluxDBIOException extends InfluxDBException { + + public InfluxDBIOException(final IOException cause) { + super(cause); + } +} diff --git a/src/main/java/org/influxdb/InfluxDBMapperException.java b/src/main/java/org/influxdb/InfluxDBMapperException.java new file mode 100644 index 000000000..a79dd9c7f --- /dev/null +++ b/src/main/java/org/influxdb/InfluxDBMapperException.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb; + +/** + * @author fmachado + */ +public class InfluxDBMapperException extends RuntimeException { + + private static final long serialVersionUID = -7328402653918756407L; + + public InfluxDBMapperException(final String message, final Throwable cause) { + super(message, cause); + } + + public InfluxDBMapperException(final String message) { + super(message); + } + + public InfluxDBMapperException(final Throwable cause) { + super(cause); + } +} diff --git a/src/main/java/org/influxdb/annotation/Column.java b/src/main/java/org/influxdb/annotation/Column.java new file mode 100644 index 000000000..6edb256f8 --- /dev/null +++ b/src/main/java/org/influxdb/annotation/Column.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * @author fmachado + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.FIELD) +public @interface Column { + + /** + * If unset, the annotated field's name will be used as the column name. + */ + String name() default ""; + + boolean tag() default false; +} diff --git a/src/main/java/org/influxdb/annotation/Exclude.java b/src/main/java/org/influxdb/annotation/Exclude.java new file mode 100644 index 000000000..23e076797 --- /dev/null +++ b/src/main/java/org/influxdb/annotation/Exclude.java @@ -0,0 +1,41 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * When a POJO annotated with {@code @Measurement(allFields = true)} is loaded or saved, + * this annotation can be used to exclude some of its fields. + *

+ * Note: this is not considered when loading record measurements. + * + * @see Measurement#allFields() + * + * @author Eran Leshem + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.FIELD) +public @interface Exclude { +} diff --git a/src/main/java/org/influxdb/annotation/Measurement.java b/src/main/java/org/influxdb/annotation/Measurement.java new file mode 100644 index 000000000..6ea8142e0 --- /dev/null +++ b/src/main/java/org/influxdb/annotation/Measurement.java @@ -0,0 +1,54 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.concurrent.TimeUnit; + +/** + * @author fmachado + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface Measurement { + + String name(); + + String database() default "[unassigned]"; + + String retentionPolicy() default "autogen"; + + TimeUnit timeUnit() default TimeUnit.MILLISECONDS; + + /** + * If {@code true}, then all non-static fields of this measurement will be loaded or saved, + * regardless of any {@code @Column} annotations. + *

+ * Note: When loading record measurements, this is always implied to be true, + * since the record's canonical constructor is used to populate the record. + * + * @see Exclude + */ + boolean allFields() default false; +} diff --git a/src/main/java/org/influxdb/annotation/TimeColumn.java b/src/main/java/org/influxdb/annotation/TimeColumn.java new file mode 100644 index 000000000..94ed1d898 --- /dev/null +++ b/src/main/java/org/influxdb/annotation/TimeColumn.java @@ -0,0 +1,13 @@ +package org.influxdb.annotation; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.concurrent.TimeUnit; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.FIELD) +public @interface TimeColumn { + TimeUnit timeUnit() default TimeUnit.MILLISECONDS; +} diff --git a/src/main/java/org/influxdb/dto/BatchPoints.java b/src/main/java/org/influxdb/dto/BatchPoints.java index 0f39540d7..e32774628 100644 --- a/src/main/java/org/influxdb/dto/BatchPoints.java +++ b/src/main/java/org/influxdb/dto/BatchPoints.java @@ -1,18 +1,16 @@ package org.influxdb.dto; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; import org.influxdb.InfluxDB.ConsistencyLevel; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Ordering; - /** * {Purpose of This Type}. * @@ -27,11 +25,21 @@ public class BatchPoints { private Map tags; private List points; private ConsistencyLevel consistency; + private TimeUnit precision; BatchPoints() { // Only visible in the Builder } + /** + * Create a new BatchPoints build to create a new BatchPoints in a fluent manner. 
+ * + * @return the Builder to be able to add further Builder calls. + */ + public static Builder builder() { + return new Builder(null); + } + /** * Create a new BatchPoints build to create a new BatchPoints in a fluent manner. * @@ -49,9 +57,10 @@ public static Builder database(final String database) { public static final class Builder { private final String database; private String retentionPolicy; - private final Map tags = Maps.newTreeMap(Ordering.natural()); - private final List points = Lists.newArrayList(); + private final Map tags = new TreeMap<>(); + private final List points = new ArrayList<>(); private ConsistencyLevel consistency; + private TimeUnit precision; /** * @param database @@ -63,7 +72,7 @@ public static final class Builder { /** * The retentionPolicy to use. * - * @param policy + * @param policy the retentionPolicy to use * @return the Builder instance */ public Builder retentionPolicy(final String policy) { @@ -88,7 +97,7 @@ public Builder tag(final String tagName, final String value) { /** * Add a Point to this set of points. * - * @param pointToAdd + * @param pointToAdd the Point to add * @return the Builder instance */ public Builder point(final Point pointToAdd) { @@ -99,7 +108,7 @@ public Builder point(final Point pointToAdd) { /** * Add a set of Points to this set of points. * - * @param pointsToAdd + * @param pointsToAdd the Points to add * @return the Builder instance */ public Builder points(final Point... pointsToAdd) { @@ -107,10 +116,21 @@ public Builder points(final Point... pointsToAdd) { return this; } + /** + * Add a set of Points to this set of points. + * + * @param pointsToAdd the Points to add + * @return the Builder instance + */ + public Builder points(final Collection pointsToAdd) { + this.points.addAll(pointsToAdd); + return this; + } + /** * Set the ConsistencyLevel to use. 
If not given it defaults to {@link ConsistencyLevel#ONE} * - * @param consistencyLevel + * @param consistencyLevel the ConsistencyLevel * @return the Builder instance */ public Builder consistency(final ConsistencyLevel consistencyLevel) { @@ -118,14 +138,22 @@ public Builder consistency(final ConsistencyLevel consistencyLevel) { return this; } + /** + * Set the time precision to use for the whole batch. If unspecified, will default to {@link TimeUnit#NANOSECONDS} + * @param precision the precision of the points + * @return the Builder instance + */ + public Builder precision(final TimeUnit precision) { + this.precision = precision; + return this; + } + /** * Create a new BatchPoints instance. * * @return the created BatchPoints. */ public BatchPoints build() { - Preconditions.checkArgument(!Strings.isNullOrEmpty(this.database), - "Database must not be null or empty."); BatchPoints batchPoints = new BatchPoints(); batchPoints.setDatabase(this.database); for (Point point : this.points) { @@ -138,6 +166,10 @@ public BatchPoints build() { this.consistency = ConsistencyLevel.ONE; } batchPoints.setConsistency(this.consistency); + if (null == this.precision) { + this.precision = TimeUnit.NANOSECONDS; + } + batchPoints.setPrecision(this.precision); return batchPoints; } } @@ -187,10 +219,24 @@ void setPoints(final List points) { this.points = points; } + /** + * @return the time precision unit + */ + public TimeUnit getPrecision() { + return precision; + } + + /** + * @param precision the time precision to set for the batch points + */ + void setPrecision(final TimeUnit precision) { + this.precision = precision; + } + /** * Add a single Point to these batches. * - * @param point + * @param point the Point to add * @return this Instance to be able to daisy chain calls. 
*/ public BatchPoints point(final Point point) { @@ -242,12 +288,13 @@ public boolean equals(final Object o) { && Objects.equals(retentionPolicy, that.retentionPolicy) && Objects.equals(tags, that.tags) && Objects.equals(points, that.points) - && consistency == that.consistency; + && consistency == that.consistency + && precision == that.precision; } @Override public int hashCode() { - return Objects.hash(database, retentionPolicy, tags, points, consistency); + return Objects.hash(database, retentionPolicy, tags, points, consistency, precision); } /** @@ -256,17 +303,19 @@ public int hashCode() { @Override public String toString() { StringBuilder builder = new StringBuilder(); - builder.append("BatchPoints [database="); - builder.append(this.database); - builder.append(", retentionPolicy="); - builder.append(this.retentionPolicy); - builder.append(", consistency="); - builder.append(this.consistency); - builder.append(", tags="); - builder.append(this.tags); - builder.append(", points="); - builder.append(this.points); - builder.append("]"); + builder.append("BatchPoints [database=") + .append(this.database) + .append(", retentionPolicy=") + .append(this.retentionPolicy) + .append(", consistency=") + .append(this.consistency) + .append(", tags=") + .append(this.tags) + .append(", precision=") + .append(this.precision) + .append(", points=") + .append(this.points) + .append("]"); return builder.toString(); } @@ -278,9 +327,37 @@ public String toString() { */ public String lineProtocol() { StringBuilder sb = new StringBuilder(); + for (Point point : this.points) { - sb.append(point.lineProtocol()).append("\n"); + sb.append(point.lineProtocol(this.precision)).append("\n"); } return sb.toString(); } + + /** + * Test whether is possible to merge two BatchPoints objects. 
+ * + * @param that batch point to merge in + * @return true if the batch points can be sent in a single HTTP request write + */ + public boolean isMergeAbleWith(final BatchPoints that) { + return Objects.equals(database, that.database) + && Objects.equals(retentionPolicy, that.retentionPolicy) + && Objects.equals(tags, that.tags) + && consistency == that.consistency; + } + + /** + * Merge two BatchPoints objects. + * + * @param that batch point to merge in + * @return true if the batch points have been merged into this BatchPoints instance. Return false otherwise. + */ + public boolean mergeIn(final BatchPoints that) { + boolean mergeAble = isMergeAbleWith(that); + if (mergeAble) { + this.points.addAll(that.points); + } + return mergeAble; + } } diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java new file mode 100644 index 000000000..1f197289e --- /dev/null +++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java @@ -0,0 +1,36 @@ +package org.influxdb.dto; + +public final class BoundParameterQuery extends Query { + + private BoundParameterQuery(final String command, final String database) { + super(command, database); + } + + public static class QueryBuilder { + private BoundParameterQuery query; + private String influxQL; + + public static QueryBuilder newQuery(final String influxQL) { + QueryBuilder instance = new QueryBuilder(); + instance.influxQL = influxQL; + return instance; + } + + public QueryBuilder forDatabase(final String database) { + query = new BoundParameterQuery(influxQL, database); + return this; + } + + public QueryBuilder bind(final String placeholder, final Object value) { + if (query == null) { + query = new BoundParameterQuery(influxQL, null); + } + query.bindParameter(placeholder, value); + return this; + } + + public BoundParameterQuery create() { + return query; + } + } +} diff --git a/src/main/java/org/influxdb/dto/Point.java 
b/src/main/java/org/influxdb/dto/Point.java old mode 100644 new mode 100755 index d1dd78a7c..1663913dc --- a/src/main/java/org/influxdb/dto/Point.java +++ b/src/main/java/org/influxdb/dto/Point.java @@ -1,20 +1,32 @@ package org.influxdb.dto; +import org.influxdb.BuilderException; +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Exclude; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.impl.Preconditions; +import org.influxdb.impl.TypeMapper; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; import java.math.BigDecimal; import java.math.BigInteger; +import java.math.RoundingMode; import java.text.NumberFormat; +import java.time.Instant; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; +import java.util.Optional; import java.util.TreeMap; import java.util.concurrent.TimeUnit; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.escape.Escaper; -import com.google.common.escape.Escapers; - /** * Representation of a InfluxDB database Point. 
* @@ -24,20 +36,23 @@ public class Point { private String measurement; private Map tags; - private Long time; + private Number time; private TimeUnit precision = TimeUnit.NANOSECONDS; private Map fields; - - private static final Escaper FIELD_ESCAPER = Escapers.builder() - .addEscape('\\', "\\\\") - .addEscape('"', "\\\"") - .build(); - private static final Escaper KEY_ESCAPER = Escapers.builder() - .addEscape(' ', "\\ ") - .addEscape(',', "\\,") - .addEscape('=', "\\=") - .build(); private static final int MAX_FRACTION_DIGITS = 340; + private static final ThreadLocal NUMBER_FORMATTER = + ThreadLocal.withInitial(() -> { + NumberFormat numberFormat = NumberFormat.getInstance(Locale.ENGLISH); + numberFormat.setMaximumFractionDigits(MAX_FRACTION_DIGITS); + numberFormat.setGroupingUsed(false); + numberFormat.setMinimumFractionDigits(1); + return numberFormat; + }); + + private static final int DEFAULT_STRING_BUILDER_SIZE = 1024; + private static final int MAX_STRING_BUILDER_SIZE = 64 * 1024; + private static final ThreadLocal CACHED_STRINGBUILDERS = + ThreadLocal.withInitial(() -> new StringBuilder(DEFAULT_STRING_BUILDER_SIZE)); Point() { } @@ -54,6 +69,28 @@ public static Builder measurement(final String measurement) { return new Builder(measurement); } + /** + * Create a new Point Build build to create a new Point in a fluent manner from a POJO. 
+ * + * @param clazz Class of the POJO + * @return the Builder instance + */ + + public static Builder measurementByPOJO(final Class clazz) { + Objects.requireNonNull(clazz, "clazz"); + throwExceptionIfMissingAnnotation(clazz, Measurement.class); + String measurementName = findMeasurementName(clazz); + return new Builder(measurementName); + } + + private static void throwExceptionIfMissingAnnotation(final Class clazz, + final Class expectedClass) { + if (!clazz.isAnnotationPresent(expectedClass)) { + throw new IllegalArgumentException("Class " + clazz.getName() + " is not annotated with @" + + Measurement.class.getSimpleName()); + } + } + /** * Builder for a new Point. * @@ -61,10 +98,11 @@ public static Builder measurement(final String measurement) { * */ public static final class Builder { + private static final BigInteger NANOSECONDS_PER_SECOND = BigInteger.valueOf(1000000000L); private final String measurement; private final Map tags = new TreeMap<>(); - private Long time; - private TimeUnit precision = TimeUnit.NANOSECONDS; + private Number time; + private TimeUnit precision; private final Map fields = new TreeMap<>(); /** @@ -84,8 +122,8 @@ public static final class Builder { * @return the Builder instance. 
*/ public Builder tag(final String tagName, final String value) { - Preconditions.checkArgument(tagName != null); - Preconditions.checkArgument(value != null); + Objects.requireNonNull(tagName, "tagName"); + Objects.requireNonNull(value, "value"); if (!tagName.isEmpty() && !value.isEmpty()) { tags.put(tagName, value); } @@ -121,20 +159,15 @@ public Builder field(final String field, Object value) { if (value instanceof Number) { if (value instanceof Byte) { value = ((Byte) value).doubleValue(); - } - if (value instanceof Short) { + } else if (value instanceof Short) { value = ((Short) value).doubleValue(); - } - if (value instanceof Integer) { + } else if (value instanceof Integer) { value = ((Integer) value).doubleValue(); - } - if (value instanceof Long) { + } else if (value instanceof Long) { value = ((Long) value).doubleValue(); - } - if (value instanceof BigInteger) { + } else if (value instanceof BigInteger) { value = ((BigInteger) value).doubleValue(); } - } fields.put(field, value); return this; @@ -155,15 +188,28 @@ public Builder addField(final String field, final double value) { return this; } + public Builder addField(final String field, final int value) { + fields.put(field, value); + return this; + } + + public Builder addField(final String field, final float value) { + fields.put(field, value); + return this; + } + + public Builder addField(final String field, final short value) { + fields.put(field, value); + return this; + } + public Builder addField(final String field, final Number value) { fields.put(field, value); return this; } public Builder addField(final String field, final String value) { - if (value == null) { - throw new IllegalArgumentException("Field value cannot be null"); - } + Objects.requireNonNull(value, "value"); fields.put(field, value); return this; @@ -184,42 +230,198 @@ public Builder fields(final Map fieldsToAdd) { /** * Add a time to this point. 
* - * @param precisionToSet - * @param timeToSet + * @param timeToSet the time for this point + * @param precisionToSet the TimeUnit * @return the Builder instance. */ - public Builder time(final long timeToSet, final TimeUnit precisionToSet) { - Preconditions.checkNotNull(precisionToSet, "Precision must be not null!"); + public Builder time(final Number timeToSet, final TimeUnit precisionToSet) { + Objects.requireNonNull(timeToSet, "timeToSet"); + Objects.requireNonNull(precisionToSet, "precisionToSet"); this.time = timeToSet; this.precision = precisionToSet; return this; } + /** + * Add a time to this point as long. + * only kept for binary compatibility with previous releases. + * + * @param timeToSet the time for this point as long + * @param precisionToSet the TimeUnit + * @return the Builder instance. + */ + public Builder time(final long timeToSet, final TimeUnit precisionToSet) { + return time((Number) timeToSet, precisionToSet); + } + + /** + * Add a time to this point as Long. + * only kept for binary compatibility with previous releases. + * + * @param timeToSet the time for this point as Long + * @param precisionToSet the TimeUnit + * @return the Builder instance. + */ + public Builder time(final Long timeToSet, final TimeUnit precisionToSet) { + return time((Number) timeToSet, precisionToSet); + } + + /** + * Does this builder contain any fields? + * + * @return true, if the builder contains any fields, false otherwise. + */ + public boolean hasFields() { + return !fields.isEmpty(); + } + + /** + * Adds field map from object by reflection using {@link org.influxdb.annotation.Column} + * annotation. 
+ * + * @param pojo POJO Object with annotation {@link org.influxdb.annotation.Column} on fields + * @return the Builder instance + */ + public Builder addFieldsFromPOJO(final Object pojo) { + + Class clazz = pojo.getClass(); + Measurement measurement = clazz.getAnnotation(Measurement.class); + boolean allFields = measurement != null && measurement.allFields(); + + while (clazz != null) { + + TypeMapper typeMapper = TypeMapper.empty(); + while (clazz != null) { + for (Field field : clazz.getDeclaredFields()) { + + Column column = field.getAnnotation(Column.class); + + if (column == null && !(allFields + && !field.isAnnotationPresent(Exclude.class) && !Modifier.isStatic(field.getModifiers()))) { + continue; + } + + field.setAccessible(true); + + String fieldName; + if (column != null && !column.name().isEmpty()) { + fieldName = column.name(); + } else { + fieldName = field.getName(); + } + + addFieldByAttribute(pojo, field, column != null && column.tag(), fieldName, typeMapper); + } + + Class superclass = clazz.getSuperclass(); + Type genericSuperclass = clazz.getGenericSuperclass(); + if (genericSuperclass instanceof ParameterizedType) { + typeMapper = TypeMapper.of((ParameterizedType) genericSuperclass, superclass); + } else { + typeMapper = TypeMapper.empty(); + } + + clazz = superclass; + } + } + + if (this.fields.isEmpty()) { + throw new BuilderException("Class " + pojo.getClass().getName() + + " has no @" + Column.class.getSimpleName() + " annotation"); + } + + return this; + } + + private void addFieldByAttribute(final Object pojo, final Field field, final boolean tag, + final String fieldName, final TypeMapper typeMapper) { + try { + Object fieldValue = field.get(pojo); + + TimeColumn tc = field.getAnnotation(TimeColumn.class); + Class fieldType = (Class) typeMapper.resolve(field.getGenericType()); + if (tc != null) { + if (Instant.class.isAssignableFrom(fieldType)) { + Optional.ofNullable((Instant) fieldValue).ifPresent(instant -> { + TimeUnit timeUnit = 
tc.timeUnit(); + if (timeUnit == TimeUnit.NANOSECONDS || timeUnit == TimeUnit.MICROSECONDS) { + this.time = BigInteger.valueOf(instant.getEpochSecond()) + .multiply(NANOSECONDS_PER_SECOND) + .add(BigInteger.valueOf(instant.getNano())) + .divide(BigInteger.valueOf(TimeUnit.NANOSECONDS.convert(1, timeUnit))); + } else { + this.time = timeUnit.convert(instant.toEpochMilli(), TimeUnit.MILLISECONDS); + } + this.precision = timeUnit; + }); + return; + } + + throw new InfluxDBMapperException( + "Unsupported type " + fieldType + " for time: should be of Instant type"); + } + + if (tag) { + if (fieldValue != null) { + this.tags.put(fieldName, (String) fieldValue); + } + } else { + if (fieldValue != null) { + setField(fieldType, fieldName, fieldValue); + } + } + + } catch (IllegalArgumentException | IllegalAccessException e) { + // Can not happen since we use metadata got from the object + throw new BuilderException( + "Field " + fieldName + " could not found on class " + pojo.getClass().getSimpleName()); + } + } + /** * Create a new Point. * * @return the newly created Point. 
*/ public Point build() { - Preconditions - .checkArgument(!Strings.isNullOrEmpty(this.measurement), - "Point name must not be null or empty."); - Preconditions - .checkArgument(this.fields.size() > 0, - "Point must have at least one field specified."); + Preconditions.checkNonEmptyString(this.measurement, "measurement"); + Preconditions.checkPositiveNumber(this.fields.size(), "fields size"); Point point = new Point(); point.setFields(this.fields); point.setMeasurement(this.measurement); if (this.time != null) { point.setTime(this.time); point.setPrecision(this.precision); - } else { - point.setTime(System.currentTimeMillis()); - point.setPrecision(TimeUnit.MILLISECONDS); } point.setTags(this.tags); return point; } + + private void setField( + final Class fieldType, + final String columnName, + final Object value) { + if (boolean.class.isAssignableFrom(fieldType) || Boolean.class.isAssignableFrom(fieldType)) { + addField(columnName, (boolean) value); + } else if (long.class.isAssignableFrom(fieldType) || Long.class.isAssignableFrom(fieldType)) { + addField(columnName, (long) value); + } else if (double.class.isAssignableFrom(fieldType) || Double.class.isAssignableFrom(fieldType)) { + addField(columnName, (double) value); + } else if (float.class.isAssignableFrom(fieldType) || Float.class.isAssignableFrom(fieldType)) { + addField(columnName, (float) value); + } else if (int.class.isAssignableFrom(fieldType) || Integer.class.isAssignableFrom(fieldType)) { + addField(columnName, (int) value); + } else if (short.class.isAssignableFrom(fieldType) || Short.class.isAssignableFrom(fieldType)) { + addField(columnName, (short) value); + } else if (String.class.isAssignableFrom(fieldType)) { + addField(columnName, (String) value); + } else if (Enum.class.isAssignableFrom(fieldType)) { + addField(columnName, ((Enum) value).name()); + } else { + throw new InfluxDBMapperException( + "Unsupported type " + fieldType + " for column " + columnName); + } + } } /** @@ -234,7 +436,7 @@ 
void setMeasurement(final String measurement) { * @param time * the time to set */ - void setTime(final Long time) { + void setTime(final Number time) { this.time = time; } @@ -261,6 +463,13 @@ void setPrecision(final TimeUnit precision) { this.precision = precision; } + /** + * @return the fields + */ + Map getFields() { + return this.fields; + } + /** * @param fields * the fields to set @@ -298,12 +507,16 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("Point [name="); builder.append(this.measurement); - builder.append(", time="); - builder.append(this.time); + if (this.time != null) { + builder.append(", time="); + builder.append(this.time); + } builder.append(", tags="); builder.append(this.tags); - builder.append(", precision="); - builder.append(this.precision); + if (this.precision != null) { + builder.append(", precision="); + builder.append(this.precision); + } builder.append(", fields="); builder.append(this.fields); builder.append("]"); @@ -311,78 +524,169 @@ public String toString() { } /** - * calculate the lineprotocol entry for a single Point. - * - * Documentation is WIP : https://github.com/influxdb/influxdb/pull/2997 + * Calculate the lineprotocol entry for a single Point. + *

+ * NaN and infinity values are silently dropped as they are unsupported: + * https://github.com/influxdata/influxdb/issues/4089 * - * https://github.com/influxdb/influxdb/blob/master/tsdb/README.md + * @see + * InfluxDB line protocol reference * - * @return the String without newLine. + * @return the String without newLine, empty when there are no fields to write */ public String lineProtocol() { - final StringBuilder sb = new StringBuilder(); - sb.append(KEY_ESCAPER.escape(this.measurement)); - sb.append(concatenatedTags()); - sb.append(concatenateFields()); - sb.append(formatedTime()); + return lineProtocol(null); + } + + /** + * Calculate the lineprotocol entry for a single point, using a specific {@link TimeUnit} for the timestamp. + *

+ * NaN and infinity values are silently dropped as they are unsupported: + * https://github.com/influxdata/influxdb/issues/4089 + * + * @see + * InfluxDB line protocol reference + * + * @param precision the time precision unit for this point + * @return the String without newLine, empty when there are no fields to write + */ + public String lineProtocol(final TimeUnit precision) { + + // setLength(0) is used for reusing cached StringBuilder instance per thread + // it reduces GC activity and performs better then new StringBuilder() + StringBuilder sb = CACHED_STRINGBUILDERS.get(); + if (sb.capacity() > MAX_STRING_BUILDER_SIZE) { + sb = new StringBuilder(DEFAULT_STRING_BUILDER_SIZE); + CACHED_STRINGBUILDERS.set(sb); + } else { + sb.setLength(0); + } + + escapeKey(sb, measurement); + concatenatedTags(sb); + int writtenFields = concatenatedFields(sb); + if (writtenFields == 0) { + return ""; + } + formatedTime(sb, precision); + return sb.toString(); } - private StringBuilder concatenatedTags() { - final StringBuilder sb = new StringBuilder(); + private void concatenatedTags(final StringBuilder sb) { for (Entry tag : this.tags.entrySet()) { - sb.append(",") - .append(KEY_ESCAPER.escape(tag.getKey())) - .append("=") - .append(KEY_ESCAPER.escape(tag.getValue())); + sb.append(','); + escapeKey(sb, tag.getKey()); + sb.append('='); + escapeKey(sb, tag.getValue()); } - sb.append(" "); - return sb; + sb.append(' '); } - private StringBuilder concatenateFields() { - final StringBuilder sb = new StringBuilder(); - final int fieldCount = this.fields.size(); - int loops = 0; - - NumberFormat numberFormat = NumberFormat.getInstance(Locale.ENGLISH); - numberFormat.setMaximumFractionDigits(MAX_FRACTION_DIGITS); - numberFormat.setGroupingUsed(false); - numberFormat.setMinimumFractionDigits(1); - + private int concatenatedFields(final StringBuilder sb) { + int fieldCount = 0; for (Entry field : this.fields.entrySet()) { - loops++; Object value = field.getValue(); - if (value == null) 
{ + if (value == null || isNotFinite(value)) { continue; } - - sb.append(KEY_ESCAPER.escape(field.getKey())).append("="); - if (value instanceof String) { - String stringValue = (String) value; - sb.append("\"").append(FIELD_ESCAPER.escape(stringValue)).append("\""); - } else if (value instanceof Number) { + escapeKey(sb, field.getKey()); + sb.append('='); + if (value instanceof Number) { if (value instanceof Double || value instanceof Float || value instanceof BigDecimal) { - sb.append(numberFormat.format(value)); + sb.append(NUMBER_FORMATTER.get().format(value)); } else { - sb.append(value).append("i"); + sb.append(value).append('i'); } + } else if (value instanceof String) { + String stringValue = (String) value; + sb.append('"'); + escapeField(sb, stringValue); + sb.append('"'); } else { sb.append(value); } - if (loops < fieldCount) { - sb.append(","); + sb.append(','); + + fieldCount++; + } + + // efficiently chop off the trailing comma + int lengthMinusOne = sb.length() - 1; + if (sb.charAt(lengthMinusOne) == ',') { + sb.setLength(lengthMinusOne); + } + + return fieldCount; + } + + static void escapeKey(final StringBuilder sb, final String key) { + for (int i = 0; i < key.length(); i++) { + switch (key.charAt(i)) { + case ' ': + case ',': + case '=': + sb.append('\\'); + default: + sb.append(key.charAt(i)); + } + } + } + + static void escapeField(final StringBuilder sb, final String field) { + for (int i = 0; i < field.length(); i++) { + switch (field.charAt(i)) { + case '\\': + case '\"': + sb.append('\\'); + default: + sb.append(field.charAt(i)); } } + } - return sb; + private static boolean isNotFinite(final Object value) { + return value instanceof Double && !Double.isFinite((Double) value) + || value instanceof Float && !Float.isFinite((Float) value); } - private StringBuilder formatedTime() { - final StringBuilder sb = new StringBuilder(); - sb.append(" ").append(TimeUnit.NANOSECONDS.convert(this.time, this.precision)); - return sb; + private void 
formatedTime(final StringBuilder sb, final TimeUnit precision) { + if (this.time == null) { + return; + } + TimeUnit converterPrecision = precision; + + if (converterPrecision == null) { + converterPrecision = TimeUnit.NANOSECONDS; + } + if (this.time instanceof BigInteger) { + BigInteger time = (BigInteger) this.time; + long conversionFactor = converterPrecision.convert(1, this.precision); + if (conversionFactor >= 1) { + time = time.multiply(BigInteger.valueOf(conversionFactor)); + } else { + conversionFactor = this.precision.convert(1, converterPrecision); + time = time.divide(BigInteger.valueOf(conversionFactor)); + } + sb.append(" ").append(time); + } else if (this.time instanceof BigDecimal) { + BigDecimal time = (BigDecimal) this.time; + long conversionFactor = converterPrecision.convert(1, this.precision); + if (conversionFactor >= 1) { + time = time.multiply(BigDecimal.valueOf(conversionFactor)); + } else { + conversionFactor = this.precision.convert(1, converterPrecision); + time = time.divide(BigDecimal.valueOf(conversionFactor), RoundingMode.HALF_UP); + } + sb.append(" ").append(time.toBigInteger()); + } else { + sb.append(" ").append(converterPrecision.convert(this.time.longValue(), this.precision)); + } } + + private static String findMeasurementName(final Class clazz) { + return clazz.getAnnotation(Measurement.class).name(); + } } diff --git a/src/main/java/org/influxdb/dto/Pong.java b/src/main/java/org/influxdb/dto/Pong.java index 22ab79e33..4aa041e41 100644 --- a/src/main/java/org/influxdb/dto/Pong.java +++ b/src/main/java/org/influxdb/dto/Pong.java @@ -1,7 +1,5 @@ package org.influxdb.dto; -import com.google.common.base.MoreObjects; - /** * Representation of the response for a influxdb ping. 
* @@ -11,6 +9,7 @@ public class Pong { private String version; private long responseTime; + private static final String UNKNOWN_VERSION = "unknown"; /** * @return the status @@ -27,6 +26,15 @@ public void setVersion(final String version) { this.version = version; } + /** + * Good or bad connection status. + * + * @return true if the version of influxdb is not unknown. + */ + public boolean isGood() { + return !UNKNOWN_VERSION.equalsIgnoreCase(version); + } + /** * @return the responseTime */ @@ -47,11 +55,7 @@ public void setResponseTime(final long responseTime) { */ @Override public String toString() { - return MoreObjects - .toStringHelper(this.getClass()) - .add("version", this.version) - .add("responseTime", this.responseTime) - .toString(); + return "Pong{version=" + version + ", responseTime=" + responseTime + "}"; } } diff --git a/src/main/java/org/influxdb/dto/Query.java b/src/main/java/org/influxdb/dto/Query.java index 6305c5942..ebed08e7e 100644 --- a/src/main/java/org/influxdb/dto/Query.java +++ b/src/main/java/org/influxdb/dto/Query.java @@ -1,8 +1,18 @@ package org.influxdb.dto; +import com.squareup.moshi.JsonWriter; +import okio.Buffer; +import org.influxdb.InfluxDBIOException; +import org.influxdb.querybuilder.Appendable; + +import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; +import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; /** * Represents a Query against Influxdb. 
@@ -15,18 +25,26 @@ public class Query { private final String command; private final String database; private final boolean requiresPost; + protected final Map params = new HashMap<>(); /** - * @param command - * @param database + * @param command the query command + */ + public Query(final String command) { + this(command, null); + } + + /** + * @param command the query command + * @param database the database to query */ public Query(final String command, final String database) { this(command, database, false); } /** - * @param command - * @param database + * @param command the query command + * @param database the database to query * @param requiresPost true if the command requires a POST instead of GET to influxdb */ public Query(final String command, final String database, final boolean requiresPost) { @@ -40,59 +58,64 @@ public Query(final String command, final String database, final boolean requires * @return the command */ public String getCommand() { - return this.command; + return command; } /** * @return url encoded command */ public String getCommandWithUrlEncoded() { - return encode(this.command); + return encode(command); } /** * @return the database */ public String getDatabase() { - return this.database; + return database; } public boolean requiresPost() { return requiresPost; } - @SuppressWarnings("checkstyle:avoidinlineconditionals") - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((command == null) ? 0 : command.hashCode()); - result = prime * result - + ((database == null) ? 
0 : database.hashCode()); - return result; + public Query bindParameter(final String placeholder, final Object value) { + params.put(placeholder, value); + return this; + } + + public boolean hasBoundParameters() { + return !params.isEmpty(); + } + + public String getParameterJsonWithUrlEncoded() { + try { + String jsonParameterObject = createJsonObject(params); + String urlEncodedJsonParameterObject = encode(jsonParameterObject); + return urlEncodedJsonParameterObject; + } catch (IOException e) { + throw new InfluxDBIOException(e); + } } - @SuppressWarnings("checkstyle:needbraces") @Override - public boolean equals(final Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - Query other = (Query) obj; - if (command == null) { - if (other.command != null) - return false; - } else if (!command.equals(other.command)) - return false; - if (database == null) { - if (other.database != null) - return false; - } else if (!database.equals(other.database)) + public boolean equals(final Object o) { + if (o == null || getClass() != o.getClass()) { return false; - return true; + } + + Query query = (Query) o; + return Objects.equals(command, query.command) && Objects.equals(database, query.database) && params.equals( + query.params); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = Objects.hashCode(command); + result = prime * result + Objects.hashCode(database); + result = prime * result + params.hashCode(); + return result; } /** @@ -105,7 +128,33 @@ public static String encode(final String command) { try { return URLEncoder.encode(command, StandardCharsets.UTF_8.name()); } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); + throw new IllegalStateException("Every JRE must support UTF-8", e); + } + } + + private String createJsonObject(final Map parameterMap) throws IOException { + Buffer b = new Buffer(); + JsonWriter writer = 
JsonWriter.of(b); + writer.beginObject(); + for (Map.Entry pair : parameterMap.entrySet()) { + String name = pair.getKey(); + Object value = pair.getValue(); + if (value instanceof Number) { + Number number = (Number) value; + writer.name(name).value(number); + } else if (value instanceof String) { + writer.name(name).value((String) value); + } else if (value instanceof Boolean) { + writer.name(name).value((Boolean) value); + } else if (value instanceof Appendable) { + StringBuilder stringBuilder = new StringBuilder(); + ((Appendable) value).appendTo(stringBuilder); + writer.name(name).value(stringBuilder.toString()); + } else { + writer.name(name).value(String.valueOf(value)); + } } + writer.endObject(); + return b.readString(Charset.forName("utf-8")); } } diff --git a/src/main/java/org/influxdb/example/Android.java b/src/main/java/org/influxdb/example/Android.java new file mode 100644 index 000000000..f26495cd2 --- /dev/null +++ b/src/main/java/org/influxdb/example/Android.java @@ -0,0 +1,98 @@ +package org.influxdb.example; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBFactory; +import org.influxdb.dto.QueryResult; +import org.influxdb.dto.Query; + +import java.util.LinkedList; +import java.util.List; + +/** + * @author StrakarCe + * @since 07/05/2021 + * @version 1 + */ +public class Android { + // put the address IP of your database + String address = "http://192.168.1.75:8000/"; + String dbName = "myDatabase"; + String table = "SERIES"; + QueryResult actual; + Boolean flag = false; + InfluxDB con; + + public Android() { + super(); + } + public void queryExecute(final Query query) { + Thread thread = new Thread(new Runnable() { + + @Override + public void run() { + try { + //InfluxDB connector = InfluxDBFactory.connect(address); + // if you want to open every time + System.out.println("Send the query to the database ..."); + // FOR A REAL APP CREATE A LOGGER ; + List results = new LinkedList<>(); + actual = con.query(query); + } catch 
(Exception e) { + e.printStackTrace(); + } + flag = true; // For simplicity, I use a simple flag to know when the thread have finished + } + }); + + thread.start(); + } + + /** + * It's to open the connexion with the database. + * In my case I decide to open once, do many query and close. + */ + public void connexion() { + con = InfluxDBFactory.connect(address); + } + /** + * It's to close after my list of query. + */ + public void close() { + con.close(); + } + /* + * simple example of how you can create a query + */ + private void queryLauncher(final String query) { + queryExecute(new Query(query, dbName)); + while (!flag) { // ugly method to wait the thread + System.out.println("Wait the thread"); + } + flag = false; + } + public String getEtat() { + queryLauncher("select last(value) from PTEC"); + return actual.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString(); + } + public String getHC() { + queryLauncher("SELECT last(value) FROM HCHC"); + return actual.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString(); + } + // ------------------------- Example when you want to use it ------------ + /* + Android test = new Android(); + refresh.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View v) { + test.connexion(); + etat2.setText(test.getEtat()); + hc2.setText(test.getHC()); + hp2.setText(test.getHP()); + prix2.setText(test.getDepense()); + percMens2.setText(test.getPercentageMensuel()); + percTotal2.setText(test.getPercentageTotal()); + test.close(); + } + }); + */ +} diff --git a/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java b/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java new file mode 100644 index 000000000..ffa75af61 --- /dev/null +++ b/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java @@ -0,0 +1,24 @@ +package org.influxdb.impl; + +import java.io.IOException; + +import okhttp3.Credentials; +import okhttp3.Interceptor; +import okhttp3.Request; 
+import okhttp3.Response; + +public class BasicAuthInterceptor implements Interceptor { + + private String credentials; + + public BasicAuthInterceptor(final String user, final String password) { + credentials = Credentials.basic(user, password); + } + + @Override + public Response intercept(final Chain chain) throws IOException { + Request request = chain.request(); + Request authenticatedRequest = request.newBuilder().header("Authorization", credentials).build(); + return chain.proceed(authenticatedRequest); + } +} diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java index cea38f0b4..28c45b693 100644 --- a/src/main/java/org/influxdb/impl/BatchProcessor.java +++ b/src/main/java/org/influxdb/impl/BatchProcessor.java @@ -1,9 +1,17 @@ package org.influxdb.impl; +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDB.ConsistencyLevel; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; + import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; @@ -11,16 +19,11 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.logging.Level; import java.util.logging.Logger; -import org.influxdb.InfluxDB; -import org.influxdb.dto.BatchPoints; -import org.influxdb.dto.Point; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; - /** * A BatchProcessor can be attached to a InfluxDB Instance to collect single point writes and * aggregates them to BatchPoints to get a better write performance. 
@@ -28,31 +31,48 @@ * @author stefan.majer [at] gmail.com * */ -public class BatchProcessor { +public final class BatchProcessor { private static final Logger LOG = Logger.getLogger(BatchProcessor.class.getName()); protected final BlockingQueue queue; private final ScheduledExecutorService scheduler; private final BiConsumer, Throwable> exceptionHandler; - final InfluxDBImpl influxDB; + final InfluxDB influxDB; final int actions; private final TimeUnit flushIntervalUnit; private final int flushInterval; + private final ConsistencyLevel consistencyLevel; + private final int jitterInterval; + private final TimeUnit precision; + private final BatchWriter batchWriter; + private boolean dropActionsOnQueueExhaustion; + Consumer droppedActionHandler; + Supplier randomSupplier; /** * The Builder to create a BatchProcessor instance. */ public static final class Builder { - private final InfluxDBImpl influxDB; + private final InfluxDB influxDB; private ThreadFactory threadFactory = Executors.defaultThreadFactory(); private int actions; private TimeUnit flushIntervalUnit; private int flushInterval; + private int jitterInterval; + // this is a default value if the InfluxDb.enableBatch(BatchOptions) IS NOT used + // the reason is backward compatibility + private int bufferLimit = 0; + private TimeUnit precision; + private BiConsumer, Throwable> exceptionHandler = (entries, throwable) -> { }; + private ConsistencyLevel consistencyLevel; + private boolean dropActionsOnQueueExhaustion; + private Consumer droppedActionsHandler; /** * @param threadFactory * is optional. + * @return this Builder to use it fluent */ public Builder threadFactory(final ThreadFactory threadFactory) { this.threadFactory = threadFactory; @@ -64,7 +84,7 @@ public Builder threadFactory(final ThreadFactory threadFactory) { * is mandatory. 
*/ public Builder(final InfluxDB influxDB) { - this.influxDB = (InfluxDBImpl) influxDB; + this.influxDB = influxDB; } /** @@ -95,6 +115,37 @@ public Builder interval(final int interval, final TimeUnit unit) { return this; } + /** + * The interval at which at least should issued a write. + * + * @param flushInterval + * the flush interval + * @param jitterInterval + * the flush jitter interval + * @param unit + * the TimeUnit of the interval + * + * @return this Builder to use it fluent + */ + public Builder interval(final int flushInterval, final int jitterInterval, final TimeUnit unit) { + this.flushInterval = flushInterval; + this.jitterInterval = jitterInterval; + this.flushIntervalUnit = unit; + return this; + } + + /** + * A buffer for failed writes so that the writes will be retried later on. When the buffer is full and + * new points are written, oldest entries in the buffer are lost. + * + * @param bufferLimit maximum number of points stored in the buffer + * @return this Builder to use it fluent + */ + public Builder bufferLimit(final int bufferLimit) { + this.bufferLimit = bufferLimit; + return this; + } + /** * A callback to be used when an error occurs during a batchwrite. * @@ -108,20 +159,86 @@ public Builder exceptionHandler(final BiConsumer, Throwable> han return this; } + /** + * To define the behaviour when the action queue exhausts. If unspecified, will default to false which means that + * the {@link InfluxDB#write(Point)} will be blocked till the space in the queue is created. + * true means that the newer actions being written to the queue will dropped and + * {@link BatchProcessor#droppedActionHandler} will be called. 
+ * + * @param dropActionsOnQueueExhaustion + * the dropActionsOnQueueExhaustion + * + * @return this Builder to use it fluent + */ + public Builder dropActionsOnQueueExhaustion(final boolean dropActionsOnQueueExhaustion) { + this.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion; + return this; + } + + /** + * A callback to be used when an actions are dropped on action queue exhaustion. + * + * @param handler + * the handler + * + * @return this Builder to use it fluent + */ + public Builder droppedActionHandler(final Consumer handler) { + this.droppedActionsHandler = handler; + return this; + } + + + + /** + * Consistency level for batch write. + * + * @param consistencyLevel + * the consistencyLevel + * + * @return this Builder to use it fluent + */ + public Builder consistencyLevel(final ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + return this; + } + + /** + * Set the time precision to use for the batch. + * + * @param precision + * the precision + * + * @return this Builder to use it fluent + */ + public Builder precision(final TimeUnit precision) { + this.precision = precision; + return this; + } + /** * Create the BatchProcessor. * * @return the BatchProcessor instance. 
*/ public BatchProcessor build() { - Preconditions.checkNotNull(this.influxDB, "influxDB may not be null"); - Preconditions.checkArgument(this.actions > 0, "actions should > 0"); - Preconditions.checkArgument(this.flushInterval > 0, "flushInterval should > 0"); - Preconditions.checkNotNull(this.flushIntervalUnit, "flushIntervalUnit may not be null"); - Preconditions.checkNotNull(this.threadFactory, "threadFactory may not be null"); - Preconditions.checkNotNull(this.exceptionHandler, "exceptionHandler may not be null"); - return new BatchProcessor(this.influxDB, this.threadFactory, this.actions, this.flushIntervalUnit, - this.flushInterval, exceptionHandler); + Objects.requireNonNull(this.influxDB, "influxDB"); + Preconditions.checkPositiveNumber(this.actions, "actions"); + Preconditions.checkPositiveNumber(this.flushInterval, "flushInterval"); + Preconditions.checkNotNegativeNumber(jitterInterval, "jitterInterval"); + Preconditions.checkNotNegativeNumber(bufferLimit, "bufferLimit"); + Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit"); + Objects.requireNonNull(this.threadFactory, "threadFactory"); + Objects.requireNonNull(this.exceptionHandler, "exceptionHandler"); + BatchWriter batchWriter; + if (this.bufferLimit > this.actions) { + batchWriter = new RetryCapableBatchWriter(this.influxDB, this.exceptionHandler, this.bufferLimit, this.actions); + } else { + batchWriter = new OneShotBatchWriter(this.influxDB); + } + return new BatchProcessor(this.influxDB, batchWriter, this.threadFactory, this.actions, this.flushIntervalUnit, + this.flushInterval, this.jitterInterval, exceptionHandler, this.consistencyLevel, + this.precision, this.dropActionsOnQueueExhaustion, this.droppedActionsHandler); } } @@ -180,41 +297,58 @@ public static Builder builder(final InfluxDB influxDB) { return new Builder(influxDB); } - BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions, - final TimeUnit flushIntervalUnit, final int 
flushInterval, - final BiConsumer, Throwable> exceptionHandler) { + BatchProcessor(final InfluxDB influxDB, final BatchWriter batchWriter, final ThreadFactory threadFactory, + final int actions, final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval, + final BiConsumer, Throwable> exceptionHandler, + final ConsistencyLevel consistencyLevel, final TimeUnit precision, + final boolean dropActionsOnQueueExhaustion, final Consumer droppedActionHandler) { super(); this.influxDB = influxDB; + this.batchWriter = batchWriter; this.actions = actions; this.flushIntervalUnit = flushIntervalUnit; this.flushInterval = flushInterval; + this.jitterInterval = jitterInterval; this.scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory); this.exceptionHandler = exceptionHandler; + this.consistencyLevel = consistencyLevel; + this.precision = precision; + this.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion; + this.droppedActionHandler = droppedActionHandler; if (actions > 1 && actions < Integer.MAX_VALUE) { this.queue = new LinkedBlockingQueue<>(actions); } else { this.queue = new LinkedBlockingQueue<>(); } - // Flush at specified Rate - this.scheduler.scheduleAtFixedRate(new Runnable() { + this.randomSupplier = Math::random; + + Runnable flushRunnable = new Runnable() { @Override public void run() { + // write doesn't throw any exceptions write(); + int jitterInterval = (int) (randomSupplier.get() * BatchProcessor.this.jitterInterval); + BatchProcessor.this.scheduler.schedule(this, + BatchProcessor.this.flushInterval + jitterInterval, BatchProcessor.this.flushIntervalUnit); } - }, this.flushInterval, this.flushInterval, this.flushIntervalUnit); - + }; + // Flush at specified Rate + this.scheduler.schedule(flushRunnable, + this.flushInterval + (int) (randomSupplier.get() * BatchProcessor.this.jitterInterval), + this.flushIntervalUnit); } void write() { List currentBatch = null; try { if (this.queue.isEmpty()) { + 
BatchProcessor.this.batchWriter.write(Collections.emptyList()); return; } //for batch on HTTP. - Map batchKeyToBatchPoints = Maps.newHashMap(); + Map batchKeyToBatchPoints = new HashMap<>(); //for batch on UDP. - Map> udpPortToBatchPoints = Maps.newHashMap(); + Map> udpPortToBatchPoints = new HashMap<>(); List batchEntries = new ArrayList<>(this.queue.size()); this.queue.drainTo(batchEntries); currentBatch = new ArrayList<>(batchEntries.size()); @@ -229,7 +363,8 @@ void write() { String batchKey = dbName + "_" + rp; if (!batchKeyToBatchPoints.containsKey(batchKey)) { BatchPoints batchPoints = BatchPoints.database(dbName) - .retentionPolicy(rp).build(); + .retentionPolicy(rp).consistency(getConsistencyLevel()) + .precision(getPrecision()).build(); batchKeyToBatchPoints.put(batchKey, batchPoints); } batchKeyToBatchPoints.get(batchKey).point(point); @@ -244,9 +379,8 @@ void write() { } } - for (BatchPoints batchPoints : batchKeyToBatchPoints.values()) { - BatchProcessor.this.influxDB.write(batchPoints); - } + BatchProcessor.this.batchWriter.write(batchKeyToBatchPoints.values()); + for (Entry> entry : udpPortToBatchPoints.entrySet()) { for (String lineprotocolStr : entry.getValue()) { BatchProcessor.this.influxDB.write(entry.getKey(), lineprotocolStr); @@ -267,7 +401,14 @@ void write() { */ void put(final AbstractBatchEntry batchEntry) { try { - this.queue.put(batchEntry); + if (this.dropActionsOnQueueExhaustion) { + if (!this.queue.offer(batchEntry)) { + this.droppedActionHandler.accept(batchEntry.getPoint()); + return; + } + } else { + this.queue.put(batchEntry); + } } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -289,6 +430,7 @@ public void run() { void flushAndShutdown() { this.write(); this.scheduler.shutdown(); + this.batchWriter.close(); } /** @@ -297,4 +439,24 @@ void flushAndShutdown() { void flush() { this.write(); } + + public ConsistencyLevel getConsistencyLevel() { + return consistencyLevel; + } + + public TimeUnit getPrecision() { 
+ return precision; + } + + BatchWriter getBatchWriter() { + return batchWriter; + } + + public boolean isDropActionsOnQueueExhaustion() { + return dropActionsOnQueueExhaustion; + } + + public Consumer getDroppedActionHandler() { + return droppedActionHandler; + } } diff --git a/src/main/java/org/influxdb/impl/BatchWriter.java b/src/main/java/org/influxdb/impl/BatchWriter.java new file mode 100644 index 000000000..2a71ebddd --- /dev/null +++ b/src/main/java/org/influxdb/impl/BatchWriter.java @@ -0,0 +1,22 @@ +package org.influxdb.impl; + +import org.influxdb.dto.BatchPoints; + +import java.util.Collection; + +/** + * Write individual batches to InfluxDB. + */ +interface BatchWriter { + /** + * Write the given batch into InfluxDB. + * @param batchPointsCollection to write + */ + void write(Collection batchPointsCollection); + + /** + * FLush all cached writes into InfluxDB. The application is about to exit. + */ + void close(); +} + diff --git a/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java b/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java index 8969780d9..adaa3d528 100644 --- a/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java +++ b/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.regex.Pattern; import okhttp3.Interceptor; import okhttp3.MediaType; @@ -19,6 +20,8 @@ */ final class GzipRequestInterceptor implements Interceptor { + private static final Pattern WRITE_PATTERN = Pattern.compile(".*/write", Pattern.CASE_INSENSITIVE); + private AtomicBoolean enabled = new AtomicBoolean(false); GzipRequestInterceptor() { @@ -48,6 +51,10 @@ public Response intercept(final Interceptor.Chain chain) throws IOException { return chain.proceed(originalRequest); } + if (!WRITE_PATTERN.matcher(originalRequest.url().encodedPath()).matches()) { + return chain.proceed(originalRequest); + } + Request compressedRequest = 
originalRequest.newBuilder().header("Content-Encoding", "gzip") .method(originalRequest.method(), gzip(body)).build(); return chain.proceed(compressedRequest); diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java index 884787cab..23427a23d 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java +++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java @@ -1,54 +1,61 @@ package org.influxdb.impl; - -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.base.Stopwatch; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; import com.squareup.moshi.JsonAdapter; import com.squareup.moshi.Moshi; - -import org.influxdb.InfluxDB; -import org.influxdb.dto.BatchPoints; -import org.influxdb.dto.Point; -import org.influxdb.dto.Pong; -import org.influxdb.dto.Query; -import org.influxdb.dto.QueryResult; -import org.influxdb.impl.BatchProcessor.HttpBatchEntry; -import org.influxdb.impl.BatchProcessor.UdpBatchEntry; - import okhttp3.Headers; -import okhttp3.HttpUrl; import okhttp3.MediaType; import okhttp3.OkHttpClient; +import okhttp3.Request; import okhttp3.RequestBody; import okhttp3.ResponseBody; import okhttp3.logging.HttpLoggingInterceptor; import okhttp3.logging.HttpLoggingInterceptor.Level; import okio.BufferedSource; +import org.influxdb.BatchOptions; +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBException; +import org.influxdb.InfluxDBIOException; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; +import org.influxdb.dto.Pong; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.influxdb.impl.BatchProcessor.HttpBatchEntry; +import org.influxdb.impl.BatchProcessor.UdpBatchEntry; +import org.influxdb.msgpack.MessagePackConverterFactory; +import org.influxdb.msgpack.MessagePackTraverser; import retrofit2.Call; import retrofit2.Callback; +import 
retrofit2.Converter.Factory; import retrofit2.Response; import retrofit2.Retrofit; import retrofit2.converter.moshi.MoshiConverterFactory; import java.io.EOFException; import java.io.IOException; +import java.io.InputStream; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.SocketException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Implementation of a InluxDB API. @@ -56,51 +63,181 @@ * @author stefan.majer [at] gmail.com */ public class InfluxDBImpl implements InfluxDB { + + private static final String APPLICATION_MSGPACK = "application/x-msgpack"; + static final okhttp3.MediaType MEDIA_TYPE_STRING = MediaType.parse("text/plain"); private static final String SHOW_DATABASE_COMMAND_ENCODED = Query.encode("SHOW DATABASES"); - private final InetAddress hostAddress; - private final String username; - private final String password; + /** + * This static constant holds the http logging log level expected in DEBUG mode + * It is set by System property {@code org.influxdb.InfluxDB.logLevel}. 
+ * + * @see org.influxdb.InfluxDB#LOG_LEVEL_PROPERTY + */ + private static final LogLevel LOG_LEVEL = LogLevel.parseLogLevel(System.getProperty(LOG_LEVEL_PROPERTY)); + + private final String hostName; + private String version; private final Retrofit retrofit; + private final OkHttpClient client; private final InfluxDBService influxDBService; private BatchProcessor batchProcessor; private final AtomicBoolean batchEnabled = new AtomicBoolean(false); - private final AtomicLong writeCount = new AtomicLong(); - private final AtomicLong unBatchedCount = new AtomicLong(); - private final AtomicLong batchedCount = new AtomicLong(); + private final LongAdder writeCount = new LongAdder(); + private final LongAdder unBatchedCount = new LongAdder(); + private final LongAdder batchedCount = new LongAdder(); private volatile DatagramSocket datagramSocket; private final HttpLoggingInterceptor loggingInterceptor; private final GzipRequestInterceptor gzipRequestInterceptor; private LogLevel logLevel = LogLevel.NONE; - private JsonAdapter adapter; + private String database; + private String retentionPolicy = "autogen"; + private ConsistencyLevel consistency = ConsistencyLevel.ONE; + private final boolean messagePack; + private Boolean messagePackSupport; + private final ChunkProccesor chunkProccesor; + + /** + * Constructs a new {@code InfluxDBImpl}. + * + * @param url + * The InfluxDB server API URL + * @param username + * The InfluxDB user name + * @param password + * The InfluxDB user password + * @param okHttpBuilder + * The OkHttp Client Builder + * @param responseFormat + * The {@code ResponseFormat} to use for response from InfluxDB + * server + */ + public InfluxDBImpl(final String url, final String username, final String password, + final OkHttpClient.Builder okHttpBuilder, final ResponseFormat responseFormat) { + this(url, username, password, okHttpBuilder, new Retrofit.Builder(), responseFormat); + } + + /** + * Constructs a new {@code InfluxDBImpl}. 
+ * + * @param url + * The InfluxDB server API URL + * @param username + * The InfluxDB user name + * @param password + * The InfluxDB user password + * @param okHttpBuilder + * The OkHttp Client Builder + * @param retrofitBuilder + * The Retrofit Builder + * @param responseFormat + * The {@code ResponseFormat} to use for response from InfluxDB + * server + */ + public InfluxDBImpl(final String url, final String username, final String password, + final OkHttpClient.Builder okHttpBuilder, final Retrofit.Builder retrofitBuilder, + final ResponseFormat responseFormat) { + this.messagePack = ResponseFormat.MSGPACK.equals(responseFormat); + this.hostName = parseHost(url); + + this.loggingInterceptor = new HttpLoggingInterceptor(); + setLogLevel(LOG_LEVEL); + + this.gzipRequestInterceptor = new GzipRequestInterceptor(); + OkHttpClient.Builder clonedOkHttpBuilder = okHttpBuilder.build().newBuilder() + .addInterceptor(loggingInterceptor) + .addInterceptor(gzipRequestInterceptor); + if (username != null && password != null) { + clonedOkHttpBuilder.addInterceptor(new BasicAuthInterceptor(username, password)); + } + Factory converterFactory = null; + switch (responseFormat) { + case MSGPACK: + clonedOkHttpBuilder.addInterceptor(chain -> { + Request request = chain.request().newBuilder().addHeader("Accept", APPLICATION_MSGPACK).build(); + return chain.proceed(request); + }); + + converterFactory = MessagePackConverterFactory.create(); + chunkProccesor = new MessagePackChunkProccesor(); + break; + case JSON: + default: + converterFactory = MoshiConverterFactory.create(); + + Moshi moshi = new Moshi.Builder().build(); + JsonAdapter adapter = moshi.adapter(QueryResult.class); + chunkProccesor = new JSONChunkProccesor(adapter); + break; + } + + this.client = clonedOkHttpBuilder.build(); + Retrofit.Builder clonedRetrofitBuilder = retrofitBuilder.baseUrl(url).build().newBuilder(); + this.retrofit = clonedRetrofitBuilder.client(this.client) + 
.addConverterFactory(converterFactory).build(); + this.influxDBService = this.retrofit.create(InfluxDBService.class); + + } public InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client) { + this(url, username, password, client, ResponseFormat.JSON); + + } + + InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client, + final InfluxDBService influxDBService, final JsonAdapter adapter) { super(); - Moshi moshi = new Moshi.Builder().build(); - this.hostAddress = parseHostAddress(url); - this.username = username; - this.password = password; + this.messagePack = false; + this.hostName = parseHost(url); + this.loggingInterceptor = new HttpLoggingInterceptor(); - this.loggingInterceptor.setLevel(Level.NONE); + setLogLevel(LOG_LEVEL); + this.gzipRequestInterceptor = new GzipRequestInterceptor(); - this.retrofit = new Retrofit.Builder() - .baseUrl(url) - .client(client.addInterceptor(loggingInterceptor).addInterceptor(gzipRequestInterceptor).build()) - .addConverterFactory(MoshiConverterFactory.create()) - .build(); - this.influxDBService = this.retrofit.create(InfluxDBService.class); - this.adapter = moshi.adapter(QueryResult.class); + OkHttpClient.Builder clonedBuilder = client.build().newBuilder() + .addInterceptor(loggingInterceptor) + .addInterceptor(gzipRequestInterceptor) + .addInterceptor(new BasicAuthInterceptor(username, password)); + this.client = clonedBuilder.build(); + this.retrofit = new Retrofit.Builder().baseUrl(url) + .client(this.client) + .addConverterFactory(MoshiConverterFactory.create()).build(); + this.influxDBService = influxDBService; + + chunkProccesor = new JSONChunkProccesor(adapter); } - private InetAddress parseHostAddress(final String url) { - try { - return InetAddress.getByName(HttpUrl.parse(url).host()); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } + public InfluxDBImpl(final String url, final String 
username, final String password, final OkHttpClient.Builder client, + final String database, final String retentionPolicy, final ConsistencyLevel consistency) { + this(url, username, password, client); + + setConsistency(consistency); + setDatabase(database); + setRetentionPolicy(retentionPolicy); + } + + private String parseHost(final String url) { + String hostName; + try { + URI uri = new URI(url); + hostName = uri.getHost(); + } catch (URISyntaxException e1) { + throw new IllegalArgumentException("Unable to parse url: " + url, e1); + } + + if (hostName == null) { + throw new IllegalArgumentException("Unable to parse url: " + url); + } + + try { + InetAddress.getByName(hostName); + } catch (UnknownHostException e) { + throw new InfluxDBIOException(e); + } + return hostName; } @Override @@ -151,6 +288,34 @@ public boolean isGzipEnabled() { return this.gzipRequestInterceptor.isEnabled(); } + @Override + public InfluxDB enableBatch() { + enableBatch(BatchOptions.DEFAULTS); + return this; + } + + @Override + public InfluxDB enableBatch(final BatchOptions batchOptions) { + + if (this.batchEnabled.get()) { + throw new IllegalStateException("BatchProcessing is already enabled."); + } + this.batchProcessor = BatchProcessor + .builder(this) + .actions(batchOptions.getActions()) + .exceptionHandler(batchOptions.getExceptionHandler()) + .interval(batchOptions.getFlushDuration(), batchOptions.getJitterDuration(), TimeUnit.MILLISECONDS) + .threadFactory(batchOptions.getThreadFactory()) + .bufferLimit(batchOptions.getBufferLimit()) + .consistencyLevel(batchOptions.getConsistency()) + .precision(batchOptions.getPrecision()) + .dropActionsOnQueueExhaustion(batchOptions.isDropActionsOnQueueExhaustion()) + .droppedActionHandler(batchOptions.getDroppedActionHandler()) + .build(); + this.batchEnabled.set(true); + return this; + } + @Override public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit) { @@ -165,10 +330,28 @@ public 
InfluxDB enableBatch(final int actions, final int flushDuration, return this; } + @Override + public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, + final ThreadFactory threadFactory, + final BiConsumer, Throwable> exceptionHandler, + final ConsistencyLevel consistency) { + enableBatch(actions, flushDuration, flushDurationTimeUnit, threadFactory, exceptionHandler) + .setConsistency(consistency); + return this; + } + @Override public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit, final ThreadFactory threadFactory, final BiConsumer, Throwable> exceptionHandler) { + enableBatch(actions, flushDuration, 0, flushDurationTimeUnit, threadFactory, exceptionHandler, false, null); + return this; + } + + private InfluxDB enableBatch(final int actions, final int flushDuration, final int jitterDuration, + final TimeUnit durationTimeUnit, final ThreadFactory threadFactory, + final BiConsumer, Throwable> exceptionHandler, + final boolean dropActionsOnQueueExhaustion, final Consumer droppedActionHandler) { if (this.batchEnabled.get()) { throw new IllegalStateException("BatchProcessing is already enabled."); } @@ -176,8 +359,11 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti .builder(this) .actions(actions) .exceptionHandler(exceptionHandler) - .interval(flushDuration, flushDurationTimeUnit) + .interval(flushDuration, jitterDuration, durationTimeUnit) .threadFactory(threadFactory) + .consistencyLevel(consistency) + .dropActionsOnQueueExhaustion(dropActionsOnQueueExhaustion) + .droppedActionHandler(droppedActionHandler) .build(); this.batchEnabled.set(true); return this; @@ -188,12 +374,6 @@ public void disableBatch() { this.batchEnabled.set(false); if (this.batchProcessor != null) { this.batchProcessor.flushAndShutdown(); - if (this.logLevel != LogLevel.NONE) { - System.out.println( - "total writes:" + this.writeCount.get() - + " 
unbatched:" + this.unBatchedCount.get() - + " batchPoints:" + this.batchedCount); - } } } @@ -204,7 +384,7 @@ public boolean isBatchEnabled() { @Override public Pong ping() { - Stopwatch watch = Stopwatch.createStarted(); + final long started = System.currentTimeMillis(); Call call = this.influxDBService.ping(); try { Response response = call.execute(); @@ -218,16 +398,34 @@ public Pong ping() { } Pong pong = new Pong(); pong.setVersion(version); - pong.setResponseTime(watch.elapsed(TimeUnit.MILLISECONDS)); + pong.setResponseTime(System.currentTimeMillis() - started); return pong; } catch (IOException e) { - throw new RuntimeException(e); + throw new InfluxDBIOException(e); } } @Override public String version() { - return ping().getVersion(); + if (version == null) { + this.version = ping().getVersion(); + } + return this.version; + } + + @Override + public void write(final Point point) { + write(database, retentionPolicy, point); + } + + @Override + public void write(final String records) { + write(database, retentionPolicy, consistency, records); + } + + @Override + public void write(final List records) { + write(database, retentionPolicy, consistency, records); } @Override @@ -240,9 +438,9 @@ public void write(final String database, final String retentionPolicy, final Poi .retentionPolicy(retentionPolicy).build(); batchPoints.point(point); this.write(batchPoints); - this.unBatchedCount.incrementAndGet(); + this.unBatchedCount.increment(); } - this.writeCount.incrementAndGet(); + this.writeCount.increment(); } /** @@ -255,45 +453,67 @@ public void write(final int udpPort, final Point point) { this.batchProcessor.put(batchEntry); } else { this.write(udpPort, point.lineProtocol()); - this.unBatchedCount.incrementAndGet(); + this.unBatchedCount.increment(); } - this.writeCount.incrementAndGet(); + this.writeCount.increment(); } @Override public void write(final BatchPoints batchPoints) { - this.batchedCount.addAndGet(batchPoints.getPoints().size()); + 
this.batchedCount.add(batchPoints.getPoints().size()); RequestBody lineProtocol = RequestBody.create(MEDIA_TYPE_STRING, batchPoints.lineProtocol()); + String db = batchPoints.getDatabase(); + if (db == null) { + db = this.database; + } execute(this.influxDBService.writePoints( - this.username, - this.password, - batchPoints.getDatabase(), + db, batchPoints.getRetentionPolicy(), - TimeUtil.toTimePrecision(TimeUnit.NANOSECONDS), + TimeUtil.toTimePrecision(batchPoints.getPrecision()), batchPoints.getConsistency().value(), lineProtocol)); } + @Override + public void writeWithRetry(final BatchPoints batchPoints) { + if (isBatchEnabled()) { + batchProcessor.getBatchWriter().write(Collections.singleton(batchPoints)); + } else { + write(batchPoints); + } + } + @Override public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, - final String records) { + final TimeUnit precision, final String records) { execute(this.influxDBService.writePoints( - this.username, - this.password, database, retentionPolicy, - TimeUtil.toTimePrecision(TimeUnit.NANOSECONDS), + TimeUtil.toTimePrecision(precision), consistency.value(), RequestBody.create(MEDIA_TYPE_STRING, records))); } + @Override + public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, + final String records) { + write(database, retentionPolicy, consistency, TimeUnit.NANOSECONDS, records); + } + @Override public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, final List records) { - final String joinedRecords = Joiner.on("\n").join(records); - write(database, retentionPolicy, consistency, joinedRecords); + write(database, retentionPolicy, consistency, TimeUnit.NANOSECONDS, records); + } + + + @Override + public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency, + final TimeUnit precision, final List records) { + write(database, 
retentionPolicy, consistency, precision, String.join("\n", records)); } + /** * {@inheritDoc} */ @@ -302,9 +522,9 @@ public void write(final int udpPort, final String records) { initialDatagramSocket(); byte[] bytes = records.getBytes(StandardCharsets.UTF_8); try { - datagramSocket.send(new DatagramPacket(bytes, bytes.length, hostAddress, udpPort)); + datagramSocket.send(new DatagramPacket(bytes, bytes.length, new InetSocketAddress(hostName, udpPort))); } catch (IOException e) { - throw new RuntimeException(e); + throw new InfluxDBIOException(e); } } @@ -315,7 +535,7 @@ private void initialDatagramSocket() { try { datagramSocket = new DatagramSocket(); } catch (SocketException e) { - throw new RuntimeException(e); + throw new InfluxDBIOException(e); } } } @@ -327,8 +547,7 @@ private void initialDatagramSocket() { */ @Override public void write(final int udpPort, final List records) { - final String joinedRecords = Joiner.on("\n").join(records); - write(udpPort, joinedRecords); + write(udpPort, String.join("\n", records)); } /** @@ -336,60 +555,162 @@ public void write(final int udpPort, final List records) { */ @Override public QueryResult query(final Query query) { - Call call; - if (query.requiresPost()) { - call = this.influxDBService.postQuery(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); - } else { - call = this.influxDBService.query(this.username, - this.password, query.getDatabase(), query.getCommandWithUrlEncoded()); - } - return execute(call); + return executeQuery(callQuery(query)); } /** * {@inheritDoc} */ @Override - public void query(final Query query, final int chunkSize, final Consumer consumer) { + public void query(final Query query, final Consumer onSuccess, final Consumer onFailure) { + final Call call = callQuery(query); + call.enqueue(new Callback() { + @Override + public void onResponse(final Call call, final Response response) { + if (response.isSuccessful()) { + onSuccess.accept(response.body()); + } 
else { + Throwable t = null; + String errorBody = null; + + try { + if (response.errorBody() != null) { + errorBody = response.errorBody().string(); + } + } catch (IOException e) { + t = e; + } - if (version().startsWith("0.") || version().startsWith("1.0")) { - throw new RuntimeException("chunking not supported"); + if (t != null) { + onFailure.accept(new InfluxDBException(response.message(), t)); + } else if (errorBody != null) { + onFailure.accept(new InfluxDBException(response.message() + " - " + errorBody)); + } else { + onFailure.accept(new InfluxDBException(response.message())); + } } + } - Call call = this.influxDBService.query(this.username, this.password, - query.getDatabase(), query.getCommandWithUrlEncoded(), chunkSize); + @Override + public void onFailure(final Call call, final Throwable throwable) { + onFailure.accept(throwable); + } + }); + } - call.enqueue(new Callback() { - @Override - public void onResponse(final Call call, final Response response) { - try { - if (response.isSuccessful()) { - BufferedSource source = response.body().source(); - while (true) { - QueryResult result = InfluxDBImpl.this.adapter.fromJson(source); - if (result != null) { - consumer.accept(result); - } - } - } - try (ResponseBody errorBody = response.errorBody()) { - throw new RuntimeException(errorBody.string()); - } - } catch (EOFException e) { - QueryResult queryResult = new QueryResult(); - queryResult.setError("DONE"); - consumer.accept(queryResult); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + /** + * {@inheritDoc} + */ + @Override + public void query(final Query query, final int chunkSize, final Consumer onNext) { + query(query, chunkSize, onNext, () -> { }); + } + + /** + * {@inheritDoc} + */ + @Override + public void query(final Query query, final int chunkSize, final BiConsumer onNext) { + query(query, chunkSize, onNext, () -> { }); + } + + /** + * {@inheritDoc} + */ + @Override + public void query(final Query query, final int 
chunkSize, final Consumer onNext, + final Runnable onComplete) { + query(query, chunkSize, (cancellable, queryResult) -> onNext.accept(queryResult), onComplete); + } + + @Override + public void query(final Query query, final int chunkSize, final BiConsumer onNext, + final Runnable onComplete) { + query(query, chunkSize, onNext, onComplete, null); + } + + /** + * {@inheritDoc} + */ + @Override + public void query(final Query query, final int chunkSize, final BiConsumer onNext, + final Runnable onComplete, final Consumer onFailure) { + Call call; + if (query.hasBoundParameters()) { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize, + query.getParameterJsonWithUrlEncoded()); + } else { + call = this.influxDBService.query(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize, + query.getParameterJsonWithUrlEncoded()); + } + } else { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize); + } else { + call = this.influxDBService.query(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize); + } + } + + call.enqueue(new Callback() { + @Override + public void onResponse(final Call call, final Response response) { - @Override - public void onFailure(final Call call, final Throwable t) { - throw new RuntimeException(t); + Cancellable cancellable = new Cancellable() { + @Override + public void cancel() { + call.cancel(); + } + + @Override + public boolean isCanceled() { + return call.isCanceled(); + } + }; + + try { + if (response.isSuccessful()) { + ResponseBody chunkedBody = response.body(); + chunkProccesor.process(chunkedBody, cancellable, onNext, onComplete); + } else { + // REVIEW: must be handled consistently with IOException. 
+ ResponseBody errorBody = response.errorBody(); + if (errorBody != null) { + InfluxDBException influxDBException = new InfluxDBException(errorBody.string()); + if (onFailure == null) { + throw influxDBException; + } else { + onFailure.accept(influxDBException); + } } - }); + } + } catch (IOException e) { + QueryResult queryResult = new QueryResult(); + queryResult.setError(e.toString()); + onNext.accept(cancellable, queryResult); + //passing null onFailure consumer is here for backward compatibility + //where the empty queryResult containing error is propagating into onNext consumer + if (onFailure != null) { + onFailure.accept(e); + } + } catch (Exception e) { + call.cancel(); + if (onFailure != null) { + onFailure.accept(e); + } + } + + } + + @Override + public void onFailure(final Call call, final Throwable t) { + if (onFailure == null) { + throw new InfluxDBException(t); + } else { + onFailure.accept(t); + } + } + }); } /** @@ -397,8 +718,25 @@ public void onFailure(final Call call, final Throwable t) { */ @Override public QueryResult query(final Query query, final TimeUnit timeUnit) { - return execute(this.influxDBService.query(this.username, this.password, query.getDatabase(), - TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded())); + Call call; + if (query.hasBoundParameters()) { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), TimeUtil.toTimePrecision(timeUnit), + query.getCommandWithUrlEncoded(), query.getParameterJsonWithUrlEncoded()); + } else { + call = this.influxDBService.query(getDatabase(query), TimeUtil.toTimePrecision(timeUnit), + query.getCommandWithUrlEncoded(), query.getParameterJsonWithUrlEncoded()); + } + } else { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), + TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded()); + } else { + call = this.influxDBService.query(getDatabase(query), + TimeUtil.toTimePrecision(timeUnit), 
query.getCommandWithUrlEncoded(), null); + } + } + return executeQuery(call); } /** @@ -406,12 +744,9 @@ public QueryResult query(final Query query, final TimeUnit timeUnit) { */ @Override public void createDatabase(final String name) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Database name may not be null or empty"); + Preconditions.checkNonEmptyString(name, "name"); String createDatabaseQueryString = String.format("CREATE DATABASE \"%s\"", name); - if (this.version().startsWith("0.")) { - createDatabaseQueryString = String.format("CREATE DATABASE IF NOT EXISTS \"%s\"", name); - } - execute(this.influxDBService.postQuery(this.username, this.password, Query.encode(createDatabaseQueryString))); + executeQuery(this.influxDBService.postQuery(Query.encode(createDatabaseQueryString))); } /** @@ -419,8 +754,7 @@ public void createDatabase(final String name) { */ @Override public void deleteDatabase(final String name) { - execute(this.influxDBService.postQuery(this.username, this.password, - Query.encode("DROP DATABASE \"" + name + "\""))); + executeQuery(this.influxDBService.postQuery(Query.encode("DROP DATABASE \"" + name + "\""))); } /** @@ -428,12 +762,11 @@ public void deleteDatabase(final String name) { */ @Override public List describeDatabases() { - QueryResult result = execute(this.influxDBService.query(this.username, - this.password, SHOW_DATABASE_COMMAND_ENCODED)); + QueryResult result = executeQuery(this.influxDBService.postQuery(SHOW_DATABASE_COMMAND_ENCODED)); // {"results":[{"series":[{"name":"databases","columns":["name"],"values":[["mydb"]]}]}]} // Series [name=databases, columns=[name], values=[[mydb], [unittest_1433605300968]]] List> databaseNames = result.getResults().get(0).getSeries().get(0).getValues(); - List databases = Lists.newArrayList(); + List databases = new ArrayList<>(); if (databaseNames != null) { for (List database : databaseNames) { databases.add(database.get(0).toString()); @@ -456,6 +789,60 @@ public boolean 
databaseExists(final String name) { return false; } + /** + * Calls the influxDBService for the query. + */ + private Call callQuery(final Query query) { + Call call; + if (query.hasBoundParameters()) { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded(), + query.getParameterJsonWithUrlEncoded()); + } else { + call = this.influxDBService.query(getDatabase(query), null, query.getCommandWithUrlEncoded(), + query.getParameterJsonWithUrlEncoded()); + } + } else { + if (query.requiresPost()) { + call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded()); + } else { + call = this.influxDBService.query(getDatabase(query), query.getCommandWithUrlEncoded()); + } + } + return call; + } + + static class ErrorMessage { + public String error; + } + + private boolean checkMessagePackSupport() { + Matcher matcher = Pattern.compile("(\\d+\\.*)+").matcher(version()); + if (!matcher.find()) { + return false; + } + String s = matcher.group(); + String[] versionNumbers = s.split("\\."); + final int major = Integer.parseInt(versionNumbers[0]); + final int minor = Integer.parseInt(versionNumbers[1]); + final int fromMinor = 4; + return (major >= 2) || ((major == 1) && (minor >= fromMinor)); + } + + private QueryResult executeQuery(final Call call) { + if (messagePack) { + if (messagePackSupport == null) { + messagePackSupport = checkMessagePackSupport(); + } + + if (!messagePackSupport) { + throw new UnsupportedOperationException( + "MessagePack format is only supported from InfluxDB version 1.4 and later"); + } + } + return execute(call); + } + private T execute(final Call call) { try { Response response = call.execute(); @@ -463,10 +850,14 @@ private T execute(final Call call) { return response.body(); } try (ResponseBody errorBody = response.errorBody()) { - throw new RuntimeException(errorBody.string()); + if (messagePack) { + throw 
InfluxDBException.buildExceptionForErrorState(errorBody.byteStream()); + } else { + throw InfluxDBException.buildExceptionForErrorState(errorBody.string()); + } } } catch (IOException e) { - throw new RuntimeException(e); + throw new InfluxDBIOException(e); } } @@ -493,6 +884,156 @@ public void close() { datagramSocket.close(); } } + this.client.dispatcher().executorService().shutdown(); + this.client.connectionPool().evictAll(); + } + + @Override + public InfluxDB setConsistency(final ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + @Override + public InfluxDB setDatabase(final String database) { + this.database = database; + return this; + } + + @Override + public InfluxDB setRetentionPolicy(final String retentionPolicy) { + this.retentionPolicy = retentionPolicy; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor, final boolean isDefault) { + Preconditions.checkNonEmptyString(rpName, "retentionPolicyName"); + Preconditions.checkNonEmptyString(database, "database"); + Preconditions.checkNonEmptyString(duration, "retentionDuration"); + Preconditions.checkDuration(duration, "retentionDuration"); + if (shardDuration != null && !shardDuration.isEmpty()) { + Preconditions.checkDuration(shardDuration, "shardDuration"); + } + Preconditions.checkPositiveNumber(replicationFactor, "replicationFactor"); + + StringBuilder queryBuilder = new StringBuilder("CREATE RETENTION POLICY \""); + queryBuilder.append(rpName) + .append("\" ON \"") + .append(database) + .append("\" DURATION ") + .append(duration) + .append(" REPLICATION ") + .append(replicationFactor); + if (shardDuration != null && !shardDuration.isEmpty()) { + queryBuilder.append(" SHARD DURATION "); + queryBuilder.append(shardDuration); + } + if (isDefault) { + queryBuilder.append(" DEFAULT"); + } + 
executeQuery(this.influxDBService.postQuery(Query.encode(queryBuilder.toString()))); + } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final int replicationFactor, final boolean isDefault) { + createRetentionPolicy(rpName, database, duration, null, replicationFactor, isDefault); + } + + /** + * {@inheritDoc} + */ + @Override + public void createRetentionPolicy(final String rpName, final String database, final String duration, + final String shardDuration, final int replicationFactor) { + createRetentionPolicy(rpName, database, duration, null, replicationFactor, false); + } + + /** + * {@inheritDoc} + * @param rpName the name of the retentionPolicy + * @param database the name of the database + */ + @Override + public void dropRetentionPolicy(final String rpName, final String database) { + Preconditions.checkNonEmptyString(rpName, "retentionPolicyName"); + Preconditions.checkNonEmptyString(database, "database"); + StringBuilder queryBuilder = new StringBuilder("DROP RETENTION POLICY \""); + queryBuilder.append(rpName) + .append("\" ON \"") + .append(database) + .append("\""); + executeQuery(this.influxDBService.postQuery(Query.encode(queryBuilder.toString()))); + } + + private String getDatabase(final Query query) { + String db = query.getDatabase(); + if (db == null) { + return this.database; + } + return db; + } + + private interface ChunkProccesor { + void process(ResponseBody chunkedBody, Cancellable cancellable, + BiConsumer consumer, Runnable onComplete) throws IOException; + } + + private class MessagePackChunkProccesor implements ChunkProccesor { + @Override + public void process(final ResponseBody chunkedBody, final Cancellable cancellable, + final BiConsumer consumer, final Runnable onComplete) + throws IOException { + MessagePackTraverser traverser = new MessagePackTraverser(); + try (InputStream is = chunkedBody.byteStream()) { + for (Iterator it = 
traverser.traverse(is).iterator(); it.hasNext() && !cancellable.isCanceled();) { + QueryResult result = it.next(); + consumer.accept(cancellable, result); + } + } + if (!cancellable.isCanceled()) { + onComplete.run(); + } + } } + private class JSONChunkProccesor implements ChunkProccesor { + private JsonAdapter adapter; + + public JSONChunkProccesor(final JsonAdapter adapter) { + this.adapter = adapter; + } + + @Override + public void process(final ResponseBody chunkedBody, final Cancellable cancellable, + final BiConsumer consumer, final Runnable onComplete) + throws IOException { + try { + BufferedSource source = chunkedBody.source(); + while (!cancellable.isCanceled()) { + QueryResult result = adapter.fromJson(source); + if (result != null) { + consumer.accept(cancellable, result); + } + } + } catch (EOFException e) { + QueryResult queryResult = new QueryResult(); + queryResult.setError("DONE"); + consumer.accept(cancellable, queryResult); + if (!cancellable.isCanceled()) { + onComplete.run(); + } + } finally { + chunkedBody.close(); + } + } + } } diff --git a/src/main/java/org/influxdb/impl/InfluxDBMapper.java b/src/main/java/org/influxdb/impl/InfluxDBMapper.java new file mode 100644 index 000000000..2a6c0dc4c --- /dev/null +++ b/src/main/java/org/influxdb/impl/InfluxDBMapper.java @@ -0,0 +1,62 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.annotation.Measurement; +import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; + +import java.util.List; + +public class InfluxDBMapper extends InfluxDBResultMapper { + + private final InfluxDB influxDB; + + public InfluxDBMapper(final InfluxDB influxDB) { + this.influxDB = influxDB; + } + + public List query(final Query query, final Class clazz, final String measurementName) { + QueryResult queryResult = influxDB.query(query); + return toPOJO(queryResult, clazz, measurementName); + } + + public List query(final Query query, final Class 
clazz) { + throwExceptionIfMissingAnnotation(clazz); + QueryResult queryResult = influxDB.query(query); + return toPOJO(queryResult, clazz); + } + + public List query(final Class clazz) { + throwExceptionIfMissingAnnotation(clazz); + + String measurement = getMeasurementName(clazz); + String database = getDatabaseName(clazz); + + if ("[unassigned]".equals(database)) { + throw new IllegalArgumentException( + Measurement.class.getSimpleName() + + " of class " + + clazz.getName() + + " should specify a database value for this operation"); + } + + QueryResult queryResult = influxDB.query(new Query("SELECT * FROM " + measurement, database)); + return toPOJO(queryResult, clazz); + } + + public void save(final T model) { + throwExceptionIfMissingAnnotation(model.getClass()); + Class modelType = model.getClass(); + String database = getDatabaseName(modelType); + String retentionPolicy = getRetentionPolicy(modelType); + Point.Builder pointBuilder = Point.measurementByPOJO(modelType).addFieldsFromPOJO(model); + Point point = pointBuilder.build(); + + if ("[unassigned]".equals(database)) { + influxDB.write(point); + } else { + influxDB.write(database, retentionPolicy, point); + } + } +} diff --git a/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java new file mode 100644 index 000000000..2cfdeced7 --- /dev/null +++ b/src/main/java/org/influxdb/impl/InfluxDBResultMapper.java @@ -0,0 +1,499 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following 
conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.impl; + +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Exclude; +import org.influxdb.annotation.Measurement; +import org.influxdb.dto.QueryResult; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Modifier; +import java.lang.reflect.Parameter; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.time.Instant; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.ChronoField; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Main class responsible for mapping a QueryResult to a POJO. + * + * @author fmachado + */ +public class InfluxDBResultMapper { + + /** + * Data structure used to cache classes used as measurements. 
+ */ + private static class ClassInfo { + ConcurrentMap fieldMap; + ConcurrentMap typeMappers; + } + private static final + ConcurrentMap CLASS_INFO_CACHE = new ConcurrentHashMap<>(); + + /** + * Data structure used to cache records used as measurements. + */ + private static class RecordInfo { + Constructor constructor; + ConcurrentMap constructorParamIndexes; + } + private static final + ConcurrentMap RECORD_INFO = new ConcurrentHashMap<>(); + + private static final int FRACTION_MIN_WIDTH = 0; + private static final int FRACTION_MAX_WIDTH = 9; + private static final boolean ADD_DECIMAL_POINT = true; + + // Support both standard and Android desugared records + private static final Collection RECORD_CLASS_NAMES = + new HashSet<>(Arrays.asList("java.lang.Record", "com.android.tools.r8.RecordTag")); + + /** + * When a query is executed without {@link TimeUnit}, InfluxDB returns the {@code time} + * column as a RFC3339 date. + */ + private static final DateTimeFormatter RFC3339_FORMATTER = new DateTimeFormatterBuilder() + .appendPattern("yyyy-MM-dd'T'HH:mm:ss") + .appendFraction(ChronoField.NANO_OF_SECOND, FRACTION_MIN_WIDTH, FRACTION_MAX_WIDTH, ADD_DECIMAL_POINT) + .appendZoneOrOffsetId() + .toFormatter(); + + /** + *

+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *

+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @param the target type + * + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * {@code clazz} parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz) throws InfluxDBMapperException { + return toPOJO(queryResult, clazz, TimeUnit.MILLISECONDS); + } + + /** + *

+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *

+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @param precision the time precision of results + * @param the target type + * + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * {@code clazz} parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz, + final TimeUnit precision) throws InfluxDBMapperException { + throwExceptionIfMissingAnnotation(clazz); + String measurementName = getMeasurementName(clazz); + return this.toPOJO(queryResult, clazz, measurementName, precision); + } + + /** + *

+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *

+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @param the target type + * @param measurementName name of the Measurement + * + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * {@code clazz} parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz, final String measurementName) + throws InfluxDBMapperException { + return toPOJO(queryResult, clazz, measurementName, TimeUnit.MILLISECONDS); + } + + /** + *

+ * Process a {@link QueryResult} object returned by the InfluxDB client inspecting the internal + * data structure and creating the respective object instances based on the Class passed as + * parameter. + *

+ * + * @param queryResult the InfluxDB result object + * @param clazz the Class that will be used to hold your measurement data + * @param the target type + * @param measurementName name of the Measurement + * @param precision the time precision of results + * + * @return a {@link List} of objects from the same Class passed as parameter and sorted on the + * same order as received from InfluxDB. + * + * @throws InfluxDBMapperException If {@link QueryResult} parameter contain errors, + * {@code clazz} parameter is not annotated with @Measurement or it was not + * possible to define the values of your POJO (e.g. due to an unsupported field type). + */ + public List toPOJO(final QueryResult queryResult, final Class clazz, final String measurementName, + final TimeUnit precision) + throws InfluxDBMapperException { + + Objects.requireNonNull(measurementName, "measurementName"); + Objects.requireNonNull(queryResult, "queryResult"); + Objects.requireNonNull(clazz, "clazz"); + + throwExceptionIfResultWithError(queryResult); + + if (isRecordClass(clazz)) { + cacheRecordClass(clazz); + } else { + cacheMeasurementClass(clazz); + } + + List result = new LinkedList<>(); + + queryResult.getResults().stream() + .filter(internalResult -> Objects.nonNull(internalResult) && Objects.nonNull(internalResult.getSeries())) + .forEach(internalResult -> internalResult.getSeries().stream() + .filter(series -> series.getName().equals(measurementName)) + .forEachOrdered(series -> parseSeriesAs(series, clazz, result, precision))); + + return result; + } + + void throwExceptionIfMissingAnnotation(final Class clazz) { + if (!clazz.isAnnotationPresent(Measurement.class)) { + throw new IllegalArgumentException( + "Class " + clazz.getName() + " is not annotated with @" + Measurement.class.getSimpleName()); + } + } + + void throwExceptionIfResultWithError(final QueryResult queryResult) { + if (queryResult.getError() != null) { + throw new InfluxDBMapperException("InfluxDB returned an error: " + 
queryResult.getError()); + } + + queryResult.getResults().forEach(seriesResult -> { + if (seriesResult.getError() != null) { + throw new InfluxDBMapperException("InfluxDB returned an error with Series: " + seriesResult.getError()); + } + }); + } + + void cacheMeasurementClass(final Class... classVarAgrs) { + for (Class clazz : classVarAgrs) { + if (CLASS_INFO_CACHE.containsKey(clazz.getName())) { + continue; + } + ConcurrentMap fieldMap = new ConcurrentHashMap<>(); + ConcurrentMap typeMappers = new ConcurrentHashMap<>(); + + Measurement measurement = clazz.getAnnotation(Measurement.class); + boolean allFields = measurement != null && measurement.allFields(); + + Class c = clazz; + TypeMapper typeMapper = TypeMapper.empty(); + while (c != null) { + for (Field field : c.getDeclaredFields()) { + Column colAnnotation = field.getAnnotation(Column.class); + if (colAnnotation == null && !(allFields + && !field.isAnnotationPresent(Exclude.class) && !Modifier.isStatic(field.getModifiers()))) { + continue; + } + + fieldMap.put(getFieldName(field, colAnnotation), field); + typeMappers.put(field, typeMapper); + } + + Class superclass = c.getSuperclass(); + Type genericSuperclass = c.getGenericSuperclass(); + if (genericSuperclass instanceof ParameterizedType) { + typeMapper = TypeMapper.of((ParameterizedType) genericSuperclass, superclass); + } else { + typeMapper = TypeMapper.empty(); + } + + c = superclass; + } + + ClassInfo classInfo = new ClassInfo(); + classInfo.fieldMap = fieldMap; + classInfo.typeMappers = typeMappers; + CLASS_INFO_CACHE.putIfAbsent(clazz.getName(), classInfo); + } + } + + static void cacheRecordClass(final Class clazz) { + if (RECORD_INFO.containsKey(clazz.getName())) { + return; + } + + Map components = Arrays.stream(clazz.getDeclaredFields()) + .filter(field -> !Modifier.isStatic(field.getModifiers())) + .collect(Collectors.toMap(Field::getName, Field::getGenericType)); + boolean found = false; + for (Constructor constructor : 
clazz.getDeclaredConstructors()) { + Parameter[] parameters = constructor.getParameters(); + Map parameterTypes = Arrays.stream(parameters) + .collect(Collectors.toMap(Parameter::getName, Parameter::getParameterizedType)); + if (!parameterTypes.equals(components)) { + continue; + } + + if (found) { + throw new InfluxDBMapperException(String.format( + "Multiple constructors match set of components for record %s", clazz.getName())); + } + + RecordInfo recordInfo = new RecordInfo(); + recordInfo.constructor = constructor; + + try { + ConcurrentMap constructorParamIndexes = new ConcurrentHashMap<>(parameters.length); + for (int i = 0; i < parameters.length; i++) { + Field field = clazz.getDeclaredField(parameters[i].getName()); + Column colAnnotation = field.getAnnotation(Column.class); + String propertyName = getFieldName(field, colAnnotation); + constructorParamIndexes.put(propertyName, i); + } + recordInfo.constructorParamIndexes = constructorParamIndexes; + } catch (NoSuchFieldException e) { + throw new InfluxDBMapperException(e); + } + + RECORD_INFO.putIfAbsent(clazz.getName(), recordInfo); + found = true; + } + } + + private static String getFieldName(final Field field, final Column colAnnotation) { + if (colAnnotation != null && !colAnnotation.name().isEmpty()) { + return colAnnotation.name(); + } + + return field.getName(); + } + + String getMeasurementName(final Class clazz) { + return ((Measurement) clazz.getAnnotation(Measurement.class)).name(); + } + + String getDatabaseName(final Class clazz) { + return ((Measurement) clazz.getAnnotation(Measurement.class)).database(); + } + + String getRetentionPolicy(final Class clazz) { + return ((Measurement) clazz.getAnnotation(Measurement.class)).retentionPolicy(); + } + + List parseSeriesAs(final QueryResult.Series series, final Class clazz, final List result) { + return parseSeriesAs(series, clazz, result, TimeUnit.MILLISECONDS); + } + + List parseSeriesAs(final QueryResult.Series series, final Class clazz, final 
List result, + final TimeUnit precision) { + int columnSize = series.getColumns().size(); + + if (isRecordClass(clazz)) { + RecordInfo recordInfo = RECORD_INFO.get(clazz.getName()); + try { + T object = null; + for (List row : series.getValues()) { + Object[] constructorParams = new Object[recordInfo.constructor.getParameterTypes().length]; + for (int i = 0; i < columnSize; i++) { + String columnName = series.getColumns().get(i); /*InfluxDB columnName*/ + addParam(clazz, precision, recordInfo, constructorParams, columnName, row.get(i)); + } + // When the "GROUP BY" clause is used, "tags" are returned as Map and + // accordingly with InfluxDB documentation + // https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value + // "tag" values are always String. + if (series.getTags() != null) { + for (Entry entry : series.getTags().entrySet()) { + addParam(clazz, precision, recordInfo, constructorParams, entry.getKey()/*InfluxDB columnName*/, + entry.getValue()); + } + } + + //noinspection unchecked + result.add((T) recordInfo.constructor.newInstance(constructorParams)); + } + } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { + throw new InfluxDBMapperException(e); + } + } else { + ClassInfo classInfo = CLASS_INFO_CACHE.get(clazz.getName()); + try { + T object = null; + for (List row : series.getValues()) { + for (int i = 0; i < columnSize; i++) { + Field correspondingField = classInfo.fieldMap.get(series.getColumns().get(i)/*InfluxDB columnName*/); + if (correspondingField != null) { + if (object == null) { + object = clazz.newInstance(); + } + setFieldValue(object, correspondingField, row.get(i), precision, + classInfo.typeMappers.get(correspondingField)); + } + } + // When the "GROUP BY" clause is used, "tags" are returned as Map and + // accordingly with InfluxDB documentation + // https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value + // "tag" values are always String. 
+ if (series.getTags() != null && !series.getTags().isEmpty()) { + for (Entry entry : series.getTags().entrySet()) { + Field correspondingField = classInfo.fieldMap.get(entry.getKey()/*InfluxDB columnName*/); + if (correspondingField != null) { + // I don't think it is possible to reach here without a valid "object" + setFieldValue(object, correspondingField, entry.getValue(), precision, + classInfo.typeMappers.get(correspondingField)); + } + } + } + if (object != null) { + result.add(object); + object = null; + } + } + } catch (InstantiationException | IllegalAccessException e) { + throw new InfluxDBMapperException(e); + } + } + return result; + } + + private static void addParam(final Class clazz, final TimeUnit precision, final RecordInfo recordInfo, + final Object[] constructorParams, final String columnName, final Object value) { + Parameter parameter = recordInfo.constructor.getParameters() + [recordInfo.constructorParamIndexes.get(columnName).intValue()]; + constructorParams[recordInfo.constructorParamIndexes.get(columnName).intValue()] = + adaptValue(parameter.getType(), value, precision, parameter.getName(), clazz.getName()); + } + + private static boolean isRecordClass(final Class clazz) { + return RECORD_CLASS_NAMES.contains(clazz.getSuperclass().getName()); + } + + /** + * InfluxDB client returns any number as Double. + * See ... + * for more information. 
+ * + */ + private static void setFieldValue(final T object, final Field field, final Object value, final TimeUnit precision, + final TypeMapper typeMapper) + throws IllegalArgumentException, IllegalAccessException { + if (value == null) { + return; + } + Type fieldType = typeMapper.resolve(field.getGenericType()); + if (!field.isAccessible()) { + field.setAccessible(true); + } + field.set(object, adaptValue((Class) fieldType, value, precision, field.getName(), object.getClass().getName())); + } + + private static Object adaptValue(final Class fieldType, final Object value, final TimeUnit precision, + final String fieldName, final String className) { + try { + if (String.class.isAssignableFrom(fieldType)) { + return String.valueOf(value); + } + if (Instant.class.isAssignableFrom(fieldType)) { + if (value instanceof String) { + return Instant.from(RFC3339_FORMATTER.parse(String.valueOf(value))); + } + if (value instanceof Long) { + return Instant.ofEpochMilli(toMillis(((Long) value).longValue(), precision)); + } + if (value instanceof Double) { + return Instant.ofEpochMilli(toMillis(((Double) value).longValue(), precision)); + } + if (value instanceof Integer) { + return Instant.ofEpochMilli(toMillis(((Integer) value).longValue(), precision)); + } + throw new InfluxDBMapperException("Unsupported type " + fieldType + " for field " + fieldName); + } + if (Double.class.isAssignableFrom(fieldType) || double.class.isAssignableFrom(fieldType)) { + return value; + } + if (Long.class.isAssignableFrom(fieldType) || long.class.isAssignableFrom(fieldType)) { + return ((Double) value).longValue(); + } + if (Integer.class.isAssignableFrom(fieldType) || int.class.isAssignableFrom(fieldType)) { + return ((Double) value).intValue(); + } + if (Boolean.class.isAssignableFrom(fieldType) || boolean.class.isAssignableFrom(fieldType)) { + return Boolean.valueOf(String.valueOf(value)); + } + if (Enum.class.isAssignableFrom(fieldType)) { + //noinspection unchecked + return 
Enum.valueOf((Class) fieldType, String.valueOf(value)); + } + } catch (ClassCastException e) { + String msg = "Class '%s' field '%s' was defined with a different field type and caused a ClassCastException. " + + "The correct type is '%s' (current field value: '%s')."; + throw new InfluxDBMapperException(String.format(msg, className, fieldName, value.getClass().getName(), value), e); + } + + throw new InfluxDBMapperException( + String.format("Class '%s' field '%s' is from an unsupported type '%s'.", className, fieldName, fieldType)); + } + + private static long toMillis(final long value, final TimeUnit precision) { + return TimeUnit.MILLISECONDS.convert(value, precision); + } +} diff --git a/src/main/java/org/influxdb/impl/InfluxDBService.java b/src/main/java/org/influxdb/impl/InfluxDBService.java index 6485f8654..061a76615 100644 --- a/src/main/java/org/influxdb/impl/InfluxDBService.java +++ b/src/main/java/org/influxdb/impl/InfluxDBService.java @@ -6,6 +6,8 @@ import okhttp3.ResponseBody; import retrofit2.Call; import retrofit2.http.Body; +import retrofit2.http.Field; +import retrofit2.http.FormUrlEncoded; import retrofit2.http.GET; import retrofit2.http.POST; import retrofit2.http.Query; @@ -18,12 +20,13 @@ interface InfluxDBService { public static final String Q = "q"; public static final String DB = "db"; public static final String RP = "rp"; + public static final String PARAMS = "params"; public static final String PRECISION = "precision"; public static final String CONSISTENCY = "consistency"; public static final String EPOCH = "epoch"; public static final String CHUNK_SIZE = "chunk_size"; - @GET("/ping") + @GET("ping") public Call ping(); /** @@ -37,35 +40,58 @@ interface InfluxDBService { * @param consistency optional The write consistency level required for the write to succeed. * Can be one of one, any, all, quorum. Defaults to all. 
*/ - @POST("/write") - public Call writePoints(@Query(U) String username, - @Query(P) String password, @Query(DB) String database, + @POST("write") + public Call writePoints(@Query(DB) String database, @Query(RP) String retentionPolicy, @Query(PRECISION) String precision, @Query(CONSISTENCY) String consistency, @Body RequestBody batchPoints); - @GET("/query") - public Call query(@Query(U) String username, @Query(P) String password, @Query(DB) String db, - @Query(EPOCH) String epoch, @Query(value = Q, encoded = true) String query); + @GET("query") + public Call query(@Query(DB) String db, + @Query(EPOCH) String epoch, @Query(value = Q, encoded = true) String query, + @Query(value = PARAMS, encoded = true) String params); - @GET("/query") - public Call query(@Query(U) String username, @Query(P) String password, @Query(DB) String db, + @GET("query") + public Call query(@Query(DB) String db, @Query(value = Q, encoded = true) String query); - @POST("/query") - public Call postQuery(@Query(U) String username, @Query(P) String password, @Query(DB) String db, - @Query(value = Q, encoded = true) String query); + @POST("query") + @FormUrlEncoded + public Call postQuery(@Query(DB) String db, + @Field(value = Q, encoded = true) String query); - @GET("/query") - public Call query(@Query(U) String username, @Query(P) String password, - @Query(value = Q, encoded = true) String query); + @POST("query") + @FormUrlEncoded + public Call postQuery(@Query(DB) String db, @Query(EPOCH) String epoch, + @Field(value = Q, encoded = true) String query); - @POST("/query") - public Call postQuery(@Query(U) String username, - @Query(P) String password, @Query(value = Q, encoded = true) String query); + @POST("query") + @FormUrlEncoded + public Call postQuery(@Query(DB) String db, @Query(EPOCH) String epoch, + @Field(value = Q, encoded = true) String query, @Query(value = PARAMS, encoded = true) String params); @Streaming - @GET("/query?chunked=true") - public Call query(@Query(U) String 
username, - @Query(P) String password, @Query(DB) String db, @Query(value = Q, encoded = true) String query, + @POST("query?chunked=true") + @FormUrlEncoded + public Call postQuery(@Query(DB) String db, @Field(value = Q, encoded = true) String query, + @Query(CHUNK_SIZE) int chunkSize); + + @Streaming + @POST("query?chunked=true") + @FormUrlEncoded + public Call postQuery(@Query(DB) String db, @Field(value = Q, encoded = true) String query, + @Query(CHUNK_SIZE) int chunkSize, @Query(value = PARAMS, encoded = true) String params); + + @POST("query") + @FormUrlEncoded + public Call postQuery(@Field(value = Q, encoded = true) String query); + + @Streaming + @GET("query?chunked=true") + public Call query(@Query(DB) String db, @Query(value = Q, encoded = true) String query, @Query(CHUNK_SIZE) int chunkSize); + + @Streaming + @GET("query?chunked=true") + public Call query(@Query(DB) String db, @Query(value = Q, encoded = true) String query, + @Query(CHUNK_SIZE) int chunkSize, @Query(value = PARAMS, encoded = true) String params); } diff --git a/src/main/java/org/influxdb/impl/OneShotBatchWriter.java b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java new file mode 100644 index 000000000..96754f144 --- /dev/null +++ b/src/main/java/org/influxdb/impl/OneShotBatchWriter.java @@ -0,0 +1,30 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.dto.BatchPoints; + +import java.util.Collection; + +/** + * Batch writer that tries to write BatchPoints exactly once. 
+ */ +class OneShotBatchWriter implements BatchWriter { + + private InfluxDB influxDB; + + OneShotBatchWriter(final InfluxDB influxDB) { + this.influxDB = influxDB; + } + + @Override + public void write(final Collection batchPointsCollection) { + for (BatchPoints batchPoints : batchPointsCollection) { + influxDB.write(batchPoints); + } + } + + @Override + public void close() { + + } +} diff --git a/src/main/java/org/influxdb/impl/Preconditions.java b/src/main/java/org/influxdb/impl/Preconditions.java new file mode 100644 index 000000000..1e34204a3 --- /dev/null +++ b/src/main/java/org/influxdb/impl/Preconditions.java @@ -0,0 +1,62 @@ +package org.influxdb.impl; + +/** + * Functions for parameter validation. + * + * @author Simon Legner + */ +public final class Preconditions { + + private Preconditions() { + } + + /** + * Enforces that the string is {@linkplain String#isEmpty() not empty}. + * @param string the string to test + * @param name variable name for reporting + * @return {@code string} + * @throws IllegalArgumentException if the string is empty + */ + public static String checkNonEmptyString(final String string, final String name) throws IllegalArgumentException { + if (string == null || string.isEmpty()) { + throw new IllegalArgumentException("Expecting a non-empty string for " + name); + } + return string; + } + + /** + * Enforces that the number is larger than 0. + * @param number the number to test + * @param name variable name for reporting + * @throws IllegalArgumentException if the number is less or equal to 0 + */ + public static void checkPositiveNumber(final Number number, final String name) throws IllegalArgumentException { + if (number == null || number.doubleValue() <= 0) { + throw new IllegalArgumentException("Expecting a positive number for " + name); + } + } + + /** + * Enforces that the number is not negative. 
+ * @param number the number to test + * @param name variable name for reporting + * @throws IllegalArgumentException if the number is less or equal to 0 + */ + public static void checkNotNegativeNumber(final Number number, final String name) throws IllegalArgumentException { + if (number == null || number.doubleValue() < 0) { + throw new IllegalArgumentException("Expecting a positive or zero number for " + name); + } + } + /** + * Enforces that the duration is a valid influxDB duration. + * @param duration the duration to test + * @param name variable name for reporting + * @throws IllegalArgumentException if the given duration is not valid. + */ + public static void checkDuration(final String duration, final String name) throws IllegalArgumentException { + if (!duration.matches("(\\d+[wdmhs])+|inf")) { + throw new IllegalArgumentException("Invalid InfluxDB duration: " + duration + + " for " + name); + } + } +} diff --git a/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java new file mode 100644 index 000000000..e2b08190b --- /dev/null +++ b/src/main/java/org/influxdb/impl/RetryCapableBatchWriter.java @@ -0,0 +1,161 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBException; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; + +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.function.BiConsumer; + +/** + * Batch writer that tries to retry a write if it failed previously and + * the reason of the failure is not permanent. 
+ */ +class RetryCapableBatchWriter implements BatchWriter { + + private InfluxDB influxDB; + private BiConsumer, Throwable> exceptionHandler; + private LinkedList batchQueue; + private int requestActionsLimit; + private int retryBufferCapacity; + private int usedRetryBufferCapacity; + + RetryCapableBatchWriter(final InfluxDB influxDB, final BiConsumer, Throwable> exceptionHandler, + final int retryBufferCapacity, final int requestActionsLimit) { + this.influxDB = influxDB; + this.exceptionHandler = exceptionHandler; + batchQueue = new LinkedList<>(); + this.retryBufferCapacity = retryBufferCapacity; + this.requestActionsLimit = requestActionsLimit; + } + + private enum WriteResultOutcome { WRITTEN, FAILED_RETRY_POSSIBLE, FAILED_RETRY_IMPOSSIBLE } + + private static final class WriteResult { + + static final WriteResult WRITTEN = new WriteResult(WriteResultOutcome.WRITTEN); + + WriteResultOutcome outcome; + Throwable throwable; + + private WriteResult(final WriteResultOutcome outcome) { + this.outcome = outcome; + } + + private WriteResult(final WriteResultOutcome outcome, final Throwable throwable) { + this.outcome = outcome; + this.throwable = throwable; + } + + private WriteResult(final InfluxDBException e) { + this.throwable = e; + if (e.isRetryWorth()) { + this.outcome = WriteResultOutcome.FAILED_RETRY_POSSIBLE; + } else { + this.outcome = WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE; + } + } + } + + /* This method is synchronized to avoid parallel execution when the user invokes flush/close + * of the client in the middle of scheduled write execution (buffer flush / action limit overrun) */ + @Override + public synchronized void write(final Collection collection) { + // empty the cached data first + ListIterator batchQueueIterator = batchQueue.listIterator(); + while (batchQueueIterator.hasNext()) { + BatchPoints entry = batchQueueIterator.next(); + WriteResult result = tryToWrite(entry); + if (result.outcome == WriteResultOutcome.WRITTEN + || result.outcome == 
WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { + batchQueueIterator.remove(); + usedRetryBufferCapacity -= entry.getPoints().size(); + // we are throwing out data, notify the client + if (result.outcome == WriteResultOutcome.FAILED_RETRY_IMPOSSIBLE) { + exceptionHandler.accept(entry.getPoints(), result.throwable); + } + } else { + // we cannot send more data otherwise we would write them in different + // order than in which were submitted + for (BatchPoints batchPoints : collection) { + addToBatchQueue(batchPoints); + } + return; + } + } + // write the last given batch last so that duplicate data points get overwritten correctly + Iterator collectionIterator = collection.iterator(); + while (collectionIterator.hasNext()) { + BatchPoints batchPoints = collectionIterator.next(); + WriteResult result = tryToWrite(batchPoints); + switch (result.outcome) { + case FAILED_RETRY_POSSIBLE: + addToBatchQueue(batchPoints); + while (collectionIterator.hasNext()) { + addToBatchQueue(collectionIterator.next()); + } + break; + case FAILED_RETRY_IMPOSSIBLE: + exceptionHandler.accept(batchPoints.getPoints(), result.throwable); + break; + default: + + } + } + } + + /* This method is synchronized to avoid parallel execution when the BatchProcessor scheduler + * has been shutdown but there are jobs still being executed (using RetryCapableBatchWriter.write).*/ + @Override + public synchronized void close() { + // try to write everything queued / buffered + for (BatchPoints points : batchQueue) { + WriteResult result = tryToWrite(points); + if (result.outcome != WriteResultOutcome.WRITTEN) { + exceptionHandler.accept(points.getPoints(), result.throwable); + } + } + } + + private WriteResult tryToWrite(final BatchPoints batchPoints) { + try { + influxDB.write(batchPoints); + return WriteResult.WRITTEN; + } catch (InfluxDBException e) { + return new WriteResult(e); + } catch (Exception e) { + return new WriteResult(WriteResultOutcome.FAILED_RETRY_POSSIBLE, e); + } + } + + private void 
evictTooOldFailedWrites() { + while (usedRetryBufferCapacity > retryBufferCapacity && batchQueue.size() > 0) { + List points = batchQueue.removeFirst().getPoints(); + usedRetryBufferCapacity -= points.size(); + exceptionHandler.accept(points, + new InfluxDBException.RetryBufferOverrunException( + "Retry buffer overrun, current capacity: " + retryBufferCapacity)); + } + } + + private void addToBatchQueue(final BatchPoints batchPoints) { + boolean hasBeenMergedIn = false; + if (batchQueue.size() > 0) { + BatchPoints last = batchQueue.getLast(); + if (last.getPoints().size() + batchPoints.getPoints().size() <= requestActionsLimit) { + hasBeenMergedIn = last.mergeIn(batchPoints); + } + } + if (!hasBeenMergedIn) { + batchQueue.add(batchPoints); + } + // recalculate local counter and evict old batches on merge as well + usedRetryBufferCapacity += batchPoints.getPoints().size(); + evictTooOldFailedWrites(); + } +} diff --git a/src/main/java/org/influxdb/impl/TimeUtil.java b/src/main/java/org/influxdb/impl/TimeUtil.java index 61492eb11..ca4cf987a 100644 --- a/src/main/java/org/influxdb/impl/TimeUtil.java +++ b/src/main/java/org/influxdb/impl/TimeUtil.java @@ -44,7 +44,7 @@ protected SimpleDateFormat initialValue() { /** * Convert from a TimeUnit to a influxDB timeunit String. * - * @param t + * @param t the TimeUnit * @return the String representation. */ public static String toTimePrecision(final TimeUnit t) { @@ -69,7 +69,7 @@ public static String toTimePrecision(final TimeUnit t) { /** * convert a unix epoch time to timestamp used by influxdb. 
* this can then be used in query expressions against influxdb's time column like so: - * influxDB.query(new Query("SELECT * FROM some_measurement WHERE time >= '" + * influxDB.query(new Query("SELECT * FROM some_measurement WHERE time >= '" * + toInfluxDBTimeFormat(timeStart) + "'", some_database)) * influxdb time format example: 2016-10-31T06:52:20.020Z * diff --git a/src/main/java/org/influxdb/impl/TypeMapper.java b/src/main/java/org/influxdb/impl/TypeMapper.java new file mode 100644 index 000000000..98f4ada63 --- /dev/null +++ b/src/main/java/org/influxdb/impl/TypeMapper.java @@ -0,0 +1,70 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.impl; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; +import java.util.HashMap; +import java.util.Map; + +/** + * Resolves generic type variables to actual types, based on context. + * + * @author Eran Leshem + */ +@FunctionalInterface +public interface TypeMapper { + TypeMapper EMPTY = typeVariable -> null; + + static TypeMapper of(ParameterizedType type, Class clazz) { + TypeVariable>[] typeVariables = clazz.getTypeParameters(); + Type[] types = type.getActualTypeArguments(); + if (types.length != typeVariables.length) { + throw new IllegalStateException("Mismatched lengths for type variables and actual types"); + } + Map, Type> typeMapping = new HashMap<>(typeVariables.length); + for (int i = 0; i < typeVariables.length; i++) { + typeMapping.put(typeVariables[i], types[i]); + } + + return typeMapping::get; + } + + static TypeMapper empty() { + return EMPTY; + } + + default Type resolve(Type type) { + if (type instanceof TypeVariable) { + Type resolvedType = get((TypeVariable) type); + if (resolvedType == null) { + throw new IllegalStateException("Could not resolve type " + type); + } + + return resolvedType; + } + + return type; + } + + Type get(TypeVariable typeVariable); +} diff --git a/src/main/java/org/influxdb/msgpack/MessagePackConverterFactory.java b/src/main/java/org/influxdb/msgpack/MessagePackConverterFactory.java new file mode 100644 index 000000000..baa8135df --- /dev/null +++ b/src/main/java/org/influxdb/msgpack/MessagePackConverterFactory.java @@ -0,0 +1,26 @@ +package org.influxdb.msgpack; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +import okhttp3.ResponseBody; +import retrofit2.Converter; +import retrofit2.Retrofit; + +/** + * A Retrofit Convertor Factory for MessagePack response. 
+ * + * @author hoan.le [at] bonitoo.io + * + */ +public class MessagePackConverterFactory extends Converter.Factory { + public static MessagePackConverterFactory create() { + return new MessagePackConverterFactory(); + } + + @Override + public Converter responseBodyConverter(final Type type, final Annotation[] annotations, + final Retrofit retrofit) { + return new MessagePackResponseBodyConverter(); + } +} diff --git a/src/main/java/org/influxdb/msgpack/MessagePackResponseBodyConverter.java b/src/main/java/org/influxdb/msgpack/MessagePackResponseBodyConverter.java new file mode 100644 index 000000000..25446d051 --- /dev/null +++ b/src/main/java/org/influxdb/msgpack/MessagePackResponseBodyConverter.java @@ -0,0 +1,25 @@ +package org.influxdb.msgpack; + +import java.io.IOException; +import java.io.InputStream; + +import org.influxdb.dto.QueryResult; +import okhttp3.ResponseBody; +import retrofit2.Converter; + +/** + * Test the InfluxDB API over MessagePack format. + * + * @author hoan.le [at] bonitoo.io + * + */ +public class MessagePackResponseBodyConverter implements Converter { + + @Override + public QueryResult convert(final ResponseBody value) throws IOException { + try (InputStream is = value.byteStream()) { + MessagePackTraverser traverser = new MessagePackTraverser(); + return traverser.parse(is); + } + } +} diff --git a/src/main/java/org/influxdb/msgpack/MessagePackTraverser.java b/src/main/java/org/influxdb/msgpack/MessagePackTraverser.java new file mode 100644 index 000000000..5fab07cd5 --- /dev/null +++ b/src/main/java/org/influxdb/msgpack/MessagePackTraverser.java @@ -0,0 +1,258 @@ +package org.influxdb.msgpack; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.influxdb.InfluxDBException; +import org.influxdb.dto.QueryResult; +import 
/**
 * Traverses a MessagePack input stream and materialises {@link QueryResult}
 * object(s).
 *
 * <p>Decoding is a small state machine: {@code lastStringNode} remembers the
 * most recently decoded string (typically a map key such as "results",
 * "series", "columns" or "values"), and a {@code QueryResultModelPath}
 * tracks the position inside the QueryResult object tree where the next
 * decoded value belongs.
 *
 * <p>NOTE(review): generic type arguments appear stripped from this
 * extracted chunk; raw types are kept as found except where the stripped
 * form was not valid Java.
 *
 * @author hoan.le [at] bonitoo.io
 *
 */
public class MessagePackTraverser {

  // InfluxDB serialises timestamps as msgpack extension type 5
  // (matches tinylib/msgp's time encoding, handled in the EXTENSION case).
  private static final byte MSG_PACK_TIME_EXT_TYPE = 5;
  // Last STRING scalar decoded; acts as the "current key" for the state machine.
  private String lastStringNode;

  /**
   * Traverse over the whole message pack stream.
   * This method can be used for converting query results in chunk.
   *
   * @param is
   *          The MessagePack format input stream
   * @return a lazy Iterable over the QueryResult objects; each next() call
   *         parses one more result from the stream
   *
   */
  public Iterable traverse(final InputStream is) {
    MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(is);

    return () -> {
      return new Iterator() {
        @Override
        public boolean hasNext() {
          try {
            return unpacker.hasNext();
          } catch (IOException e) {
            // surface stream errors through the API's unchecked exception type
            throw new InfluxDBException(e);
          }
        }

        @Override
        public QueryResult next() {
          return parse(unpacker);
        }
      };
    };

  }

  /**
   * Parse the message pack stream.
   * This method can be used for converting a query result from a normal query
   * response where exactly one QueryResult is returned.
   *
   * @param is
   *          The MessagePack format input stream
   * @return QueryResult
   *
   */
  public QueryResult parse(final InputStream is) {
    MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(is);
    return parse(unpacker);
  }

  // Decodes exactly one top-level value from the unpacker into a fresh
  // QueryResult rooted at path ["queryResult"].
  private QueryResult parse(final MessageUnpacker unpacker) {
    QueryResult queryResult = new QueryResult();
    QueryResultModelPath queryResultPath = new QueryResultModelPath();
    queryResultPath.add("queryResult", queryResult);
    try {
      traverse(unpacker, queryResultPath, 1);
    } catch (IOException e) {
      throw new InfluxDBException(e);
    }
    return queryResult;
  }

  /**
   * Recursively decodes up to {@code readAmount} values at the current
   * nesting level, wiring decoded data into the object referenced by the
   * current ending of {@code queryResultPath}.
   */
  void traverse(final MessageUnpacker unpacker, final QueryResultModelPath queryResultPath, final int readAmount)
      throws IOException {
    int amount = 0;

    while (unpacker.hasNext() && amount < readAmount) {
      MessageFormat format = unpacker.getNextFormat();
      ValueType type = format.getValueType();
      int length;
      ExtensionTypeHeader extension;
      Object o = null;
      byte[] dst;
      String addedName = null;
      Object addedObject = null;
      switch (type) {
        case NIL:
          unpacker.unpackNil();
          break;
        case BOOLEAN:
          o = unpacker.unpackBoolean();
          break;
        case INTEGER:
          // pick the narrowest unpack call the wire format allows
          switch (format) {
            case UINT64:
              o = unpacker.unpackBigInteger();
              break;
            case INT64:
            case UINT32:
              o = unpacker.unpackLong();
              break;
            default:
              o = unpacker.unpackInt();
              break;
          }
          break;
        case FLOAT:
          o = unpacker.unpackDouble();
          break;
        case STRING:
          o = unpacker.unpackString();
          lastStringNode = (String) o;
          if ("name".equals(o) && queryResultPath.compareEndingPath("series")) {
            // key "name" of a series map: the NEXT string is the series name
            queryResultPath.add("name", null);
          } else if (queryResultPath.compareEndingPath("name")) {
            queryResultPath.removeLast();
            Series series = queryResultPath.getLastObject();
            series.setName((String) o);
          } else if (queryResultPath.compareEndingPath("tags")) {
            // inside a tags map: this string is a tag key, its value follows
            queryResultPath.add("tagKey", o);
          } else if (queryResultPath.compareEndingPath("tagKey")) {
            String tagKey = queryResultPath.getLastObject();
            queryResultPath.removeLast();
            Map tags = queryResultPath.getLastObject();
            tags.put(tagKey, (String) o);
          } else if (queryResultPath.compareEndingPath("columns")) {
            List columns = queryResultPath.getLastObject();
            columns.add((String) o);
          }
          break;
        case BINARY:
          // binary payloads are read and discarded
          length = unpacker.unpackBinaryHeader();
          dst = new byte[length];
          unpacker.readPayload(dst);
          break;
        case ARRAY:
          length = unpacker.unpackArrayHeader();
          if (length > 0) {
            // decide which container the array elements belong to, based on
            // the key that preceded the array and the current path ending
            if ("results".equals(lastStringNode)) {
              QueryResult queryResult = queryResultPath.getLastObject();
              List results = new ArrayList<>();
              queryResult.setResults(results);
              addedName = "results";
              addedObject = results;
            } else if ("series".equals(lastStringNode) && queryResultPath.compareEndingPath("result")) {
              Result result = queryResultPath.getLastObject();
              List series = new ArrayList<>();
              result.setSeries(series);
              addedName = "seriesList";
              addedObject = series;
            } else if ("columns".equals(lastStringNode) && queryResultPath.compareEndingPath("series")) {
              Series series = queryResultPath.getLastObject();
              List columns = new ArrayList<>();
              series.setColumns(columns);
              addedName = "columns";
              addedObject = columns;
            } else if ("values".equals(lastStringNode) && queryResultPath.compareEndingPath("series")) {
              Series series = queryResultPath.getLastObject();
              // NOTE(review): element type reconstructed; extraction rendered
              // this declaration as the invalid "List>".
              List<List<Object>> values = new ArrayList<>();
              series.setValues(values);
              addedName = "values";
              addedObject = values;
            } else if (queryResultPath.compareEndingPath("values")) {
              // a nested array inside "values" is one row of the series
              List<List<Object>> values = queryResultPath.getLastObject();
              List value = new ArrayList<>();
              values.add(value);
              addedName = "value";
              addedObject = value;
            }

            if (addedName != null) {
              queryResultPath.add(addedName, addedObject);
            }
            // recurse: decode the array's elements at the new path position
            traverse(unpacker, queryResultPath, length);
            if (addedName != null) {
              queryResultPath.removeLast();
            }
          }
          break;
        case MAP:
          length = unpacker.unpackMapHeader();
          if (queryResultPath.compareEndingPath("results")) {
            List results = queryResultPath.getLastObject();
            Result result = new Result();
            results.add(result);
            addedName = "result";
            addedObject = result;
          } else if (queryResultPath.compareEndingPath("seriesList")) {
            List series = queryResultPath.getLastObject();
            Series s = new Series();
            series.add(s);
            addedName = "series";
            addedObject = s;
          } else if ("tags".equals(lastStringNode) && queryResultPath.compareEndingPath("series")) {
            Series series = queryResultPath.getLastObject();
            Map tags = new HashMap<>();
            series.setTags(tags);
            addedName = "tags";
            addedObject = tags;
          }

          if (addedName != null) {
            queryResultPath.add(addedName, addedObject);
          }
          for (int i = 0; i < length; i++) {
            traverse(unpacker, queryResultPath, 1); // key
            traverse(unpacker, queryResultPath, 1); // value
          }
          if (addedName != null) {
            queryResultPath.removeLast();
          }
          break;
        case EXTENSION:
          final int nanosStartIndex = 8;
          extension = unpacker.unpackExtensionTypeHeader();
          if (extension.getType() == MSG_PACK_TIME_EXT_TYPE) {
            //decode epoch nanos in accordance with https://github.com/tinylib/msgp/blob/master/msgp/write.go#L594

            dst = new byte[extension.getLength()];
            unpacker.readPayload(dst);
            ByteBuffer bf = ByteBuffer.wrap(dst, 0, extension.getLength());
            // layout: 8-byte big-endian epoch seconds, then 4-byte nanos
            long epochSeconds = bf.getLong();
            int nanosOffset = bf.getInt(nanosStartIndex);
            o = TimeUnit.SECONDS.toNanos(epochSeconds) + nanosOffset;
          }
          break;

        default:
          // remaining msgpack value types carry no data this model maps
      }

      if (queryResultPath.compareEndingPath("value")) {
        // currently filling a row: every decoded scalar lands in the row list
        List value = queryResultPath.getLastObject();
        value.add(o);
      }
      amount++;
    }
  }
}
/**
 * A simple object-model path, used internally for navigating QueryResult
 * objects while traversing and parsing MessagePack data.
 *
 * <p>Implemented as a pair of parallel stacks: a label for each level and
 * the object attached to that level.
 *
 * @author hoan.le [at] bonitoo.io
 *
 */
class QueryResultModelPath {

  private final List<String> names = new ArrayList<>();
  private final List<Object> objects = new ArrayList<>();

  /** Pushes a named object onto the path. */
  public void add(final String name, final Object object) {
    names.add(name);
    objects.add(object);
  }

  /** Returns the object on top of the path (unchecked cast to the caller's type). */
  @SuppressWarnings("unchecked")
  public <T> T getLastObject() {
    return (T) objects.get(objects.size() - 1);
  }

  /** Pops the top entry from the path. */
  public void removeLast() {
    int top = names.size() - 1;
    names.remove(top);
    objects.remove(top);
  }

  /**
   * @param endingNames the sequence of names to match
   * @return true when the path currently ends with exactly this sequence
   */
  public boolean compareEndingPath(final String... endingNames) {
    int offset = names.size() - endingNames.length;
    if (offset < 0) {
      return false;
    }
    // compare from the end — mismatches near the top fail fastest
    for (int i = endingNames.length - 1; i >= 0; i--) {
      if (!endingNames[i].equals(names.get(offset + i))) {
        return false;
      }
    }
    return true;
  }
}

/**
 * Names of the InfluxQL aggregation functions supported by the query builder.
 */
public final class Aggregations {

  private Aggregations() {
  }

  public static final String COUNT = "COUNT";
  public static final String MAX = "MAX";
  public static final String MIN = "MIN";
  public static final String SUM = "SUM";
  public static final String MEAN = "MEAN";
}
Alias { + + private final Object column; + private final String alias; + + public Alias(final Object column, final String alias) { + this.column = column; + this.alias = alias; + } + + public Object getColumn() { + return column; + } + + public String getAlias() { + return alias; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/Appendable.java b/src/main/java/org/influxdb/querybuilder/Appendable.java new file mode 100644 index 000000000..0c2b006b3 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/Appendable.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder; + +public interface Appendable { + + void appendTo(StringBuilder stringBuilder); +} diff --git a/src/main/java/org/influxdb/querybuilder/Appender.java b/src/main/java/org/influxdb/querybuilder/Appender.java new file mode 100644 index 000000000..8c7e34bfd --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/Appender.java @@ -0,0 +1,119 @@ +package org.influxdb.querybuilder; + +import java.util.List; +import java.util.regex.Pattern; +import org.influxdb.querybuilder.clauses.ConjunctionClause; + +public final class Appender { + + private static final Pattern COLUMN_NAME_PATTERN = Pattern.compile("\\w+(?:\\[.+\\])?"); + + private Appender() { + } + + public static StringBuilder joinAndAppend( + final StringBuilder stringBuilder, final List clauses) { + for (int i = 0; i < clauses.size(); i++) { + if (i > 0) { + clauses.get(i).join(stringBuilder); + } + clauses.get(i).appendTo(stringBuilder); + } + return stringBuilder; + } + + public static StringBuilder joinAndAppend( + final StringBuilder stringBuilder, + final String separator, + final List values) { + for (int i = 0; i < values.size(); i++) { + if (i > 0) { + stringBuilder.append(separator); + } + values.get(i).appendTo(stringBuilder); + } + return stringBuilder; + } + + public static StringBuilder joinAndAppendNames( + final StringBuilder stringBuilder, final List values) { + for (int i = 0; i < values.size(); i++) { + if (i > 
0) { + stringBuilder.append(","); + } + appendName(values.get(i), stringBuilder); + } + return stringBuilder; + } + + public static StringBuilder appendValue(final Object value, final StringBuilder stringBuilder) { + if (value instanceof Appendable) { + Appendable appendable = (Appendable) value; + appendable.appendTo(stringBuilder); + } else if (value instanceof Function) { + Function functionCall = (Function) value; + stringBuilder.append(functionCall.getName()).append('('); + for (int i = 0; i < functionCall.getParameters().length; i++) { + if (i > 0) { + stringBuilder.append(','); + } + appendValue(functionCall.getParameters()[i], stringBuilder); + } + stringBuilder.append(')'); + } else if (value instanceof Column) { + appendName(((Column) value).getName(), stringBuilder); + } else if (value instanceof Placeholder) { + stringBuilder.append('$').append(((Placeholder) value).getName()); + } else if (value instanceof String) { + stringBuilder.append("'").append(value).append("'"); + } else if (value != null) { + stringBuilder.append(value); + } else { + stringBuilder.append('?'); + return stringBuilder; + } + return stringBuilder; + } + + public static StringBuilder appendName(final String name, final StringBuilder stringBuilder) { + String trimmedName = name.trim(); + if (trimmedName.startsWith("\"") || COLUMN_NAME_PATTERN.matcher(trimmedName).matches()) { + stringBuilder.append(trimmedName); + } else { + stringBuilder.append('"').append(trimmedName).append('"'); + } + return stringBuilder; + } + + public static StringBuilder appendName(final Object name, final StringBuilder stringBuilder) { + if (name instanceof String) { + appendName((String) name, stringBuilder); + } else if (name instanceof Column) { + appendName(((Column) name).getName(), stringBuilder); + } else if (name instanceof Function) { + Function functionCall = (Function) name; + stringBuilder.append(functionCall.getName()).append('('); + for (int i = 0; i < functionCall.getParameters().length; 
i++) { + if (i > 0) { + stringBuilder.append(','); + } + appendValue(functionCall.getParameters()[i], stringBuilder); + } + stringBuilder.append(')'); + } else if (name instanceof Alias) { + Alias alias = (Alias) name; + appendName(alias.getColumn(), stringBuilder); + stringBuilder.append(" AS ").append(alias.getAlias()); + } else if (name instanceof Distinct) { + Distinct distinct = (Distinct) name; + stringBuilder.append("DISTINCT "); + appendName(distinct.getExpression(), stringBuilder); + } else if (name instanceof Appendable) { + Appendable appendable = (Appendable) name; + appendable.appendTo(stringBuilder); + } else { + throw new IllegalArgumentException("Invalid type"); + } + return stringBuilder; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/BuiltQuery.java b/src/main/java/org/influxdb/querybuilder/BuiltQuery.java new file mode 100644 index 000000000..d9a3c5920 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/BuiltQuery.java @@ -0,0 +1,213 @@ +package org.influxdb.querybuilder; + +import static org.influxdb.querybuilder.Operations.EQ; +import static org.influxdb.querybuilder.Operations.GT; +import static org.influxdb.querybuilder.Operations.GTE; +import static org.influxdb.querybuilder.Operations.LT; +import static org.influxdb.querybuilder.Operations.LTE; +import static org.influxdb.querybuilder.Operations.NE; +import static org.influxdb.querybuilder.Operations.NEQ; + +import org.influxdb.dto.Query; +import org.influxdb.querybuilder.clauses.AddRelativeTimeClause; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ContainsClause; +import org.influxdb.querybuilder.clauses.NegativeRegexClause; +import org.influxdb.querybuilder.clauses.OperationClause; +import org.influxdb.querybuilder.clauses.RegexClause; +import org.influxdb.querybuilder.clauses.SimpleClause; +import org.influxdb.querybuilder.clauses.SubRelativeTimeClause; +import org.influxdb.querybuilder.time.TimeInterval; + +public 
/**
 * Base class for queries assembled by the fluent query builder.
 *
 * <p>The InfluxQL text is produced lazily: the constructors pass a null
 * command string to {@code Query}, and {@link #getCommand()} renders the
 * builder state on demand via {@code buildQueryString}.
 */
abstract class BuiltQuery extends Query implements QueryStringBuilder {

  /**
   * @param database the database the query runs against
   */
  public BuiltQuery(final String database) {
    super(null, database);
  }

  /**
   * @param database the database the query runs against
   * @param requiresPost whether the query must be sent with HTTP POST
   */
  public BuiltQuery(final String database, final boolean requiresPost) {
    super(null, database, requiresPost);
  }

  // Appends a terminating ';' unless the right-trimmed buffer already ends
  // with one; an empty buffer becomes just ";".
  static StringBuilder addSemicolonIfMissing(final StringBuilder stringBuilder) {
    int length = trimLast(stringBuilder);
    if (length == 0 || stringBuilder.charAt(length - 1) != ';') {
      stringBuilder.append(';');
    }
    return stringBuilder;
  }

  // Truncates trailing whitespace/control characters (anything at or below
  // the space character) in place and returns the resulting length.
  static int trimLast(final StringBuilder stringBuilder) {
    int length = stringBuilder.length();
    while (length > 0 && stringBuilder.charAt(length - 1) <= ' ') {
      length -= 1;
    }
    if (length != stringBuilder.length()) {
      stringBuilder.setLength(length);
    }
    return length;
  }

  @Override
  public String getCommand() {
    StringBuilder sb = buildQueryString(new StringBuilder());
    addSemicolonIfMissing(sb);
    return sb.toString();
  }

  @Override
  public String getCommandWithUrlEncoded() {
    // encode(...) is presumably inherited from Query — TODO confirm
    return encode(getCommand());
  }

  /**
   * The query builder shall provide all the building blocks needed, only a static block shall be
   * used.
   */
  public static final class QueryBuilder {

    private QueryBuilder() {
    }

    public static SelectionQueryImpl select(final String... columns) {
      return select((Object[]) columns);
    }

    public static SelectionQueryImpl select(final Object... columns) {
      return new SelectionQueryImpl(new SelectionCoreImpl(columns));
    }

    // --- comparison clauses: name-vs-value and expression-vs-expression ---

    public static Clause eq(final String name, final Object value) {
      return new SimpleClause(name, EQ, value);
    }

    public static Clause eq(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, EQ, arg2);
    }

    public static Clause ne(final String name, final Object value) {
      return new SimpleClause(name, NE, value);
    }

    // neq renders "<>", ne renders "!=" — both inequality spellings exist
    public static Clause neq(final String name, final Object value) {
      return new SimpleClause(name, NEQ, value);
    }

    public static Clause neq(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, NEQ, arg2);
    }

    public static Clause ne(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, NE, arg2);
    }

    public static Clause contains(final String name, final String value) {
      return new ContainsClause(name, value);
    }

    // regex match (=~) and negated regex match (!~)
    public static Clause regex(final String name, final String value) {
      return new RegexClause(name, value);
    }

    public static Clause nregex(final String name, final String value) {
      return new NegativeRegexClause(name, value);
    }

    public static Clause lt(final String name, final Object value) {
      return new SimpleClause(name, LT, value);
    }

    public static Clause lt(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, LT, arg2);
    }

    public static Clause lte(final String name, final Object value) {
      return new SimpleClause(name, LTE, value);
    }

    public static Clause lte(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, LTE, arg2);
    }

    public static Clause gt(final String name, final Object value) {
      return new SimpleClause(name, GT, value);
    }

    public static Clause gt(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, GT, arg2);
    }

    public static Clause gte(final String name, final Object value) {
      return new SimpleClause(name, GTE, value);
    }

    public static Clause gte(final Object arg1, final Object arg2) {
      return new OperationClause(arg1, GTE, arg2);
    }

    // relative-time clauses, e.g. now() + 5m / now() - 5m
    public static Clause addTime(final long interval, final String literal) {
      return new AddRelativeTimeClause(new TimeInterval(interval, literal));
    }

    public static Clause subTime(final long interval, final String literal) {
      return new SubRelativeTimeClause(new TimeInterval(interval, literal));
    }

    /**
     *
     * @return the Ordering
     */
    public static Ordering asc() {
      return new Ordering(false);
    }

    /**
     * InfluxDB supports only time for ordering.
     *
     * @return the Ordering
     */
    public static Ordering desc() {
      return new Ordering(true);
    }

    /** Raw text inserted verbatim, without quoting or escaping. */
    public static Object raw(final String str) {
      return new RawText(str);
    }

    public static Object max(final Object column) {
      return FunctionFactory.max(column);
    }

    public static Object min(final Object column) {
      return FunctionFactory.min(column);
    }

    public static Object time(final Long timeInterval, final String durationLiteral) {
      return FunctionFactory.time(timeInterval, durationLiteral);
    }

    public static TimeInterval ti(final Long timeInterval, final String durationLiteral) {
      return new TimeInterval(timeInterval, durationLiteral);
    }

    /** Column-operator clause with a caller-supplied operator token. */
    public static SimpleClause cop(final String column, final String op, final Object arg2) {
      return new SimpleClause(column, op, arg2);
    }

    /** Expression-operator clause with a caller-supplied operator token. */
    public static OperationClause op(final Object arg1, final String op, final Object arg2) {
      return new OperationClause(arg1, op, arg2);
    }

    public static Object time(
        final Long timeInterval,
        final String durationLiteral,
        final Long offsetInterval,
        final String offSetLiteral) {
      return FunctionFactory.time(timeInterval, durationLiteral, offsetInterval, offSetLiteral);
    }

    public static Object now() {
      return FunctionFactory.now();
    }
  }
}
/** A measurement column referenced by name in a built query. */
public class Column {

  private final String name;

  Column(final String name) {
    this.name = name;
  }

  public String getName() {
    return name;
  }
}

/** Wraps an expression that is rendered with a DISTINCT prefix. */
public class Distinct {

  /** Distinct might as well contain an expression. */
  private final Object expression;

  Distinct(final Object expression) {
    this.expression = expression;
  }

  public Object getExpression() {
    return expression;
  }
}

/** An InfluxQL function call: a name plus its parameter list. */
public class Function {

  private final String name;
  private final Object[] parameters;

  Function(final String name, final Object... parameters) {
    this.name = name;
    this.parameters = parameters;
  }

  public String getName() {
    return name;
  }

  public Object[] getParameters() {
    return parameters;
  }
}

/** Factory methods producing {@link Function} instances for InfluxQL calls. */
public final class FunctionFactory {

  private FunctionFactory() {
  }

  /**
   * Generic function call; String parameters are converted to column
   * references.
   *
   * <p>NOTE(review): mutates the caller-supplied varargs array in place.
   */
  public static Function function(final String name, final Object... parameters) {
    convertToColumns(parameters);
    return new Function(name, parameters);
  }

  /** now() — the server-side current timestamp. */
  public static Object now() {
    return new Function("now");
  }

  public static Object count(final Object column) {
    return new Function(COUNT, convertToColumn(column));
  }

  public static Object max(final Object column) {
    return new Function(MAX, convertToColumn(column));
  }

  public static Object min(final Object column) {
    return new Function(MIN, convertToColumn(column));
  }

  public static Object sum(final Object column) {
    return new Function(SUM, convertToColumn(column));
  }

  public static Object mean(final Object column) {
    return new Function(MEAN, convertToColumn(column));
  }

  /** time(interval) helper, e.g. for GROUP BY time(10m). */
  public static Object time(final Long timeInterval, final String durationLiteral) {
    return new Function("time", new TimeInterval(timeInterval, durationLiteral));
  }

  /** time(interval, offset) helper. */
  public static Object time(
      final Long timeInterval,
      final String durationLiteral,
      final Long offsetInterval,
      final String offSetLiteral) {
    return new Function(
        "time",
        new TimeInterval(timeInterval, durationLiteral),
        new TimeInterval(offsetInterval, offSetLiteral));
  }

  public static Object column(final String name) {
    return new Column(name);
  }

  public static Object placeholder(final String name) {
    return new Placeholder(name);
  }

  // Replaces every String element with a Column of the same name, in place.
  private static void convertToColumns(final Object... arguments) {
    for (int i = 0; i < arguments.length; i++) {
      arguments[i] = convertToColumn(arguments[i]);
    }
  }

  // Strings become column references; anything else passes through untouched.
  private static Object convertToColumn(final Object argument) {
    if (argument instanceof String) {
      return column(((String) argument));
    }

    return argument;
  }
}

/** InfluxQL operator tokens used by the clause builders. */
public final class Operations {

  private Operations() {
  }

  public static final String EQ = "=";
  public static final String NE = "!=";
  public static final String NEQ = "<>";
  public static final String LT = "<";
  public static final String LTE = "<=";
  public static final String GT = ">";
  public static final String GTE = ">=";
  public static final String EQR = "=~";
  public static final String NER = "!~";
  public static final String ADD = "+";
  public static final String SUB = "-";
  public static final String MUL = "*";
}

/** ORDER BY direction; always applied to the "time" column. */
public class Ordering implements Appendable {

  private final boolean isDesc;

  private static final String TIME_KEY = "time";

  /**
   * Influxdb ordering currently supports only time.
   *
   * @param isDesc true renders DESC, false renders ASC
   */
  Ordering(final boolean isDesc) {
    this.isDesc = isDesc;
  }

  @Override
  public void appendTo(final StringBuilder stringBuilder) {
    Appender.appendName(TIME_KEY, stringBuilder);
    if (isDesc) {
      stringBuilder.append(" DESC");
    } else {
      stringBuilder.append(" ASC");
    }
  }
}

/** A named bind parameter, rendered as $name by the Appender. */
public class Placeholder {

  private final String name;

  Placeholder(final String name) {
    this.name = name;
  }

  public String getName() {
    return name;
  }
}

/** Implemented by builders able to render their query text. */
public interface QueryStringBuilder {

  StringBuilder buildQueryString(final StringBuilder stringBuilder);

  StringBuilder buildQueryString();
}

/** Verbatim text inserted into the query without quoting or escaping. */
public class RawText implements Appendable {

  private final String text;

  public RawText(final String text) {
    this.text = text;
  }

  @Override
  public void appendTo(final StringBuilder stringBuilder) {
    stringBuilder.append(text);
  }
}
/**
 * Fluent SELECT statement contract.
 *
 * <p>NOTE(review): generic type parameters (likely "T extends Where") appear
 * stripped from this extracted chunk; the T below refers to that parameter —
 * confirm against the original file.
 */
public interface Select {

  /** @return the WHERE builder of this select */
  T where();

  /** Adds the clause to the WHERE part and returns its builder. */
  T where(final Clause clause);

  /** Adds raw text to the WHERE part and returns its builder. */
  T where(final String text);

  T orderBy(final Ordering ordering);

  T groupBy(final Object... columns);

  /** fill(n) for GROUP BY intervals without data. */
  T fill(final Number value);

  /** fill(linear | none | null | previous). */
  T fill(final String value);

  T limit(final int limit);

  T limit(final int limit, final long offSet);

  T sLimit(final int sLimit);

  T sLimit(final int sLimit, final long sOffSet);

  /** Appends the tz(...) suffix. */
  T tz(final String timezone);
}

/**
 * Core implementation of the fluent SELECT builder: collects the pieces of a
 * statement and renders them in {@link #buildQueryString(StringBuilder)}.
 *
 * <p>NOTE(review): generic type arguments (class parameter, Optional/List
 * element types) appear stripped from this extracted chunk; declarations are
 * kept as found.
 */
public class SelectCoreImpl implements Select, QueryStringBuilder, WithSubquery {

  // FROM target: a measurement reference or (via setSubQuery) a sub-select
  private FromClause table;
  private final boolean isDistinct;
  private final List columns;
  // WHERE builder shared with the owning query
  protected final T where;
  private final Optional intoMeasurement;
  private Optional ordering = Optional.empty();
  private List groupByColumns;
  private Optional fill = Optional.empty();
  private Optional limit = Optional.empty();
  private Optional offSet = Optional.empty();
  private Optional sLimit = Optional.empty();
  private Optional sOffSet = Optional.empty();
  private Optional timeZone = Optional.empty();

  SelectCoreImpl(final List columns, final boolean isDistinct, final T where) {
    this.columns = columns;
    this.isDistinct = isDistinct;
    this.where = where;
    this.intoMeasurement = Optional.empty();
  }

  SelectCoreImpl(
      final List columns,
      final boolean isDistinct,
      final T where,
      final String intoMeasurement) {
    this.columns = columns;
    this.isDistinct = isDistinct;
    this.where = where;
    if (intoMeasurement != null) {
      this.intoMeasurement = Optional.of(intoMeasurement);
    } else {
      this.intoMeasurement = Optional.empty();
    }
  }

  SelectCoreImpl(
      final FromClause table, final List columns, final boolean isDistinct, final T where) {
    this.table = table;
    this.columns = columns;
    this.isDistinct = isDistinct;
    this.where = where;
    this.intoMeasurement = Optional.empty();
  }

  SelectCoreImpl(
      final FromClause table,
      final List columns,
      final boolean isDistinct,
      final T where,
      final String intoMeasurement) {
    this.table = table;
    this.columns = columns;
    this.isDistinct = isDistinct;
    this.where = where;
    if (intoMeasurement != null) {
      this.intoMeasurement = Optional.of(intoMeasurement);
    } else {
      this.intoMeasurement = Optional.empty();
    }
  }

  @Override
  public T where() {
    return where;
  }

  @Override
  public T where(final Clause clause) {
    return where.and(clause);
  }

  @Override
  public T where(final String text) {
    return where.and(new RawTextClause(text));
  }

  @Override
  public Select orderBy(final Ordering ordering) {
    this.ordering = Optional.of(ordering);
    return this;
  }

  @Override
  public Select groupBy(final Object... columns) {
    this.groupByColumns = Arrays.asList(columns);
    return this;
  }

  @Override
  public Select fill(final Number value) {
    this.fill = Optional.of(function("fill", value));
    return this;
  }

  /**
   * @param value one of "linear", "none", "null", "previous"
   * @throws IllegalArgumentException for any other string
   */
  @Override
  public Select fill(final String value) {
    if ("linear".equals(value)
        || "none".equals(value)
        || "null".equals(value)
        || "previous".equals(value)) {
      this.fill = Optional.of(function("fill", value));
      return this;
    } else {
      throw new IllegalArgumentException(
          "Please give a numeric value or linear, none, null, previous");
    }
  }

  @Override
  public Select limit(final int limit) {
    if (limit <= 0) {
      throw new IllegalArgumentException("Invalid LIMIT value, must be strictly positive");
    }

    if (this.limit.isPresent()) {
      throw new IllegalStateException("A LIMIT value has already been provided");
    }

    this.limit = Optional.of(limit);
    return this;
  }

  // NOTE(review): unlike limit(int), this overload does not reject a second
  // call — it silently overwrites a previously provided LIMIT.
  @Override
  public Select limit(final int limit, final long offSet) {
    if (limit <= 0 || offSet <= 0) {
      throw new IllegalArgumentException(
          "Invalid LIMIT and OFFSET Value, must be strictly positive");
    }

    this.limit = Optional.of(limit);
    this.offSet = Optional.of(offSet);
    return this;
  }

  @Override
  public Select sLimit(final int sLimit) {
    if (sLimit <= 0) {
      throw new IllegalArgumentException("Invalid SLIMIT value, must be strictly positive");
    }

    if (this.sLimit.isPresent()) {
      throw new IllegalStateException("A SLIMIT value has already been provided");
    }

    this.sLimit = Optional.of(sLimit);
    return this;
  }

  // NOTE(review): message says LIMIT/OFFSET but this guards SLIMIT/SOFFSET;
  // also lacks the duplicate-call guard that sLimit(int) has.
  @Override
  public Select sLimit(final int sLimit, final long sOffSet) {
    if (sLimit <= 0 || sOffSet <= 0) {
      throw new IllegalArgumentException(
          "Invalid LIMIT and OFFSET Value, must be strictly positive");
    }

    this.sLimit = Optional.of(sLimit);
    this.sOffSet = Optional.of(sOffSet);
    return this;
  }

  @Override
  public Select tz(final String timezone) {
    this.timeZone = Optional.of(new TimeZone(timezone));
    return this;
  }

  @Override
  public void setSubQuery(final QueryStringBuilder query) {
    this.table = new SubQueryFromClause(query);
  }

  @Override
  public StringBuilder buildQueryString() {
    return buildQueryString(new StringBuilder());
  }

  /**
   * Renders the collected state as InfluxQL in clause order:
   * SELECT [DISTINCT] cols [INTO m] FROM t [WHERE] [GROUP BY] [fill]
   * [ORDER BY] [LIMIT] [OFFSET] [SLIMIT] [SOFFSET] [tz].
   *
   * @throws IllegalStateException when no FROM target was set, or DISTINCT
   *         is combined with more than one column
   */
  @Override
  public StringBuilder buildQueryString(final StringBuilder builder) {
    builder.append("SELECT ");

    // NOTE(review): this dereferences columns before the null check below —
    // isDistinct with null columns would NPE; consider checking
    // columns != null here. Confirm whether callers can produce that state.
    if (isDistinct) {
      if (columns.size() > 1) {
        throw new IllegalStateException("DISTINCT function can only be used with one column");
      }
    }

    if (columns == null || columns.size() == 0) {
      builder.append('*');
    } else {
      joinAndAppendNames(builder, columns);
    }

    if (intoMeasurement.isPresent()) {
      builder.append(" INTO ").append(intoMeasurement.get());
    }

    builder.append(" FROM ");

    if (table != null) {
      table.appendTo(builder);
    } else {
      // a SELECT without a FROM target is not renderable
      throw new IllegalStateException();
    }

    if (!where.getClauses().isEmpty()) {
      builder.append(" WHERE ");
      joinAndAppend(builder, where.getClauses());
    }

    if (groupByColumns != null) {
      builder.append(" GROUP BY ");
      joinAndAppendNames(builder, groupByColumns);
    }

    if (fill.isPresent()) {
      builder.append(" ");
      appendValue(fill.get(), builder);
    }

    if (ordering.isPresent()) {
      builder.append(" ORDER BY ");
      joinAndAppend(builder, ",", Collections.singletonList(ordering.get()));
    }

    if (limit.isPresent()) {
      builder.append(" LIMIT ").append(limit.get());
    }

    if (offSet.isPresent()) {
      builder.append(" OFFSET ").append(offSet.get());
    }

    if (sLimit.isPresent()) {
      builder.append(" SLIMIT ").append(sLimit.get());
    }

    if (sOffSet.isPresent()) {
      builder.append(" SOFFSET ").append(sOffSet.get());
    }

    if (timeZone.isPresent()) {
      timeZone.get().appendTo(builder);
    }

    trimLast(builder);
    return builder;
  }
}
+++ b/src/main/java/org/influxdb/querybuilder/SelectQueryImpl.java @@ -0,0 +1,113 @@ +package org.influxdb.querybuilder; + +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.FromClause; +import org.influxdb.querybuilder.clauses.RawTextClause; + +public class SelectQueryImpl extends BuiltQuery implements SelectWithSubquery { + + private final SelectCoreImpl selectCore; + + SelectQueryImpl( + final String database, final boolean requiresPost, final SelectionCoreImpl selectionCore) { + super(database, requiresPost); + WhereCoreImpl whereCore = new WhereCoreImpl<>(this); + WhereQueryImpl whereQuery = new WhereQueryImpl<>(this, whereCore); + this.selectCore = selectionCore.from(whereQuery); + } + + SelectQueryImpl( + final String database, + final FromClause fromClause, + final boolean requiresPost, + final SelectionCoreImpl selectionCore) { + super(database, requiresPost); + WhereCoreImpl whereCore = new WhereCoreImpl<>(this); + WhereQueryImpl whereQuery = new WhereQueryImpl<>(this, whereCore); + this.selectCore = selectionCore.from(fromClause, whereQuery); + } + + @Override + public StringBuilder buildQueryString() { + return selectCore.buildQueryString(new StringBuilder()); + } + + @Override + public StringBuilder buildQueryString(final StringBuilder stringBuilder) { + return selectCore.buildQueryString(stringBuilder); + } + + @Override + public void setSubQuery(final QueryStringBuilder query) { + selectCore.setSubQuery(query); + } + + @Override + public WhereQueryImpl where() { + return selectCore.where(); + } + + @Override + public WhereQueryImpl where(final Clause clause) { + return selectCore.where().and(clause); + } + + @Override + public WhereQueryImpl where(final String text) { + return selectCore.where().and(new RawTextClause(text)); + } + + @Override + public SelectQueryImpl orderBy(final Ordering ordering) { + selectCore.orderBy(ordering); + return this; + } + + @Override + public SelectQueryImpl groupBy(final 
Object... columns) { + selectCore.groupBy(columns); + return this; + } + + @Override + public SelectQueryImpl fill(final Number value) { + selectCore.fill(value); + return this; + } + + @Override + public SelectQueryImpl fill(final String value) { + selectCore.fill(value); + return this; + } + + @Override + public SelectQueryImpl limit(final int limit) { + selectCore.limit(limit); + return this; + } + + @Override + public SelectQueryImpl limit(final int limit, final long offSet) { + selectCore.limit(limit, offSet); + return this; + } + + @Override + public SelectQueryImpl sLimit(final int sLimit) { + selectCore.sLimit(sLimit); + return this; + } + + @Override + public SelectQueryImpl sLimit(final int sLimit, final long sOffSet) { + selectCore.sLimit(sLimit, sOffSet); + return this; + } + + @Override + public SelectQueryImpl tz(final String timezone) { + selectCore.tz(timezone); + return this; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/SelectSubQueryImpl.java b/src/main/java/org/influxdb/querybuilder/SelectSubQueryImpl.java new file mode 100644 index 000000000..8a74c30a1 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SelectSubQueryImpl.java @@ -0,0 +1,113 @@ +package org.influxdb.querybuilder; + +import java.util.List; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.FromClause; + +public class SelectSubQueryImpl extends SubQuery + implements SelectWithSubquery { + + private SelectCoreImpl, T>> selectCore; + private WhereSubQueryImpl, T> whereSubQuery; + + SelectSubQueryImpl( + final FromClause fromClause, final List columns, final boolean isDistinct) { + whereSubQuery = new WhereSubQueryImpl<>(this, new WhereCoreImpl<>(this)); + this.selectCore = new SelectCoreImpl<>(fromClause, columns, isDistinct, whereSubQuery); + } + + SelectSubQueryImpl(final List columns, final boolean isDistinct) { + whereSubQuery = new WhereSubQueryImpl<>(this, new WhereCoreImpl<>(this)); + this.selectCore = new 
SelectCoreImpl<>(columns, isDistinct, whereSubQuery); + } + + @Override + public WhereSubQueryImpl, T> where() { + return selectCore.where(); + } + + @Override + public WhereSubQueryImpl, T> where(final Clause clause) { + return selectCore.where(clause); + } + + @Override + public WhereSubQueryImpl, T> where(final String text) { + return selectCore.where(text); + } + + @Override + public SelectSubQueryImpl orderBy(final Ordering ordering) { + selectCore.orderBy(ordering); + return this; + } + + @Override + public SelectSubQueryImpl groupBy(final Object... columns) { + selectCore.groupBy(columns); + return this; + } + + @Override + public SelectSubQueryImpl fill(final Number value) { + selectCore.fill(value); + return this; + } + + @Override + public SelectSubQueryImpl fill(final String value) { + selectCore.fill(value); + return this; + } + + @Override + public SelectSubQueryImpl limit(final int limit) { + selectCore.limit(limit); + return this; + } + + @Override + public SelectSubQueryImpl limit(final int limit, final long offSet) { + selectCore.limit(limit, offSet); + return this; + } + + @Override + public SelectSubQueryImpl sLimit(final int sLimit) { + selectCore.sLimit(sLimit); + return this; + } + + @Override + public SelectSubQueryImpl sLimit(final int sLimit, final long sOffSet) { + selectCore.sLimit(sLimit, sOffSet); + return this; + } + + @Override + public SelectSubQueryImpl tz(final String timezone) { + selectCore.tz(timezone); + return this; + } + + @Override + public StringBuilder buildQueryString() { + return selectCore.buildQueryString(new StringBuilder()); + } + + @Override + public StringBuilder buildQueryString(final StringBuilder stringBuilder) { + return selectCore.buildQueryString(stringBuilder); + } + + @Override + public void setSubQuery(final QueryStringBuilder query) { + selectCore.setSubQuery(query); + } + + @Override + void setParent(final T parent) { + whereSubQuery.setParent(parent); + super.setParent(parent); + } +} diff --git 
a/src/main/java/org/influxdb/querybuilder/SelectWithSubquery.java b/src/main/java/org/influxdb/querybuilder/SelectWithSubquery.java new file mode 100644 index 000000000..f97f5b788 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SelectWithSubquery.java @@ -0,0 +1,4 @@ +package org.influxdb.querybuilder; + +public interface SelectWithSubquery extends Select, WithSubquery, QueryStringBuilder { +} diff --git a/src/main/java/org/influxdb/querybuilder/Selection.java b/src/main/java/org/influxdb/querybuilder/Selection.java new file mode 100644 index 000000000..1408c09ae --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/Selection.java @@ -0,0 +1,41 @@ +package org.influxdb.querybuilder; + +import org.influxdb.querybuilder.clauses.OperationClause; +import org.influxdb.querybuilder.clauses.SimpleClause; + +public interface Selection { + + Selection distinct(); + + Selection as(final String aliasName); + + Selection all(); + + Selection countAll(); + + Selection regex(final String clause); + + Selection column(final String name); + + Selection function(final String name, final Object... 
parameters); + + Selection raw(final String text); + + Selection count(final Object column); + + Selection max(final Object column); + + Selection min(final Object column); + + Selection sum(final Object column); + + Selection mean(final Object column); + + Selection op(final OperationClause operationClause); + + Selection op(final Object arg1, final String op, final Object arg2); + + Selection cop(final SimpleClause simpleClause); + + Selection cop(final String column, final String op, final Object arg2); +} diff --git a/src/main/java/org/influxdb/querybuilder/SelectionCoreImpl.java b/src/main/java/org/influxdb/querybuilder/SelectionCoreImpl.java new file mode 100644 index 000000000..685459054 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SelectionCoreImpl.java @@ -0,0 +1,185 @@ +package org.influxdb.querybuilder; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import org.influxdb.querybuilder.clauses.OperationClause; +import org.influxdb.querybuilder.clauses.SimpleClause; +import org.influxdb.querybuilder.clauses.SelectRegexClause; +import org.influxdb.querybuilder.clauses.FromClause; + +class SelectionCoreImpl implements Selection, WithInto { + + protected List columns; + protected boolean isDistinct; + private String intoMeasurement; + private static final List COUNT_ALL = + Collections.singletonList(new Function("COUNT", new RawText("*"))); + + private Object currentSelection; + + SelectionCoreImpl() { + } + + SelectionCoreImpl(final Object[] columns) { + for (Object column : columns) { + addToCurrentColumn(column); + } + } + + @Override + public Selection distinct() { + assertColumnIsSelected(); + this.isDistinct = true; + Object distinct = new Distinct(currentSelection); + currentSelection = null; + return moveToColumns(distinct); + } + + @Override + public Selection as(final String aliasName) { + assertColumnIsSelected(); + Object alias = new Alias(currentSelection, aliasName); + currentSelection = null; 
+ return moveToColumns(alias); + } + + private void assertColumnIsSelected() { + if (currentSelection == null) { + throw new IllegalStateException("You need to select a column prior to calling distinct"); + } + } + + private SelectionCoreImpl moveToColumns(final Object name) { + if (columns == null) { + columns = new ArrayList<>(); + } + + columns.add(name); + return this; + } + + private SelectionCoreImpl addToCurrentColumn(final Object name) { + if (currentSelection != null) { + moveToColumns(currentSelection); + } + + currentSelection = name; + return this; + } + + @Override + public SelectionCoreImpl all() { + if (isDistinct) { + throw new IllegalStateException("DISTINCT function can only be used with one column"); + } + if (columns != null) { + throw new IllegalStateException("Can't select all columns over columns selected previously"); + } + if (currentSelection != null) { + throw new IllegalStateException("Can't select all columns over columns selected previously"); + } + return this; + } + + @Override + public SelectionCoreImpl countAll() { + if (columns != null) { + throw new IllegalStateException("Can't select all columns over columns selected previously"); + } + if (currentSelection != null) { + throw new IllegalStateException("Can't select all columns over columns selected previously"); + } + columns = COUNT_ALL; + return this; + } + + @Override + public SelectionCoreImpl column(final String name) { + return addToCurrentColumn(name); + } + + @Override + public SelectionCoreImpl regex(final String clause) { + return addToCurrentColumn(new SelectRegexClause(clause)); + } + + @Override + public SelectionCoreImpl function(final String name, final Object... 
parameters) { + return addToCurrentColumn(FunctionFactory.function(name, parameters)); + } + + @Override + public SelectionCoreImpl raw(final String text) { + return addToCurrentColumn(new RawText(text)); + } + + @Override + public SelectionCoreImpl count(final Object column) { + return addToCurrentColumn(FunctionFactory.count(column)); + } + + @Override + public SelectionCoreImpl max(final Object column) { + return addToCurrentColumn(FunctionFactory.max(column)); + } + + @Override + public SelectionCoreImpl min(final Object column) { + return addToCurrentColumn(FunctionFactory.min(column)); + } + + @Override + public SelectionCoreImpl sum(final Object column) { + return addToCurrentColumn(FunctionFactory.sum(column)); + } + + @Override + public SelectionCoreImpl mean(final Object column) { + return addToCurrentColumn(FunctionFactory.mean(column)); + } + + @Override + public SelectionCoreImpl into(final String measurement) { + this.intoMeasurement = measurement; + return this; + } + + @Override + public Selection op(final OperationClause operationClause) { + return addToCurrentColumn(operationClause); + } + + @Override + public Selection op(final Object arg1, final String op, final Object arg2) { + return addToCurrentColumn(new OperationClause(arg1, op, arg2)); + } + + @Override + public Selection cop(final SimpleClause simpleClause) { + return addToCurrentColumn(simpleClause); + } + + @Override + public Selection cop(final String column, final String op, final Object arg2) { + return addToCurrentColumn(new SimpleClause(column, op, arg2)); + } + + SelectCoreImpl from(final FromClause fromClause, final E where) { + clearSelection(); + return new SelectCoreImpl<>(fromClause, columns, isDistinct, where, intoMeasurement); + } + + SelectCoreImpl from(final E where) { + clearSelection(); + return new SelectCoreImpl<>(columns, isDistinct, where, intoMeasurement); + } + + protected SelectionCoreImpl clearSelection() { + if (currentSelection != null) { + 
moveToColumns(currentSelection); + } + currentSelection = null; + return this; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/SelectionQueryImpl.java b/src/main/java/org/influxdb/querybuilder/SelectionQueryImpl.java new file mode 100644 index 000000000..33efe9b1f --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SelectionQueryImpl.java @@ -0,0 +1,193 @@ +package org.influxdb.querybuilder; + +import java.util.ArrayList; +import java.util.Arrays; +import org.influxdb.querybuilder.clauses.OperationClause; +import org.influxdb.querybuilder.clauses.SimpleClause; +import org.influxdb.querybuilder.clauses.RawFromClause; +import org.influxdb.querybuilder.clauses.SimpleFromClause; +import org.influxdb.querybuilder.clauses.MultipleFromClause; + +public class SelectionQueryImpl implements Selection, WithInto { + + private final SelectionCoreImpl selectionCore; + private boolean requiresPost; + + SelectionQueryImpl(final SelectionCoreImpl selectionCore) { + this.selectionCore = selectionCore; + } + + @Override + public SelectionQueryImpl distinct() { + selectionCore.distinct(); + return this; + } + + public SelectionQueryImpl requiresPost() { + requiresPost = true; + return this; + } + + @Override + public SelectionQueryImpl as(final String aliasName) { + selectionCore.as(aliasName); + return this; + } + + @Override + public SelectionQueryImpl all() { + selectionCore.all(); + return this; + } + + @Override + public SelectionQueryImpl countAll() { + selectionCore.countAll(); + return this; + } + + @Override + public SelectionQueryImpl column(final String name) { + selectionCore.column(name); + return this; + } + + @Override + public SelectionQueryImpl regex(final String clause) { + selectionCore.regex(clause); + return this; + } + + @Override + public SelectionQueryImpl function(final String name, final Object... 
parameters) { + selectionCore.function(name, parameters); + return this; + } + + @Override + public SelectionQueryImpl raw(final String text) { + selectionCore.raw(text); + return this; + } + + @Override + public SelectionQueryImpl count(final Object column) { + selectionCore.count(column); + return this; + } + + @Override + public SelectionQueryImpl max(final Object column) { + selectionCore.max(column); + return this; + } + + @Override + public SelectionQueryImpl min(final Object column) { + selectionCore.min(column); + return this; + } + + @Override + public SelectionQueryImpl sum(final Object column) { + selectionCore.sum(column); + return this; + } + + @Override + public SelectionQueryImpl mean(final Object column) { + selectionCore.mean(column); + return this; + } + + @Override + public SelectionQueryImpl into(final String measurement) { + selectionCore.into(measurement); + return this; + } + + @Override + public SelectionQueryImpl op(final OperationClause operationClause) { + selectionCore.op(operationClause); + return this; + } + + @Override + public SelectionQueryImpl op(final Object arg1, final String op, final Object arg2) { + selectionCore.op(arg1, op, arg2); + return this; + } + + @Override + public SelectionQueryImpl cop(final SimpleClause simpleClause) { + selectionCore.cop(simpleClause); + return this; + } + + @Override + public SelectionQueryImpl cop(final String column, final String op, final Object arg2) { + selectionCore.cop(column, op, arg2); + return this; + } + + public SelectQueryImpl from(final String database, final String table) { + SelectQueryImpl selectQuery = + new SelectQueryImpl(database, new SimpleFromClause(table), requiresPost, selectionCore); + return selectQuery; + } + + public SelectQueryImpl from(final String database, final String[] table) { + if (table == null) { + throw new IllegalArgumentException("Tables names should be specified"); + } + SelectQueryImpl selectQuery = + new SelectQueryImpl( + database, new 
MultipleFromClause(Arrays.asList(table)), requiresPost, selectionCore); + return selectQuery; + } + + public SelectQueryImpl fromRaw(final String database, final String text) { + SelectQueryImpl selectQuery = + new SelectQueryImpl(database, new RawFromClause(text), requiresPost, selectionCore); + return selectQuery; + } + + public SelectQueryImpl from(final String database) { + SelectQueryImpl selectQuery = new SelectQueryImpl(database, requiresPost, selectionCore); + return selectQuery; + } + + public SelectSubQueryImpl fromSubQuery( + final String database, final String table) { + SelectSubQueryImpl subSelect = + new SelectSubQueryImpl<>( + new SimpleFromClause(table), new ArrayList<>(), selectionCore.isDistinct); + subSelect.setParent(from(database)); + return subSelect; + } + + public SelectSubQueryImpl fromSubQuery( + final String database, final String[] tables) { + SelectSubQueryImpl subSelect = + new SelectSubQueryImpl<>( + new MultipleFromClause(Arrays.asList(tables)), + new ArrayList<>(), + selectionCore.isDistinct); + subSelect.setParent(from(database)); + return subSelect; + } + + public SelectSubQueryImpl fromSubQueryRaw( + final String database, final String text) { + SelectSubQueryImpl subSelect = + new SelectSubQueryImpl<>( + new RawFromClause(text), new ArrayList<>(), selectionCore.isDistinct); + subSelect.setParent(from(database)); + return subSelect; + } + + public SelectionSubQueryImpl fromSubQuery(final String database) { + SelectQueryImpl selectQuery = from(database); + return new SelectionSubQueryImpl<>(selectQuery); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/SelectionSubQueryImpl.java b/src/main/java/org/influxdb/querybuilder/SelectionSubQueryImpl.java new file mode 100644 index 000000000..10ac96eff --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SelectionSubQueryImpl.java @@ -0,0 +1,170 @@ +package org.influxdb.querybuilder; + +import java.util.Arrays; +import org.influxdb.querybuilder.clauses.OperationClause; 
+import org.influxdb.querybuilder.clauses.SimpleClause; +import org.influxdb.querybuilder.clauses.RawFromClause; +import org.influxdb.querybuilder.clauses.SimpleFromClause; +import org.influxdb.querybuilder.clauses.MultipleFromClause; +import org.influxdb.querybuilder.clauses.FromClause; + +public class SelectionSubQueryImpl extends SubQuery + implements Selection, WithSubquery { + + private final SelectionCoreImpl selectionCore; + + SelectionSubQueryImpl(final T selectQuery) { + setParent(selectQuery); + this.selectionCore = new SelectionCoreImpl(); + } + + @Override + public SelectionSubQueryImpl distinct() { + selectionCore.distinct(); + return this; + } + + @Override + public SelectionSubQueryImpl as(final String aliasName) { + selectionCore.as(aliasName); + return this; + } + + @Override + public SelectionSubQueryImpl all() { + selectionCore.all(); + return this; + } + + @Override + public SelectionSubQueryImpl countAll() { + selectionCore.countAll(); + return this; + } + + @Override + public SelectionSubQueryImpl column(final String name) { + selectionCore.column(name); + return this; + } + + @Override + public SelectionSubQueryImpl regex(final String clause) { + selectionCore.regex(clause); + return this; + } + + @Override + public SelectionSubQueryImpl function(final String name, final Object... 
parameters) { + selectionCore.function(name, parameters); + return this; + } + + @Override + public SelectionSubQueryImpl raw(final String text) { + selectionCore.raw(text); + return this; + } + + @Override + public SelectionSubQueryImpl count(final Object column) { + selectionCore.count(column); + return this; + } + + @Override + public SelectionSubQueryImpl max(final Object column) { + selectionCore.max(column); + return this; + } + + @Override + public SelectionSubQueryImpl min(final Object column) { + selectionCore.min(column); + return this; + } + + @Override + public SelectionSubQueryImpl sum(final Object column) { + selectionCore.sum(column); + return this; + } + + @Override + public SelectionSubQueryImpl mean(final Object column) { + selectionCore.mean(column); + return this; + } + + @Override + public SelectionSubQueryImpl op(final OperationClause operationClause) { + selectionCore.op(operationClause); + return this; + } + + @Override + public SelectionSubQueryImpl op(final Object arg1, final String op, final Object arg2) { + selectionCore.op(arg1, op, arg2); + return this; + } + + @Override + public SelectionSubQueryImpl cop(final SimpleClause simpleClause) { + selectionCore.cop(simpleClause); + return this; + } + + @Override + public SelectionSubQueryImpl cop(final String column, final String op, final Object arg2) { + selectionCore.cop(column, op, arg2); + return this; + } + + public SelectSubQueryImpl fromRaw(final String text) { + return from(new RawFromClause(text)); + } + + public SelectSubQueryImpl from(final String[] tables) { + if (tables == null) { + throw new IllegalArgumentException("Tables names should be specified"); + } + + return from(new MultipleFromClause(Arrays.asList(tables))); + } + + public SelectSubQueryImpl from(final String table) { + return from(new SimpleFromClause(table)); + } + + private SelectSubQueryImpl from(final FromClause fromClause) { + selectionCore.clearSelection(); + SelectSubQueryImpl subSelect = + new 
SelectSubQueryImpl<>(fromClause, selectionCore.columns, selectionCore.isDistinct); + subSelect.setParent(getParent()); + return subSelect; + } + + public SelectionSubQueryImpl> fromSubQuery() { + selectionCore.clearSelection(); + SelectSubQueryImpl selectSubQuery = + new SelectSubQueryImpl<>(selectionCore.columns, selectionCore.isDistinct); + selectSubQuery.setParent(this.getParent()); + SelectionSubQueryImpl> selectionSubQuery + = new SelectionSubQueryImpl<>(selectSubQuery); + return selectionSubQuery; + } + + @Override + public void setSubQuery(final QueryStringBuilder query) { + } + + @Override + public StringBuilder buildQueryString() { + return null; + } + + @Override + public StringBuilder buildQueryString(final StringBuilder stringBuilder) { + return null; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/SubQuery.java b/src/main/java/org/influxdb/querybuilder/SubQuery.java new file mode 100644 index 000000000..5f59b6ea5 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/SubQuery.java @@ -0,0 +1,19 @@ +package org.influxdb.querybuilder; + +public abstract class SubQuery implements QueryStringBuilder { + + private T parent; + + void setParent(final T parent) { + this.parent = parent; + } + + T getParent() { + return parent; + } + + public T close() { + parent.setSubQuery(this); + return parent; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/TimeZone.java b/src/main/java/org/influxdb/querybuilder/TimeZone.java new file mode 100644 index 000000000..574f562fa --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/TimeZone.java @@ -0,0 +1,23 @@ +package org.influxdb.querybuilder; + +public class TimeZone implements Appendable { + + private final String timeZone; + + TimeZone(final String timeZone) { + this.timeZone = timeZone; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder + .append(" ") + .append("tz") + .append("(") + .append("'") + .append(timeZone) + .append("'") + 
.append(")") + .append(" "); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/Where.java b/src/main/java/org/influxdb/querybuilder/Where.java new file mode 100644 index 000000000..4f4a50865 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/Where.java @@ -0,0 +1,26 @@ +package org.influxdb.querybuilder; + +import java.util.List; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ConjunctionClause; + +public interface Where { + + T and(final Clause clause); + + T or(final Clause clause); + + List getClauses(); + + WhereNested andNested(); + + WhereNested orNested(); + + T orderBy(final Ordering orderings); + + T groupBy(final Object... columns); + + T limit(final int limit); + + T limit(final int limit, final long offSet); +} diff --git a/src/main/java/org/influxdb/querybuilder/WhereCoreImpl.java b/src/main/java/org/influxdb/querybuilder/WhereCoreImpl.java new file mode 100644 index 000000000..f11491d00 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WhereCoreImpl.java @@ -0,0 +1,106 @@ +package org.influxdb.querybuilder; + +import java.util.ArrayList; +import java.util.List; +import org.influxdb.querybuilder.clauses.AndConjunction; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ConjunctionClause; +import org.influxdb.querybuilder.clauses.OrConjunction; + +public class WhereCoreImpl implements Select, Where { + + private final List clauses = new ArrayList<>(); + + private final T statement; + + WhereCoreImpl(final T statement) { + this.statement = statement; + } + + @Override + public WhereCoreImpl and(final Clause clause) { + clauses.add(new AndConjunction(clause)); + return this; + } + + @Override + public WhereCoreImpl or(final Clause clause) { + clauses.add(new OrConjunction(clause)); + return this; + } + + @Override + public WhereCoreImpl where() { + return statement.where(); + } + + @Override + public WhereCoreImpl where(final Clause clause) { 
+ return statement.where(clause); + } + + @Override + public WhereCoreImpl where(final String text) { + return statement.where(text); + } + + @Override + public List getClauses() { + return clauses; + } + + @Override + public WhereNested andNested() { + return new WhereNested<>(this, false); + } + + @Override + public WhereNested orNested() { + return new WhereNested<>(this, true); + } + + @Override + public SelectCoreImpl orderBy(final Ordering orderings) { + return statement.orderBy(orderings); + } + + @Override + public SelectCoreImpl groupBy(final Object... columns) { + return statement.groupBy(columns); + } + + @Override + public SelectCoreImpl fill(final Number value) { + return statement.fill(value); + } + + @Override + public SelectCoreImpl fill(final String value) { + return statement.fill(value); + } + + @Override + public SelectCoreImpl limit(final int limit) { + return statement.limit(limit); + } + + @Override + public SelectCoreImpl limit(final int limit, final long offSet) { + return statement.limit(limit, offSet); + } + + @Override + public SelectCoreImpl sLimit(final int sLimit) { + return statement.sLimit(sLimit); + } + + @Override + public SelectCoreImpl sLimit(final int sLimit, final long sOffSet) { + return statement.sLimit(sLimit, sOffSet); + } + + @Override + public SelectCoreImpl tz(final String timezone) { + return statement.tz(timezone); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/WhereNested.java b/src/main/java/org/influxdb/querybuilder/WhereNested.java new file mode 100644 index 000000000..d17398ae4 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WhereNested.java @@ -0,0 +1,41 @@ +package org.influxdb.querybuilder; + +import java.util.ArrayList; +import java.util.List; +import org.influxdb.querybuilder.clauses.AndConjunction; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ConjunctionClause; +import org.influxdb.querybuilder.clauses.NestedClause; +import 
org.influxdb.querybuilder.clauses.OrConjunction; + +public class WhereNested { + + private final List clauses = new ArrayList<>(); + private final boolean orConjunction; + private final T where; + + WhereNested(final T where, final boolean orConjunction) { + this.where = where; + this.orConjunction = orConjunction; + } + + public WhereNested and(final Clause clause) { + clauses.add(new AndConjunction(clause)); + return this; + } + + public WhereNested or(final Clause clause) { + clauses.add(new OrConjunction(clause)); + return this; + } + + public T close() { + if (orConjunction) { + where.or(new NestedClause(clauses)); + } else { + where.and(new NestedClause(clauses)); + } + + return where; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/WhereQueryImpl.java b/src/main/java/org/influxdb/querybuilder/WhereQueryImpl.java new file mode 100644 index 000000000..0d3cf5218 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WhereQueryImpl.java @@ -0,0 +1,119 @@ +package org.influxdb.querybuilder; + +import java.util.List; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ConjunctionClause; + +public class WhereQueryImpl extends BuiltQuery implements Where, Select { + + private final T query; + private final WhereCoreImpl whereCore; + + WhereQueryImpl(final T query, final WhereCoreImpl whereCore) { + super(null); + this.query = query; + this.whereCore = whereCore; + } + + @Override + public WhereQueryImpl where() { + return query.where(); + } + + @Override + public WhereQueryImpl where(final Clause clause) { + return query.where(clause); + } + + @Override + public WhereQueryImpl where(final String text) { + return query.where(text); + } + + @Override + public WhereQueryImpl and(final Clause clause) { + whereCore.and(clause); + return this; + } + + @Override + public WhereQueryImpl or(final Clause clause) { + whereCore.or(clause); + return this; + } + + @Override + public List getClauses() { + return 
whereCore.getClauses(); + } + + @Override + public WhereNested> andNested() { + return new WhereNested<>(this, false); + } + + @Override + public WhereNested> orNested() { + return new WhereNested<>(this, true); + } + + @Override + public SelectQueryImpl orderBy(final Ordering orderings) { + return query.orderBy(orderings); + } + + @Override + public SelectQueryImpl groupBy(final Object... columns) { + return query.groupBy(columns); + } + + @Override + public SelectQueryImpl fill(final Number value) { + return query.fill(value); + } + + @Override + public SelectQueryImpl fill(final String value) { + return query.fill(value); + } + + @Override + public SelectQueryImpl limit(final int limit) { + return query.limit(limit); + } + + @Override + public SelectQueryImpl limit(final int limit, final long offSet) { + return query.limit(limit, offSet); + } + + @Override + public SelectQueryImpl sLimit(final int sLimit) { + return query.sLimit(sLimit); + } + + @Override + public SelectQueryImpl sLimit(final int sLimit, final long sOffSet) { + return query.sLimit(sLimit, sOffSet); + } + + @Override + public SelectQueryImpl tz(final String timezone) { + return query.tz(timezone); + } + + @Override + public String getDatabase() { + return query.getDatabase(); + } + + @Override + public StringBuilder buildQueryString(final StringBuilder stringBuilder) { + return query.buildQueryString(stringBuilder); + } + + @Override + public StringBuilder buildQueryString() { + return query.buildQueryString(new StringBuilder()); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/WhereSubQueryImpl.java b/src/main/java/org/influxdb/querybuilder/WhereSubQueryImpl.java new file mode 100644 index 000000000..7aba3d5b9 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WhereSubQueryImpl.java @@ -0,0 +1,115 @@ +package org.influxdb.querybuilder; + +import java.util.List; +import org.influxdb.querybuilder.clauses.Clause; +import org.influxdb.querybuilder.clauses.ConjunctionClause; + 
+public class WhereSubQueryImpl + extends SubQuery implements Select, Where { + + private final WhereCoreImpl whereCore; + private final T selectQuery; + + WhereSubQueryImpl(final T subQuery, final WhereCoreImpl whereCore) { + this.selectQuery = subQuery; + this.whereCore = whereCore; + } + + @Override + public WhereSubQueryImpl and(final Clause clause) { + whereCore.and(clause); + return this; + } + + @Override + public WhereSubQueryImpl or(final Clause clause) { + whereCore.or(clause); + return this; + } + + @Override + public List getClauses() { + return whereCore.getClauses(); + } + + @Override + public WhereNested> andNested() { + return new WhereNested<>(this, false); + } + + @Override + public WhereNested> orNested() { + return new WhereNested<>(this, true); + } + + @Override + public T orderBy(final Ordering ordering) { + return selectQuery.orderBy(ordering); + } + + @Override + public T groupBy(final Object... columns) { + return selectQuery.groupBy(columns); + } + + @Override + public T fill(final Number value) { + return selectQuery.fill(value); + } + + @Override + public T fill(final String value) { + return selectQuery.fill(value); + } + + @Override + public T limit(final int limit) { + return selectQuery.limit(limit); + } + + @Override + public T limit(final int limit, final long offSet) { + return selectQuery.limit(limit, offSet); + } + + @Override + public T sLimit(final int sLimit) { + return selectQuery.sLimit(sLimit); + } + + @Override + public T sLimit(final int sLimit, final long sOffSet) { + return selectQuery.sLimit(sLimit, sOffSet); + } + + @Override + public T tz(final String timezone) { + return selectQuery.tz(timezone); + } + + @Override + public StringBuilder buildQueryString() { + return selectQuery.buildQueryString(new StringBuilder()); + } + + @Override + public StringBuilder buildQueryString(final StringBuilder stringBuilder) { + return selectQuery.buildQueryString(stringBuilder); + } + + @Override + public WhereSubQueryImpl where() 
{ + return selectQuery.where(); + } + + @Override + public WhereSubQueryImpl where(final Clause clause) { + return selectQuery.where(clause); + } + + @Override + public WhereSubQueryImpl where(final String text) { + return selectQuery.where(text); + } + +} diff --git a/src/main/java/org/influxdb/querybuilder/WithInto.java b/src/main/java/org/influxdb/querybuilder/WithInto.java new file mode 100644 index 000000000..d08c22e2b --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WithInto.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder; + +public interface WithInto { + + WithInto into(String measurement); +} diff --git a/src/main/java/org/influxdb/querybuilder/WithSubquery.java b/src/main/java/org/influxdb/querybuilder/WithSubquery.java new file mode 100644 index 000000000..5973432f1 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/WithSubquery.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder; + +public interface WithSubquery { + + void setSubQuery(QueryStringBuilder query); +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/AbstractClause.java b/src/main/java/org/influxdb/querybuilder/clauses/AbstractClause.java new file mode 100644 index 000000000..473618e0a --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/AbstractClause.java @@ -0,0 +1,10 @@ +package org.influxdb.querybuilder.clauses; + +public abstract class AbstractClause implements Clause { + + final String name; + + AbstractClause(final String name) { + this.name = name; + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/AddRelativeTimeClause.java b/src/main/java/org/influxdb/querybuilder/clauses/AddRelativeTimeClause.java new file mode 100644 index 000000000..d64ff31c4 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/AddRelativeTimeClause.java @@ -0,0 +1,10 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.time.TimeInterval; + +public class AddRelativeTimeClause extends 
RelativeTimeClause { + + public AddRelativeTimeClause(final TimeInterval timeInterval) { + super("+", timeInterval); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/AndConjunction.java b/src/main/java/org/influxdb/querybuilder/clauses/AndConjunction.java new file mode 100644 index 000000000..b4c46f093 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/AndConjunction.java @@ -0,0 +1,15 @@ +package org.influxdb.querybuilder.clauses; + +public class AndConjunction extends ConjunctionClause { + + private static final String AND = "AND"; + + public AndConjunction(final Clause clause) { + super(clause); + } + + @Override + public void join(final StringBuilder stringBuilder) { + stringBuilder.append(" ").append(AND).append(" "); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/Clause.java b/src/main/java/org/influxdb/querybuilder/clauses/Clause.java new file mode 100644 index 000000000..4e4820a7d --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/Clause.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.Appendable; + +public interface Clause extends Appendable { +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/Conjunction.java b/src/main/java/org/influxdb/querybuilder/clauses/Conjunction.java new file mode 100644 index 000000000..5f5eaffe2 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/Conjunction.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder.clauses; + +public interface Conjunction { + + void join(StringBuilder stringBuilder); +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/ConjunctionClause.java b/src/main/java/org/influxdb/querybuilder/clauses/ConjunctionClause.java new file mode 100644 index 000000000..b393fd81f --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/ConjunctionClause.java @@ -0,0 +1,15 @@ +package org.influxdb.querybuilder.clauses; + +public abstract class 
ConjunctionClause implements Conjunction, Clause { + + private Clause clause; + + public ConjunctionClause(final Clause clause) { + this.clause = clause; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + clause.appendTo(stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/ContainsClause.java b/src/main/java/org/influxdb/querybuilder/clauses/ContainsClause.java new file mode 100644 index 000000000..b0a9ccee6 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/ContainsClause.java @@ -0,0 +1,8 @@ +package org.influxdb.querybuilder.clauses; + +public class ContainsClause extends RegexClause { + + public ContainsClause(final String name, final String value) { + super(name, "/" + value + "/"); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/FromClause.java b/src/main/java/org/influxdb/querybuilder/clauses/FromClause.java new file mode 100644 index 000000000..fac3adc8a --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/FromClause.java @@ -0,0 +1,6 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.Appendable; + +public abstract class FromClause implements Appendable { +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/MultipleFromClause.java b/src/main/java/org/influxdb/querybuilder/clauses/MultipleFromClause.java new file mode 100644 index 000000000..17c7f0a6f --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/MultipleFromClause.java @@ -0,0 +1,22 @@ +package org.influxdb.querybuilder.clauses; + +import static org.influxdb.querybuilder.Appender.joinAndAppendNames; + +import java.util.List; + +public class MultipleFromClause extends FromClause { + + private final List tables; + + public MultipleFromClause(final List tables) { + if (tables == null || tables.size() == 0) { + throw new IllegalArgumentException("Tables names should be specified"); + } + this.tables = tables; + } + + @Override + public 
void appendTo(final StringBuilder stringBuilder) { + joinAndAppendNames(stringBuilder, tables); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/NegativeRegexClause.java b/src/main/java/org/influxdb/querybuilder/clauses/NegativeRegexClause.java new file mode 100644 index 000000000..0f472b330 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/NegativeRegexClause.java @@ -0,0 +1,25 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.Appender; +import org.influxdb.querybuilder.Operations; +import org.influxdb.querybuilder.RawText; + +public class NegativeRegexClause extends AbstractClause { + + private final RawText value; + + public NegativeRegexClause(final String name, final String value) { + super(name); + this.value = new RawText(value); + + if (value == null) { + throw new IllegalArgumentException("Missing value for regex clause"); + } + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + Appender.appendName(name, stringBuilder).append(" ").append(Operations.NER).append(" "); + Appender.appendValue(value, stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/NestedClause.java b/src/main/java/org/influxdb/querybuilder/clauses/NestedClause.java new file mode 100644 index 000000000..cef99b043 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/NestedClause.java @@ -0,0 +1,21 @@ +package org.influxdb.querybuilder.clauses; + +import static org.influxdb.querybuilder.Appender.joinAndAppend; + +import java.util.List; + +public class NestedClause implements Clause { + + private final List conjunctionClauses; + + public NestedClause(final List conjunctionClauses) { + this.conjunctionClauses = conjunctionClauses; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder.append("("); + joinAndAppend(stringBuilder, conjunctionClauses); + stringBuilder.append(")"); + } +} diff --git 
a/src/main/java/org/influxdb/querybuilder/clauses/OperationClause.java b/src/main/java/org/influxdb/querybuilder/clauses/OperationClause.java new file mode 100644 index 000000000..fff50a12c --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/OperationClause.java @@ -0,0 +1,34 @@ +package org.influxdb.querybuilder.clauses; + +import static org.influxdb.querybuilder.Appender.appendValue; + +public class OperationClause extends AbstractClause { + + private final Object arg1; + private final String op; + private final Object arg2; + + public OperationClause(final Object arg1, final String op, final Object arg2) { + super(null); + this.arg1 = arg1; + this.op = op; + this.arg2 = arg2; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + appendArg(arg1, stringBuilder); + stringBuilder.append(" ").append(op).append(" "); + appendArg(arg2, stringBuilder); + } + + private void appendArg(final Object arg, final StringBuilder stringBuilder) { + if (arg instanceof OperationClause || arg instanceof SimpleClause) { + stringBuilder.append("("); + appendValue(arg, stringBuilder); + stringBuilder.append(")"); + } else { + appendValue(arg, stringBuilder); + } + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/OrConjunction.java b/src/main/java/org/influxdb/querybuilder/clauses/OrConjunction.java new file mode 100644 index 000000000..73a7f6de8 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/OrConjunction.java @@ -0,0 +1,15 @@ +package org.influxdb.querybuilder.clauses; + +public class OrConjunction extends ConjunctionClause { + + private static final String OR = "OR"; + + public OrConjunction(final Clause clause) { + super(clause); + } + + @Override + public void join(final StringBuilder stringBuilder) { + stringBuilder.append(" ").append(OR).append(" "); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/RawFromClause.java 
b/src/main/java/org/influxdb/querybuilder/clauses/RawFromClause.java new file mode 100644 index 000000000..db90fbc67 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/RawFromClause.java @@ -0,0 +1,18 @@ +package org.influxdb.querybuilder.clauses; + +public class RawFromClause extends FromClause { + + private final String text; + + public RawFromClause(final String text) { + if (text == null) { + throw new IllegalArgumentException("Provide a valid value"); + } + this.text = text; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder.append(text); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/RawTextClause.java b/src/main/java/org/influxdb/querybuilder/clauses/RawTextClause.java new file mode 100644 index 000000000..4c067474b --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/RawTextClause.java @@ -0,0 +1,23 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.Appender; +import org.influxdb.querybuilder.RawText; + +public class RawTextClause extends AbstractClause { + + private final RawText value; + + public RawTextClause(final String text) { + super(""); + this.value = new RawText(text); + + if (text == null) { + throw new IllegalArgumentException("Missing text for expression"); + } + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + Appender.appendValue(value, stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/RegexClause.java b/src/main/java/org/influxdb/querybuilder/clauses/RegexClause.java new file mode 100644 index 000000000..77ac5f8ae --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/RegexClause.java @@ -0,0 +1,27 @@ +package org.influxdb.querybuilder.clauses; + +import static org.influxdb.querybuilder.Appender.appendName; +import static org.influxdb.querybuilder.Appender.appendValue; + +import org.influxdb.querybuilder.Operations; +import 
org.influxdb.querybuilder.RawText; + +public class RegexClause extends AbstractClause { + + private final RawText value; + + public RegexClause(final String name, final String value) { + super(name); + this.value = new RawText(value); + + if (value == null) { + throw new IllegalArgumentException("Missing value for regex clause"); + } + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + appendName(name, stringBuilder).append(" ").append(Operations.EQR).append(" "); + appendValue(value, stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/RelativeTimeClause.java b/src/main/java/org/influxdb/querybuilder/clauses/RelativeTimeClause.java new file mode 100644 index 000000000..d8b2418f4 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/RelativeTimeClause.java @@ -0,0 +1,21 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.time.TimeInterval; + +public class RelativeTimeClause extends AbstractClause { + + private final String rule; + private final TimeInterval timeInterval; + + RelativeTimeClause(final String rule, final TimeInterval timeInterval) { + super("now()"); + this.rule = rule; + this.timeInterval = timeInterval; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder.append(name).append(" ").append(rule).append(" "); + timeInterval.appendTo(stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/SelectRegexClause.java b/src/main/java/org/influxdb/querybuilder/clauses/SelectRegexClause.java new file mode 100644 index 000000000..a60a52065 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/SelectRegexClause.java @@ -0,0 +1,13 @@ +package org.influxdb.querybuilder.clauses; + +public class SelectRegexClause extends AbstractClause { + + public SelectRegexClause(final String clause) { + super(clause); + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + 
stringBuilder.append(name); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/SimpleClause.java b/src/main/java/org/influxdb/querybuilder/clauses/SimpleClause.java new file mode 100644 index 000000000..fdcbd6c4f --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/SimpleClause.java @@ -0,0 +1,21 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.Appender; + +public class SimpleClause extends AbstractClause { + + private final String op; + private final Object value; + + public SimpleClause(final String name, final String op, final Object value) { + super(name); + this.op = op; + this.value = value; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + Appender.appendName(name, stringBuilder).append(" ").append(op).append(" "); + Appender.appendValue(value, stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/SimpleFromClause.java b/src/main/java/org/influxdb/querybuilder/clauses/SimpleFromClause.java new file mode 100644 index 000000000..c64b88d8d --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/SimpleFromClause.java @@ -0,0 +1,20 @@ +package org.influxdb.querybuilder.clauses; + +import static org.influxdb.querybuilder.Appender.appendName; + +public class SimpleFromClause extends FromClause { + + private final String table; + + public SimpleFromClause(final String table) { + if (table == null) { + throw new IllegalArgumentException("Provide a valid table name"); + } + this.table = table; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + appendName(table, stringBuilder); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/SubQueryFromClause.java b/src/main/java/org/influxdb/querybuilder/clauses/SubQueryFromClause.java new file mode 100644 index 000000000..e52281ac1 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/SubQueryFromClause.java @@ -0,0 +1,22 @@ +package 
org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.QueryStringBuilder; + +public class SubQueryFromClause extends FromClause { + + private final QueryStringBuilder queryStringBuilder; + + public SubQueryFromClause(final QueryStringBuilder queryStringBuilder) { + if (queryStringBuilder == null) { + throw new IllegalArgumentException("Provide a valid value"); + } + this.queryStringBuilder = queryStringBuilder; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder.append("("); + queryStringBuilder.buildQueryString(stringBuilder); + stringBuilder.append(")"); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/clauses/SubRelativeTimeClause.java b/src/main/java/org/influxdb/querybuilder/clauses/SubRelativeTimeClause.java new file mode 100644 index 000000000..604468d58 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/clauses/SubRelativeTimeClause.java @@ -0,0 +1,10 @@ +package org.influxdb.querybuilder.clauses; + +import org.influxdb.querybuilder.time.TimeInterval; + +public class SubRelativeTimeClause extends RelativeTimeClause { + + public SubRelativeTimeClause(final TimeInterval timeInterval) { + super("-", timeInterval); + } +} diff --git a/src/main/java/org/influxdb/querybuilder/time/DurationLiteral.java b/src/main/java/org/influxdb/querybuilder/time/DurationLiteral.java new file mode 100644 index 000000000..f686c5796 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/time/DurationLiteral.java @@ -0,0 +1,16 @@ +package org.influxdb.querybuilder.time; + +public final class DurationLiteral { + + private DurationLiteral() { + } + + public static final String NANOSECONDS = "NANOSECONDS"; + public static final String MICROSECONDS = "µ"; + public static final String MILLISECONDS = "ms"; + public static final String SECOND = "s"; + public static final String MINUTE = "m"; + public static final String HOUR = "h"; + public static final String DAY = "d"; + public static final String WEEK = 
"w"; +} diff --git a/src/main/java/org/influxdb/querybuilder/time/TimeInterval.java b/src/main/java/org/influxdb/querybuilder/time/TimeInterval.java new file mode 100644 index 000000000..81b05ff80 --- /dev/null +++ b/src/main/java/org/influxdb/querybuilder/time/TimeInterval.java @@ -0,0 +1,19 @@ +package org.influxdb.querybuilder.time; + +import org.influxdb.querybuilder.Appendable; + +public class TimeInterval implements Appendable { + + private final Long measure; + private final String literal; + + public TimeInterval(final Long measure, final String literal) { + this.measure = measure; + this.literal = literal; + } + + @Override + public void appendTo(final StringBuilder stringBuilder) { + stringBuilder.append(measure).append(literal); + } +} diff --git a/src/main/resources/docker-compose.yml b/src/main/resources/docker-compose.yml new file mode 100644 index 000000000..91d5d5cea --- /dev/null +++ b/src/main/resources/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3.1' + +services: + # Define an InfluxDB service + influxdb: + image: ${image} + volumes: + - ${project.basedir}/influxdb.conf:/etc/influxdb/influxdb.conf + ports: + - "8086:8086" + - "8089:8089/udp" \ No newline at end of file diff --git a/src/test-jdk17/java/org/influxdb/impl/InfluxDBRecordResultMapperTest.java b/src/test-jdk17/java/org/influxdb/impl/InfluxDBRecordResultMapperTest.java new file mode 100644 index 000000000..c16fa24b0 --- /dev/null +++ b/src/test-jdk17/java/org/influxdb/impl/InfluxDBRecordResultMapperTest.java @@ -0,0 +1,579 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the 
Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.impl; + +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.dto.QueryResult; +import org.junit.Assert; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * @author Eran Leshem + */ +@SuppressWarnings({"removal", "deprecation"}) +@RunWith(JUnitPlatform.class) +public class InfluxDBRecordResultMapperTest { + + private final InfluxDBResultMapper mapper = new InfluxDBResultMapper(); + + @Test + public void testToRecord_HappyPath() { + // Given... 
+ var columnList = Arrays.asList("time", "uuid"); + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + series.setName("CustomMeasurement"); + series.setValues(List.of(firstSeriesResult)); + + var internalResult = new QueryResult.Result(); + internalResult.setSeries(List.of(series)); + + var queryResult = new QueryResult(); + queryResult.setResults(List.of(internalResult)); + + //When... + var myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... + Assertions.assertEquals(1, myList.size(), "there must be one entry in the result list"); + } + + @Test + public void testThrowExceptionIfMissingAnnotation() { + Assertions.assertThrows(IllegalArgumentException.class, () -> mapper.throwExceptionIfMissingAnnotation(String.class)); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultHasError() { + var queryResult = new QueryResult(); + queryResult.setError("main queryresult error"); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> mapper.throwExceptionIfResultWithError(queryResult)); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { + var seriesResult = new QueryResult.Result(); + seriesResult.setError("series error"); + + var queryResult = new QueryResult(); + queryResult.setResults(List.of(seriesResult)); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> mapper.throwExceptionIfResultWithError(queryResult)); + } + + @Test + public void testGetMeasurementName_testStateMeasurement() { + Assertions.assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); + } + + @Test + public void testParseSeriesAs_testTwoValidSeries() { + // Given... 
+ InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var columnList = Arrays.asList("time", "uuid"); + + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + List secondSeriesResult = Arrays.asList(Instant.now().plusSeconds(1).toEpochMilli(), + UUID.randomUUID().toString()); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult, secondSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 2, "there must be two series in the result list"); + + Assertions.assertEquals(firstSeriesResult.get(0), result.get(0).time().toEpochMilli(), + "Field 'time' (1st series) is not valid"); + Assertions.assertEquals(firstSeriesResult.get(1), result.get(0).uuid(), "Field 'uuid' (1st series) is not valid"); + + Assertions.assertEquals(secondSeriesResult.get(0), result.get(1).time().toEpochMilli(), + "Field 'time' (2nd series) is not valid"); + Assertions.assertEquals(secondSeriesResult.get(1), result.get(1).uuid(), "Field 'uuid' (2nd series) is not valid"); + } + + @Test + public void testParseSeriesAs_testNonNullAndValidValues() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurementWithPrimitives.class); + + var columnList = Arrays.asList("time", "uuid", + "doubleObject", "longObject", "integerObject", + "doublePrimitive", "longPrimitive", "integerPrimitive", + "booleanObject", "booleanPrimitive"); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + // InfluxDB client returns any number as Double. + // See https://github.com/influxdata/influxdb-java/issues/153#issuecomment-259681987 + // for more information. 
+ + var series = new QueryResult.Series(); + series.setColumns(columnList); + var uuidAsString = UUID.randomUUID().toString(); + List seriesResult = Arrays.asList(now, uuidAsString, + new Double("1.01"), new Double("2"), new Double("3"), + new Double("1.01"), new Double("4"), new Double("5"), + "false", "true"); + series.setValues(List.of(seriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurementWithPrimitives.class, result); + + //Then... + var myObject = result.get(0); + Assertions.assertEquals(now.longValue(), myObject.time().toEpochMilli(), "field 'time' does not match"); + Assertions.assertEquals(uuidAsString, myObject.uuid(), "field 'uuid' does not match"); + + Assertions.assertEquals(asDouble(seriesResult.get(2)), myObject.doubleObject(), + "field 'doubleObject' does not match"); + Assertions.assertEquals(Long.valueOf(asDouble(seriesResult.get(3)).longValue()), myObject.longObject(), + "field 'longObject' does not match"); + Assertions.assertEquals(Integer.valueOf(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject(), + "field 'integerObject' does not match"); + + Assertions.assertTrue( + Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive()) == 0, + "field 'doublePrimitive' does not match"); + + Assertions.assertTrue(asDouble(seriesResult.get(6)).longValue() == myObject.longPrimitive(), + "field 'longPrimitive' does not match"); + + Assertions.assertTrue(asDouble(seriesResult.get(7)).intValue() == myObject.integerPrimitive(), + "field 'integerPrimitive' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject(), + "field 'booleanObject' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive(), + "field 'booleanPrimitive' does not match"); + } + + private static Double asDouble(Object obj) { + return 
(Double) obj; + } + + @Test + public void testFieldValueModified_DateAsISO8601() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var columnList = List.of("time"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = List.of("2017-06-19T09:29:45.655123Z"); + series.setValues(List.of(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testFieldValueModified_DateAsInteger() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var columnList = List.of("time"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = List.of(1_000); + series.setValues(List.of(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testUnsupportedField() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyRecordWithUnsupportedField.class); + + var columnList = List.of("bar"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = List.of("content representing a Date"); + series.setValues(List.of(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + Assertions.assertThrows(InfluxDBMapperException.class, + () -> mapper.parseSeriesAs(series, MyRecordWithUnsupportedField.class, result)); + } + + /** + * for more information. + */ + @Test + public void testToRecord_SeriesFromQueryResultIsNull() { + // Given... 
+ InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var internalResult = new QueryResult.Result(); + internalResult.setSeries(null); + + var queryResult = new QueryResult(); + queryResult.setResults(List.of(internalResult)); + + // When... + var myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... + Assertions.assertTrue( myList.isEmpty(), "there must NO entry in the result list"); + } + + @Test + public void testToRecord_QueryResultCreatedByGroupByClause() { + // Given... + InfluxDBResultMapper.cacheRecordClass(GroupByCarrierDeviceOS.class); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + // When the "GROUP BY" clause is used, "tags" are returned as Map + Map firstSeriesTagMap = new HashMap<>(2); + firstSeriesTagMap.put("CARRIER", "000/00"); + firstSeriesTagMap.put("DEVICE_OS_VERSION", "4.4.2"); + + Map secondSeriesTagMap = new HashMap<>(2); + secondSeriesTagMap.put("CARRIER", "000/01"); + secondSeriesTagMap.put("DEVICE_OS_VERSION", "9.3.5"); + + var firstSeries = new QueryResult.Series(); + var columnList = Arrays.asList("time", "median", "min", "max"); + firstSeries.setColumns(columnList); + List firstSeriesResult = Arrays.asList(now, new Double("233.8"), new Double("0.0"), + new Double("3090744.0")); + firstSeries.setValues(List.of(firstSeriesResult)); + firstSeries.setTags(firstSeriesTagMap); + firstSeries.setName("tb_network"); + + var secondSeries = new QueryResult.Series(); + secondSeries.setColumns(columnList); + List secondSeriesResult = Arrays.asList(now, new Double("552.0"), new Double("135.0"), + new Double("267705.0")); + secondSeries.setValues(List.of(secondSeriesResult)); + secondSeries.setTags(secondSeriesTagMap); + secondSeries.setName("tb_network"); + + var internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(firstSeries, secondSeries)); + + var queryResult = new QueryResult(); + 
queryResult.setResults(List.of(internalResult)); + + // When... + var myList = mapper.toPOJO(queryResult, GroupByCarrierDeviceOS.class); + + // Then... + var firstGroupByEntry = myList.get(0); + Assertions.assertEquals("000/00", firstGroupByEntry.carrier(), "field 'carrier' does not match"); + Assertions.assertEquals("4.4.2", firstGroupByEntry.deviceOsVersion(), "field 'deviceOsVersion' does not match"); + + var secondGroupByEntry = myList.get(1); + Assertions.assertEquals("000/01", secondGroupByEntry.carrier(), "field 'carrier' does not match"); + Assertions.assertEquals("9.3.5", secondGroupByEntry.deviceOsVersion(), "field 'deviceOsVersion' does not match"); + } + + @Test + public void testToRecord_ticket363() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var columnList = List.of("time"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = List.of("2000-01-01T00:00:00.000000001Z"); + series.setValues(List.of(firstSeriesResult)); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... + Assertions.assertEquals(1, result.size(), "incorrect number of elemets"); + Assertions.assertEquals(1, result.get(0).time().getNano(), "incorrect value for the nanoseconds field"); + } + + @Test + void testToRecord_Precision() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var series = new QueryResult.Series(); + series.setName("CustomMeasurement"); + var columnList = List.of("time"); + series.setColumns(columnList); + List firstSeriesResult = List.of(1_500_000L); + series.setValues(List.of(firstSeriesResult)); + + var internalResult = new QueryResult.Result(); + internalResult.setSeries(List.of(series)); + + var queryResult = new QueryResult(); + queryResult.setResults(List.of(internalResult)); + + // When... 
+ var result = mapper.toPOJO(queryResult, MyCustomMeasurement.class, TimeUnit.SECONDS); + + // Then... + Assertions.assertEquals(1, result.size(), "incorrect number of elements"); + Assertions.assertEquals(1_500_000_000L, result.get(0).time().toEpochMilli(), + "incorrect value for the millis field"); + } + + @Test + void testToRecord_SetMeasureName() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var series = new QueryResult.Series(); + series.setName("MySeriesName"); + var columnList = List.of("uuid"); + series.setColumns(columnList); + List firstSeriesResult = Collections.singletonList(UUID.randomUUID().toString()); + series.setValues(List.of(firstSeriesResult)); + + var internalResult = new QueryResult.Result(); + internalResult.setSeries(List.of(series)); + + var queryResult = new QueryResult(); + queryResult.setResults(List.of(internalResult)); + + //When... + var result = + mapper.toPOJO(queryResult, MyCustomMeasurement.class, "MySeriesName"); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testToRecord_HasTimeColumn() { + // Given... + InfluxDBResultMapper.cacheRecordClass(HasTimeColumnMeasurement.class); + + var columnList = List.of("time"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List> valuesList = Arrays.asList( + List.of("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + List.of("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + List.of("2000-01-01T00:00:00-00:00"), + List.of("2000-01-02T00:00:00+00:00") + ); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, HasTimeColumnMeasurement.class, result); + + // Then... 
+ Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time().equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time().equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time().equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time().equals(Instant.parse("2000-01-02T00:00:00Z"))); + + } + + @Test + public void testToRecord_ticket573() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + var columnList = List.of("time"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List> valuesList = Arrays.asList( + List.of("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + List.of("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + List.of("2000-01-01T00:00:00-00:00"), + List.of("2000-01-02T00:00:00+00:00") + ); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... + Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time().equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time().equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time().equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time().equals(Instant.parse("2000-01-02T00:00:00Z"))); + } + + @Test + public void testMultipleConstructors() { + // Given... 
+ InfluxDBResultMapper.cacheRecordClass(MultipleConstructors.class); + + var columnList = List.of("i", "s"); + + var series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = List.of(9.0, "str"); + series.setValues(List.of(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MultipleConstructors.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + + Assert.assertEquals(9, result.get(0).i()); + Assert.assertEquals("str", result.get(0).s()); + } + + @Test + public void testConflictingConstructors() { + Assert.assertThrows(InfluxDBMapperException.class, + () -> InfluxDBResultMapper.cacheRecordClass(ConflictingConstructors.class)); + } + + @Measurement(name = "HasTimeColumnMeasurement") + record HasTimeColumnMeasurement( + @TimeColumn + Instant time, + Integer value) {} + + @Measurement(name = "CustomMeasurement") + record MyCustomMeasurement( + Instant time, + String uuid, + Double doubleObject, + Long longObject, + Integer integerObject, + Boolean booleanObject, + + @SuppressWarnings("unused") + String nonColumn1, + + @SuppressWarnings("unused") + Random rnd) {} + + @Measurement(name = "CustomMeasurement") + record MyCustomMeasurementWithPrimitives( + Instant time, + String uuid, + Double doubleObject, + Long longObject, + Integer integerObject, + double doublePrimitive, + long longPrimitive, + int integerPrimitive, + Boolean booleanObject, + boolean booleanPrimitive, + + @SuppressWarnings("unused") + String nonColumn1, + + @SuppressWarnings("unused") + Random rnd) {} + + @Measurement(name = "foo") + record MyRecordWithUnsupportedField( + @Column(name = "bar") + Date myDate) {} + + /** + * Class created based on example from this issue + */ + @Measurement(name = "tb_network") + record GroupByCarrierDeviceOS( + Instant time, + + @Column(name = "CARRIER", tag = true) + String carrier, + + @Column(name = "DEVICE_OS_VERSION", tag = true) + String deviceOsVersion, 
+ + Double median, + Double min, + Double max) {} + + record MultipleConstructors(int i, String s) { + MultipleConstructors(String i, String s) { + this(Integer.parseInt(i), s); + } + + MultipleConstructors(int i, String s, double d) { + this(i, s); + } + } + + record ConflictingConstructors(int i, String s) { + private ConflictingConstructors(String s, int i) { + this(i, s); + } + } +} diff --git a/src/test/java/com/android/tools/r8/RecordTag.java b/src/test/java/com/android/tools/r8/RecordTag.java new file mode 100644 index 000000000..51aacedbb --- /dev/null +++ b/src/test/java/com/android/tools/r8/RecordTag.java @@ -0,0 +1,9 @@ +package com.android.tools.r8; + +/** + * Simulates the super class of Android-desugared records. + * + * @author Eran Leshem + **/ +public class RecordTag { +} diff --git a/src/test/java/org/influxdb/AsyncResult.java b/src/test/java/org/influxdb/AsyncResult.java new file mode 100644 index 000000000..e31f693b1 --- /dev/null +++ b/src/test/java/org/influxdb/AsyncResult.java @@ -0,0 +1,49 @@ +package org.influxdb; + +import java.util.function.Consumer; + +public class AsyncResult { + + private final Object syncObject = new Object(); + + private boolean gotResult = false; + private T result = null; + private Throwable throwable = null; + + T result() throws Throwable { + while (!this.gotResult) { + synchronized (this.syncObject) { + this.syncObject.wait(); + } + } + + if (this.throwable != null) { + throw this.throwable; + } + + return this.result; + } + + public final Consumer resultConsumer = new Consumer() { + @Override + public void accept(T t) { + synchronized (syncObject) { + result = t; + gotResult = true; + syncObject.notifyAll(); + } + } + }; + + public final Consumer errorConsumer = new Consumer() { + @Override + public void accept(Throwable t) { + synchronized (syncObject) { + throwable = t; + gotResult = true; + syncObject.notifyAll(); + } + } + }; + +} diff --git a/src/test/java/org/influxdb/BatchOptionsTest.java 
b/src/test/java/org/influxdb/BatchOptionsTest.java new file mode 100644 index 000000000..1abef4576 --- /dev/null +++ b/src/test/java/org/influxdb/BatchOptionsTest.java @@ -0,0 +1,720 @@ +package org.influxdb; + +import okhttp3.OkHttpClient; +import org.influxdb.InfluxDB.ConsistencyLevel; +import org.influxdb.InfluxDBException.DatabaseNotFoundException; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.influxdb.impl.BatchProcessor; +import org.influxdb.impl.BatchProcessorTest; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import static org.mockito.Mockito.*; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Supplier; + + +@RunWith(JUnitPlatform.class) +public class BatchOptionsTest { + + InfluxDB influxDB; + + @BeforeEach + public void setUp() throws InterruptedException, IOException { + this.influxDB = TestUtils.connectToInfluxDB(); + } + + /** + * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. 
+ */ + @Test + public void testBatchEnabledWithDefaultSettings() { + try { + this.influxDB.enableBatch(); + + } + finally { + this.influxDB.disableBatch(); + } + } + + @Test + public void testParametersSet() { + + + + BatchOptions options = BatchOptions.DEFAULTS.actions(3); + Assertions.assertEquals(3, options.getActions()); + options=options.consistency(InfluxDB.ConsistencyLevel.ANY); + Assertions.assertEquals(InfluxDB.ConsistencyLevel.ANY, options.getConsistency()); + options=options.flushDuration(1001); + Assertions.assertEquals(1001, options.getFlushDuration()); + options=options.bufferLimit(7070); + Assertions.assertEquals(7070, options.getBufferLimit()); + options=options.jitterDuration(104); + Assertions.assertEquals(104, options.getJitterDuration()); + BiConsumer, Throwable> handler=new BiConsumer, Throwable>() { + @Override + public void accept(Iterable points, Throwable throwable) { + + } + }; + options=options.exceptionHandler(handler); + Assertions.assertEquals(handler, options.getExceptionHandler()); + ThreadFactory tf=Executors.defaultThreadFactory(); + options=options.threadFactory(tf); + Assertions.assertEquals(tf, options.getThreadFactory()); + Assertions.assertEquals(false, options.isDropActionsOnQueueExhaustion()); + + options=options.dropActionsOnQueueExhaustion(true); + Assertions.assertEquals(true, options.isDropActionsOnQueueExhaustion()); + Consumer droppedActionHandler = (pt) -> {}; + options=options.droppedActionHandler(droppedActionHandler); + Assertions.assertEquals(droppedActionHandler, options.getDroppedActionHandler()); + } + + /** + * Test the implementation of {@link BatchOptions#actions(int)} }. 
+ */ + @Test + public void testActionsSetting() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.actions(3).flushDuration(100); + + this.influxDB.enableBatch(options); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); + for (int j = 0; j < 5; j++) { + Point point = Point.measurement("cpu") + .time(j,TimeUnit.MILLISECONDS) + .addField("idle", (double) j) + .addField("user", 2.0 * j) + .addField("system", 3.0 * j).build(); + this.influxDB.write(point); + } + + //wait for at least one flush period + Thread.sleep(200); + //test at least 3 points was written + QueryResult result = influxDB.query(new Query("select * from cpu", dbName)); + int size = result.getResults().get(0).getSeries().get(0).getValues().size(); + Assertions.assertTrue(size >= 3, "there must be be at least 3 points written"); + + //wait for at least one flush period + Thread.sleep(200); + + //test all 5 points was written + result = influxDB.query(new Query("select * from cpu", dbName)); + Assertions.assertEquals(5, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + } + + /** + * Test the implementation of {@link BatchOptions#dropActionsOnQueueExhaustion(boolean)} and {@link BatchOptions#droppedActionHandler(Consumer)} }. + */ + @Test + public void testDropActionsOnQueueExhaustionSettings() throws InterruptedException, IOException { + + /* + To simulate a behavior where the action if exhausted because it is not cleared by the batchProcessor(may be because the latency of writes to influx server went up), + will have to artificially inrease latency of http calls to influx db server, for that need to add an interceptor to the http client. + Thus locally creating a new influx db server instead of using the global one. 
+ */ + + CountDownLatch countDownLatch = new CountDownLatch(1); + try (InfluxDB influxDBLocal = TestUtils.connectToInfluxDB(new OkHttpClient.Builder().addInterceptor(chain -> { + try { + //blocking the http call for write with countdown latch + if (chain.request().url().encodedPath().contains("write")) + countDownLatch.await(5, TimeUnit.SECONDS); + return chain.proceed(chain.request()); + } catch (Exception e) { + e.printStackTrace(); + } + return chain.proceed(chain.request()); + }), null, InfluxDB.ResponseFormat.JSON)) { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + + Consumer droppedActionHandler = mock(Consumer.class); + BatchOptions options = BatchOptions.DEFAULTS.actions(2).flushDuration(1000) + .dropActionsOnQueueExhaustion(true) + .droppedActionHandler(droppedActionHandler); + + influxDBLocal.query(new Query("CREATE DATABASE " + dbName)); + influxDBLocal.setDatabase(dbName); + influxDBLocal.enableBatch(options); + + + //write 4 points and the first two will get flushed as well as actions = 2; + writeSomePoints(influxDBLocal, 1, 2); + //wait for the BatchProcessor$write() to flush the queue. + Thread.sleep(200); + + //4 more points, last 2 of these should get dropped + writeSomePoints(influxDBLocal, 3, 6); + verify(droppedActionHandler, times(2)).accept(any()); + + //releasing the latch + countDownLatch.countDown(); + + //wait for the point to get written to influx db. + Thread.sleep(200); + + QueryResult result = influxDBLocal.query(new Query("select * from weather", dbName)); + //assert that 2 points were dropped + Assertions.assertEquals(4, result.getResults().get(0).getSeries().get(0).getValues().size()); + //assert that that last 2 points were dropped. 
+ org.assertj.core.api.Assertions.assertThat(result.getResults().get(0).getSeries().get(0).getValues()).extracting(a -> a.get(2)) + .containsOnly(1.0, 2.0, 3.0, 4.0); + + } + finally { + influxDBLocal.query(new Query("DROP DATABASE " + dbName)); + } + + } + + } + + /** + * Test the implementation of {@link BatchOptions#flushDuration(int)} }. + * @throws InterruptedException + */ + @Test + public void testFlushDuration() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(200); + influxDB.query(new Query("CREATE DATABASE " + dbName)); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); + + //check no points writen to DB before the flush duration + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + //wait for at least one flush + Thread.sleep(500); + result = influxDB.query(new Query("select * from weather", dbName)); + + //check points written already to DB + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + this.influxDB.disableBatch(); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + } + + /** + * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. 
+ * @throws InterruptedException + */ + @Test + public void testJitterDuration() throws Exception { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + // prepare points before start BatchProcessor + List points = prepareSomePoints(0, 19); + BatchOptions options = BatchOptions.DEFAULTS.flushDuration(100).jitterDuration(1000); + influxDB.query(new Query("CREATE DATABASE " + dbName)); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + BatchProcessor batchProcessor = BatchProcessorTest.getPrivateField(influxDB, "batchProcessor"); + // random always return 1.0 to be sure that first query is null + BatchProcessorTest.setPrivateField(batchProcessor, "randomSupplier", (Supplier) () -> 1.0); + points.forEach(influxDB::write); + + Thread.sleep(100); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + //wait for at least one flush + Thread.sleep(1500); + result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + influxDB.disableBatch(); + influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + + } + + /** + * Test the implementation of {@link BatchOptions#jitterDuration(int)} }. + */ + @Test + public void testNegativeJitterDuration() { + + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchOptions options = BatchOptions.DEFAULTS.jitterDuration(-10); + influxDB.enableBatch(options); + influxDB.disableBatch(); + }); + } + + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. 
+ * use a bufferLimit that less than actions, then OneShotBatchWrite is used + */ + @Test + public void testBufferLimitLessThanActions() throws InterruptedException { + + TestAnswer answer = new TestAnswer() { + + InfluxDBException influxDBException = InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + @Override + protected void check(InvocationOnMock invocation) { + if ((Boolean) params.get("throwException")) { + throw influxDBException; + } + } + }; + + InfluxDB spy = spy(influxDB); + //the spied influxDB.write(BatchPoints) will always throw InfluxDBException + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + answer.params.put("throwException", true); + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(3).actions(4).flushDuration(100).exceptionHandler(mockHandler); + + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + spy.enableBatch(options); + write20Points(spy); + + Thread.sleep(300); + verify(mockHandler, atLeastOnce()).accept(any(), any()); + + QueryResult result = spy.query(new Query("select * from weather", dbName)); + //assert 0 point written because of InfluxDBException and OneShotBatchWriter did not retry + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + answer.params.put("throwException", false); + write20Points(spy); + Thread.sleep(300); + result = spy.query(new Query("select * from weather", dbName)); + //assert all 20 points written to DB due to no exception + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } + + } + + /** + * Test the implementation of {@link 
BatchOptions#bufferLimit(int)} }. + * use a bufferLimit that greater than actions, then RetryCapableBatchWriter is used + */ + @Test + public void testBufferLimitGreaterThanActions() throws InterruptedException { + TestAnswer answer = new TestAnswer() { + + int nthCall = 0; + InfluxDBException cacheMaxMemorySizeExceededException = InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + @Override + protected void check(InvocationOnMock invocation) { + + switch (nthCall++) { + case 0: + throw InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.DATABASE_NOT_FOUND_ERROR)); + case 1: + throw InfluxDBException.buildExceptionForErrorState(createErrorBody(InfluxDBException.CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)); + default: + break; + } + } + }; + + InfluxDB spy = spy(influxDB); + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(10).actions(8).flushDuration(100).exceptionHandler(mockHandler); + + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + spy.enableBatch(options); + writeSomePoints(spy, "measurement1", 0, 5); + + Thread.sleep(300); + verify(mockHandler, atLeastOnce()).accept(any(), any()); + + QueryResult result = spy.query(new Query("select * from measurement1", dbName)); + //assert 0 point written because of non-retry capable DATABASE_NOT_FOUND_ERROR and RetryCapableBatchWriter did not retry + Assertions.assertNull(result.getResults().get(0).getSeries()); + Assertions.assertNull(result.getResults().get(0).getError()); + + writeSomePoints(spy, "measurement2", 0, 5); + + Thread.sleep(300); + + result = spy.query(new Query("select * from measurement2", dbName)); + //assert all 6 point written because of retry capable 
CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR and RetryCapableBatchWriter did retry + Assertions.assertEquals(6, result.getResults().get(0).getSeries().get(0).getValues().size()); + } + finally { + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } + + } + /** + * Test the implementation of {@link BatchOptions#bufferLimit(int)} }. + */ + @Test + public void testNegativeBufferLimit() { + + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchOptions options = BatchOptions.DEFAULTS.bufferLimit(-10); + influxDB.enableBatch(options); + influxDB.disableBatch(); + }); + } + + /** + * Test the implementation of {@link BatchOptions#threadFactory(ThreadFactory)} }. + * @throws InterruptedException + */ + @Test + public void testThreadFactory() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + ThreadFactory spy = spy(new ThreadFactory() { + + ThreadFactory threadFactory = Executors.defaultThreadFactory(); + @Override + public Thread newThread(Runnable r) { + return threadFactory.newThread(r); + } + }); + BatchOptions options = BatchOptions.DEFAULTS.threadFactory(spy).flushDuration(100); + + influxDB.query(new Query("CREATE DATABASE " + dbName)); + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + write20Points(influxDB); + + Thread.sleep(500); + //Test the thread factory is used somewhere + verify(spy, atLeastOnce()).newThread(any()); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + } finally { + this.influxDB.disableBatch(); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + } + + /** + * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. 
+ * @throws InterruptedException + */ + @Test + public void testHandlerOnRetryImpossible() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); + + influxDB.setDatabase(dbName); + influxDB.enableBatch(options); + + writeSomePoints(influxDB, 1); + + verify(mockHandler, timeout(500).times(1)).accept(any(), any()); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + } finally { + influxDB.disableBatch(); + } + + } + + /** + * Test the implementation of {@link BatchOptions#exceptionHandler(BiConsumer)} }. + * @throws InterruptedException + */ + @Test + public void testHandlerOnRetryPossible() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + InfluxDB spy = spy(influxDB); + doAnswer(new Answer() { + boolean firstCall = true; + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + if (firstCall) { + firstCall = false; + throw new InfluxDBException("error"); + } else { + return invocation.callRealMethod(); + } + } + }).when(spy).write(any(BatchPoints.class)); + + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); + + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + spy.enableBatch(options); + + writeSomePoints(spy, 1); + + Thread.sleep(500); + verify(mockHandler, never()).accept(any(), any()); + + verify(spy, times(2)).write(any(BatchPoints.class)); + + QueryResult result = influxDB.query(new Query("select * from weather", dbName)); + Assertions.assertNotNull(result.getResults().get(0).getSeries()); + Assertions.assertEquals(1, 
result.getResults().get(0).getSeries().get(0).getValues().size()); + + } finally { + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } + + } + + /** + * Test the implementation of {@link BatchOptions#consistency(InfluxDB.ConsistencyLevel)} }. + * @throws InterruptedException + */ + @Test + public void testConsistency() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + + InfluxDB spy = spy(influxDB); + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + try { + TestAnswer answer = new TestAnswer() { + @Override + protected void check(InvocationOnMock invocation) { + BatchPoints batchPoints = (BatchPoints) invocation.getArgument(0); + Assertions.assertEquals(params.get("consistencyLevel"), batchPoints.getConsistency()); + + } + }; + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + + int n = 0; + for (final ConsistencyLevel consistencyLevel : ConsistencyLevel.values()) { + answer.params.put("consistencyLevel", consistencyLevel); + BatchOptions options = BatchOptions.DEFAULTS.consistency(consistencyLevel).flushDuration(100); + spy.enableBatch(options); + Assertions.assertEquals(options.getConsistency(), consistencyLevel); + + writeSomePoints(spy, n, n + 4); + n += 5; + Thread.sleep(300); + + verify(spy, atLeastOnce()).write(any(BatchPoints.class)); + QueryResult result = spy.query(new Query("select * from weather", dbName)); + Assertions.assertEquals(n, result.getResults().get(0).getSeries().get(0).getValues().size()); + + + spy.disableBatch(); + } + + } finally { + spy.query(new Query("DROP DATABASE " + dbName)); + } + } + + + @Test + public void testWriteWithRetryOnRecoverableError() throws InterruptedException { + String dbName = "write_unittest_" + System.currentTimeMillis(); + InfluxDB spy = spy(influxDB); + doAnswer(new Answer() { + boolean firstCall = true; + + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + if 
(firstCall) { + firstCall = false; + throw new InfluxDBException("error"); + } else { + return invocation.callRealMethod(); + } + } + }).when(spy).write(any(BatchPoints.class)); + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); + + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + spy.enableBatch(options); + + BatchPoints batchPoints = createBatchPoints(dbName, "m0", 200); + spy.writeWithRetry(batchPoints); + Thread.sleep(500); + verify(mockHandler, never()).accept(any(), any()); + + verify(spy, times(2)).write(any(BatchPoints.class)); + + Thread.sleep(1_500); + QueryResult result = influxDB.query(new Query("select * from m0", dbName)); + Assertions.assertNotNull(result.getResults().get(0).getSeries()); + Assertions.assertEquals(200, result.getResults().get(0).getSeries().get(0).getValues().size()); + + } finally { + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } + } + + @Test + public void testWriteWithRetryOnUnrecoverableError() throws InterruptedException { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + InfluxDB spy = spy((InfluxDB) influxDB); + doThrow(DatabaseNotFoundException.class).when(spy).write(any(BatchPoints.class)); + + try { + BiConsumer, Throwable> mockHandler = mock(BiConsumer.class); + BatchOptions options = BatchOptions.DEFAULTS.exceptionHandler(mockHandler).flushDuration(100); + + spy.query(new Query("CREATE DATABASE " + dbName)); + spy.setDatabase(dbName); + spy.enableBatch(options); + + BatchPoints batchPoints = createBatchPoints(dbName, "m0", 200); + spy.writeWithRetry(batchPoints); + Thread.sleep(500); + + verify(mockHandler, times(1)).accept(any(), any()); + + QueryResult result = influxDB.query(new Query("select * from m0", dbName)); + Assertions.assertNull(result.getResults().get(0).getSeries()); + 
Assertions.assertNull(result.getResults().get(0).getError()); + } finally { + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } + + } + + @Test + public void testWriteWithRetryOnBatchingNotEnabled() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + try { + + influxDB.query(new Query("CREATE DATABASE " + dbName)); + influxDB.setDatabase(dbName); + + BatchPoints batchPoints = createBatchPoints(dbName, "m0", 200); + influxDB.writeWithRetry(batchPoints); + + QueryResult result = influxDB.query(new Query("select * from m0", dbName)); + Assertions.assertNotNull(result.getResults().get(0).getSeries()); + Assertions.assertEquals(200, result.getResults().get(0).getSeries().get(0).getValues().size()); + } finally { + influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + } + void writeSomePoints(InfluxDB influxDB, String measurement, int firstIndex, int lastIndex) { + for (int i = firstIndex; i <= lastIndex; i++) { + Point point = Point.measurement(measurement) + .time(i,TimeUnit.HOURS) + .addField("field1", (double) i) + .addField("field2", (double) (i) * 1.1) + .addField("field3", "moderate").build(); + influxDB.write(point); + } + } + + void writeSomePoints(InfluxDB influxDB, int firstIndex, int lastIndex) { + prepareSomePoints(firstIndex, lastIndex).forEach(influxDB::write); + } + + void write20Points(InfluxDB influxDB) { + writeSomePoints(influxDB, 0, 19); + } + + void writeSomePoints(InfluxDB influxDB, int n) { + writeSomePoints(influxDB, 0, n - 1); + } + + List prepareSomePoints(int firstIndex, int lastIndex) { + List points = new ArrayList<>(); + for (int i = firstIndex; i <= lastIndex; i++) { + Point point = Point.measurement("weather") + .time(i, TimeUnit.HOURS) + .addField("temperature", (double) i) + .addField("humidity", (double) (i) * 1.1) + .addField("uv_index", "moderate").build(); + points.add(point); + } + return points; + } + + private BatchPoints createBatchPoints(String dbName, String measurement, int 
n) { + BatchPoints batchPoints = BatchPoints.database(dbName).build(); + for (int i = 1; i <= n; i++) { + Point point = Point.measurement(measurement) + .time(i,TimeUnit.MILLISECONDS) + .addField("f1", (double) i) + .addField("f2", (double) (i) * 1.1) + .addField("f3", "f_v3").build(); + batchPoints.point(point); + } + + return batchPoints; + } + + static String createErrorBody(String errorMessage) { + return MessageFormat.format("'{' \"error\": \"{0}\" '}'", errorMessage); + } +} diff --git a/src/test/java/org/influxdb/InfluxDB2Test.java b/src/test/java/org/influxdb/InfluxDB2Test.java new file mode 100644 index 000000000..0118d0318 --- /dev/null +++ b/src/test/java/org/influxdb/InfluxDB2Test.java @@ -0,0 +1,57 @@ +package org.influxdb; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import org.influxdb.dto.Query; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * @author Jakub Bednar (30/08/2021 11:31) + */ +@RunWith(JUnitPlatform.class) +@EnabledIfEnvironmentVariable(named = "INFLUXDB_VERSION", matches = "2\\..") +public class InfluxDB2Test { + + private InfluxDB influxDB; + + @BeforeEach + public void setUp() throws NoSuchFieldException, IllegalAccessException { + String url = String.format("http://%s:%s", TestUtils.getInfluxIP(), TestUtils.getInfluxPORT(true)); + influxDB = InfluxDBFactory + .connect(url, "my-user", "my-password") + .setDatabase("mydb") + .setRetentionPolicy("autogen"); + } + + @AfterEach + public void cleanup() { + influxDB.close(); + } + + @Test + public void testQuery() throws InterruptedException { + + String measurement = TestUtils.getRandomMeasurement(); + + // prepare data + 
List records = new ArrayList<>(); + records.add(measurement + ",test=a value=1 1"); + records.add(measurement + ",test=a value=2 2"); + influxDB.write(records); + + // query data + final CountDownLatch countDownLatch = new CountDownLatch(1); + influxDB.query(new Query("SELECT * FROM " + measurement), 2, queryResult -> countDownLatch.countDown()); + + Assertions.assertTrue(countDownLatch.await(2, TimeUnit.SECONDS)); + } +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/InfluxDBExceptionTest.java b/src/test/java/org/influxdb/InfluxDBExceptionTest.java new file mode 100644 index 000000000..2ae179b7d --- /dev/null +++ b/src/test/java/org/influxdb/InfluxDBExceptionTest.java @@ -0,0 +1,30 @@ +package org.influxdb; + +import org.influxdb.InfluxDBException.DatabaseNotFoundException; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * Test cases for InfluxDBException + * + * @author hoan.le [at] bonitoo.io + * + */ + +@RunWith(JUnitPlatform.class) +public class InfluxDBExceptionTest { + + @Test + public void testBuildExceptionForMessagePackErrorState() { + DatabaseNotFoundException dbex = (DatabaseNotFoundException) InfluxDBException + .buildExceptionForErrorState(InfluxDBExceptionTest.class.getResourceAsStream("msgpack_errorBody.bin")); + + Assertions.assertEquals("database not found: \"abc\"", dbex.getMessage()); + + InfluxDBException ex = InfluxDBException.buildExceptionForErrorState(InfluxDBExceptionTest.class.getResourceAsStream("invalid_msgpack_errorBody.bin")); + Assertions.assertTrue(ex.getCause() instanceof ClassCastException); + + } +} diff --git a/src/test/java/org/influxdb/InfluxDBFactoryTest.java b/src/test/java/org/influxdb/InfluxDBFactoryTest.java index e2a930aa6..74ebfab70 100644 --- a/src/test/java/org/influxdb/InfluxDBFactoryTest.java +++ b/src/test/java/org/influxdb/InfluxDBFactoryTest.java @@ -1,41 +1,57 @@ -package 
org.influxdb; - -import org.influxdb.dto.Pong; -import org.junit.Assert; -import org.junit.Test; - -import okhttp3.OkHttpClient; - -/** - * Test the InfluxDB Factory API. - * - * @author fujian1115 [at] gmail.com - * - */ -public class InfluxDBFactoryTest { - - /** - * Test for a {@link InfluxDBFactory #connect(String)}. - */ - @Test - public void testCreateInfluxDBInstanceWithoutUserNameAndPassword() { - InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true)); - verifyInfluxDBInstance(influxDB); - } - - private void verifyInfluxDBInstance(InfluxDB influxDB) { - Assert.assertNotNull(influxDB); - Pong pong = influxDB.ping(); - Assert.assertNotNull(pong); - Assert.assertNotEquals(pong.getVersion(), "unknown"); - } - - /** - * Test for a {@link InfluxDBFactory #connect(String, okhttp3.OkHttpClient.Builder)}. - */ - @Test - public void testCreateInfluxDBInstanceWithClientAndWithoutUserNameAndPassword() { - InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), new OkHttpClient.Builder()); - verifyInfluxDBInstance(influxDB); - } -} +package org.influxdb; + +import org.influxdb.dto.Pong; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import okhttp3.OkHttpClient; + +/** + * Test the InfluxDB Factory API. + * + * @author fujian1115 [at] gmail.com + * + */ +@RunWith(JUnitPlatform.class) +public class InfluxDBFactoryTest { + + /** + * Test for a {@link InfluxDBFactory #connect(String)}. 
+ */ + @Test + public void testShouldNotUseBasicAuthWhenCreateInfluxDBInstanceWithoutUserNameAndPassword() { + InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true)); + verifyInfluxDBInstance(influxDB); + } + + @Test + public void testShouldNotUseBasicAuthWhenCreateInfluxDBInstanceWithUserNameAndWithoutPassword() { + InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", null); + verifyInfluxDBInstance(influxDB); + } + + private void verifyInfluxDBInstance(InfluxDB influxDB) { + Assertions.assertNotNull(influxDB); + Pong pong = influxDB.ping(); + Assertions.assertNotNull(pong); + Assertions.assertNotEquals(pong.getVersion(), "unknown"); + } + + /** + * Test for a {@link InfluxDBFactory #connect(String, okhttp3.OkHttpClient.Builder)}. + */ + @Test + public void testCreateInfluxDBInstanceWithClientAndWithoutUserNameAndPassword() { + InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), new OkHttpClient.Builder()); + verifyInfluxDBInstance(influxDB); + } + + @Test + public void testShouldThrowIllegalArgumentWithInvalidUrl() { + Assertions.assertThrows(IllegalArgumentException.class,() -> { + InfluxDBFactory.connect("invalidUrl"); + }); + } +} diff --git a/src/test/java/org/influxdb/InfluxDBProxyTest.java b/src/test/java/org/influxdb/InfluxDBProxyTest.java new file mode 100644 index 000000000..c54dab53a --- /dev/null +++ b/src/test/java/org/influxdb/InfluxDBProxyTest.java @@ -0,0 +1,84 @@ +package org.influxdb; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import 
org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +/** + * Test the InfluxDB API. + * + * @author hoan.le [at] bonitoo.io + * + */ +@RunWith(JUnitPlatform.class) +public class InfluxDBProxyTest { + private InfluxDB influxDB; + private static final String TEST_DB = "InfluxDBProxyTest_db"; + private static final String UDP_DB = "udp"; + + @BeforeEach + public void setUp() throws InterruptedException, IOException { + influxDB = TestUtils.connectToInfluxDB(TestUtils.getProxyApiUrl()); + } + + /** + * delete database after all tests end. + */ + @AfterEach + public void cleanup(){ + influxDB.close(); + } + + @Test + public void testWriteSomePointThroughTcpProxy() { + influxDB.query(new Query("CREATE DATABASE " + TEST_DB));; + influxDB.setDatabase(TEST_DB); + + for(int i = 0; i < 20; i++) { + Point point = Point.measurement("weather") + .time(i,TimeUnit.HOURS) + .addField("temperature", (double) i) + .addField("humidity", (double) (i) * 1.1) + .addField("uv_index", "moderate").build(); + influxDB.write(point); + } + + QueryResult result = influxDB.query(new Query("select * from weather", TEST_DB)); + //check points written already to DB + Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + + influxDB.deleteDatabase(TEST_DB); + } + + @Test + public void testWriteSomePointThroughUdpProxy() throws InterruptedException { + influxDB.query(new Query("CREATE DATABASE " + UDP_DB)); + influxDB.setDatabase(UDP_DB); + + int proxyUdpPort = Integer.parseInt(TestUtils.getProxyUdpPort()); + for(int i = 0; i < 20; i++) { + Point point = Point.measurement("weather") + .time(i,TimeUnit.HOURS) + .addField("temperature", (double) i) + .addField("humidity", (double) (i) * 1.1) + .addField("uv_index", "moderate").build(); + influxDB.write(proxyUdpPort, point); + } + + Thread.sleep(2000); + QueryResult result = influxDB.query(new Query("select * from weather", UDP_DB)); + //check points written already to DB + 
Assertions.assertEquals(20, result.getResults().get(0).getSeries().get(0).getValues().size()); + + influxDB.deleteDatabase(UDP_DB); + } + +} diff --git a/src/test/java/org/influxdb/InfluxDBTest.java b/src/test/java/org/influxdb/InfluxDBTest.java index 116f42552..2598b9f23 100644 --- a/src/test/java/org/influxdb/InfluxDBTest.java +++ b/src/test/java/org/influxdb/InfluxDBTest.java @@ -1,31 +1,47 @@ package org.influxdb; +import okhttp3.OkHttpClient; +import org.influxdb.InfluxDB.LogLevel; +import org.influxdb.InfluxDB.ResponseFormat; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.BoundParameterQuery.QueryBuilder; +import org.influxdb.dto.Point; +import org.influxdb.dto.Pong; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.influxdb.dto.QueryResult.Series; +import org.influxdb.impl.InfluxDBImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + import java.io.IOException; +import java.net.ConnectException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.LongAdder; import java.util.function.Consumer; +import java.util.regex.Pattern; -import org.influxdb.InfluxDB.LogLevel; -import org.influxdb.dto.BatchPoints; 
-import org.influxdb.dto.Point; -import org.influxdb.dto.Pong; -import org.influxdb.dto.Query; -import org.influxdb.dto.QueryResult; -import org.influxdb.impl.InfluxDBImpl; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import com.google.common.util.concurrent.Uninterruptibles; +import static org.assertj.core.api.Assertions.assertThat; /** * Test the InfluxDB API. @@ -33,49 +49,31 @@ * @author stefan.majer [at] gmail.com * */ +@RunWith(JUnitPlatform.class) public class InfluxDBTest { - private InfluxDB influxDB; + InfluxDB influxDB; private final static int UDP_PORT = 8089; - private final static String UDP_DATABASE = "udp"; + final static String UDP_DATABASE = "udp"; - @Rule public final ExpectedException exception = ExpectedException.none(); /** * Create a influxDB connection before all tests start. * * @throws InterruptedException * @throws IOException */ - @Before + @BeforeEach public void setUp() throws InterruptedException, IOException { - this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); - boolean influxDBstarted = false; - do { - Pong response; - try { - response = this.influxDB.ping(); - if (!response.getVersion().equalsIgnoreCase("unknown")) { - influxDBstarted = true; - } - } catch (Exception e) { - // NOOP intentional - e.printStackTrace(); - } - Thread.sleep(100L); - } while (!influxDBstarted); - this.influxDB.setLogLevel(LogLevel.NONE); - this.influxDB.createDatabase(UDP_DATABASE); - System.out.println("################################################################################## "); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); - System.out.println("##################################################################################"); - } - + this.influxDB = TestUtils.connectToInfluxDB(); + this.influxDB.query(new 
Query("CREATE DATABASE " + UDP_DATABASE)); + } + /** * delete UDP database after all tests end. */ - //@After - public void clearup(){ - this.influxDB.deleteDatabase(UDP_DATABASE); + @AfterEach + public void cleanup(){ + this.influxDB.query(new Query("DROP DATABASE " + UDP_DATABASE)); } /** @@ -84,8 +82,8 @@ public void clearup(){ @Test public void testPing() { Pong result = this.influxDB.ping(); - Assert.assertNotNull(result); - Assert.assertNotEquals(result.getVersion(), "unknown"); + Assertions.assertNotNull(result); + Assertions.assertNotEquals(result.getVersion(), "unknown"); } /** @@ -94,8 +92,8 @@ public void testPing() { @Test public void testVersion() { String version = this.influxDB.version(); - Assert.assertNotNull(version); - Assert.assertFalse(version.contains("unknown")); + Assertions.assertNotNull(version); + Assertions.assertFalse(version.contains("unknown")); } /** @@ -107,17 +105,131 @@ public void testQuery() { this.influxDB.query(new Query("DROP DATABASE mydb2", "mydb")); } + /** + * Simple Test for a query. 
+ */ + @Test + public void testQueryWithoutDatabase() { + influxDB.setDatabase(UDP_DATABASE); + influxDB.query(new Query("CREATE DATABASE mydb2")); + Point point = Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build(); + influxDB.write(point); + + Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE atag = $atag") + .bind("atag", "test") + .create(); + QueryResult result = influxDB.query(query); + Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); + Series series = result.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(series.getValues().size() == 1); + + influxDB.query(new Query("DROP DATABASE mydb2")); + } + + @Test + public void testBoundParameterQuery() throws InterruptedException { + // set up + Point point = Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build(); + this.influxDB.setDatabase(UDP_DATABASE); + this.influxDB.write(point); + + // test + Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE atag = $atag") + .forDatabase(UDP_DATABASE) + .bind("atag", "test") + .create(); + QueryResult result = this.influxDB.query(query); + Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); + Series series = result.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(series.getValues().size() == 1); + + result = this.influxDB.query(query, TimeUnit.SECONDS); + Assertions.assertTrue(result.getResults().get(0).getSeries().size() == 1); + series = result.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(series.getValues().size() == 1); + + Object waitForTestresults = new Object(); + Consumer check = (queryResult) -> { + if (!"DONE".equals(queryResult.getError())) { + Assertions.assertTrue(queryResult.getResults().get(0).getSeries().size() == 1); + Series s = 
queryResult.getResults().get(0).getSeries().get(0); + Assertions.assertTrue(s.getValues().size() == 1); + synchronized (waitForTestresults) { + waitForTestresults.notifyAll(); + } + } + }; + this.influxDB.query(query, 10, check); + synchronized (waitForTestresults) { + waitForTestresults.wait(2000); + } + } + + /** + * Tests for callback query. + */ + @Test + public void testCallbackQuery() throws Throwable { + final AsyncResult result = new AsyncResult<>(); + final Consumer firstQueryConsumer = new Consumer() { + @Override + public void accept(QueryResult queryResult) { + influxDB.query(new Query("DROP DATABASE mydb2", "mydb"), result.resultConsumer, result.errorConsumer); + } + }; + + this.influxDB.query(new Query("CREATE DATABASE mydb2", "mydb"), firstQueryConsumer, result.errorConsumer); + + // Will throw exception in case of error. + result.result(); + } + + /** + * Tests for callback query with a failure. + * see Issue #602 + */ + @Test + public void testCallbackQueryFailureHandling() throws Throwable { + final AsyncResult res = new AsyncResult<>(); + + this.influxDB.query(new Query("SHOW SERRIES"), res.resultConsumer, res.errorConsumer); + + try{ + res.result(); + Assertions.fail("Malformed query should throw InfluxDBException"); + } + catch (InfluxDBException e){ + Pattern errorPattern = Pattern.compile("Bad Request.*error parsing query: found SERRIES, expected.*", + Pattern.DOTALL); + + Assertions.assertTrue(errorPattern.matcher(e.getMessage()).matches(), + "Error string \"" + e.getMessage() + "\" does not match error pattern"); + } + } + /** * Test that describe Databases works. 
*/ @Test public void testDescribeDatabases() { String dbName = "unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); this.influxDB.describeDatabases(); List result = this.influxDB.describeDatabases(); - Assert.assertNotNull(result); - Assert.assertTrue(result.size() > 0); + Assertions.assertNotNull(result); + Assertions.assertTrue(result.size() > 0); boolean found = false; for (String database : result) { if (database.equals(dbName)) { @@ -126,10 +238,10 @@ public void testDescribeDatabases() { } } - Assert.assertTrue("It is expected that describeDataBases contents the newly create database.", found); - this.influxDB.deleteDatabase(dbName); + Assertions.assertTrue(found, "It is expected that describeDataBases contents the newly create database."); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - + /** * Test that Database exists works. */ @@ -137,12 +249,12 @@ public void testDescribeDatabases() { public void testDatabaseExists() { String existentdbName = "unittest_1"; String notExistentdbName = "unittest_2"; - this.influxDB.createDatabase(existentdbName); + this.influxDB.query(new Query("CREATE DATABASE " + existentdbName)); boolean checkDbExistence = this.influxDB.databaseExists(existentdbName); - Assert.assertTrue("It is expected that databaseExists return true for " + existentdbName + " database", checkDbExistence); + Assertions.assertTrue(checkDbExistence, "It is expected that databaseExists return true for " + existentdbName + " database"); checkDbExistence = this.influxDB.databaseExists(notExistentdbName); - Assert.assertFalse("It is expected that databaseExists return false for " + notExistentdbName + " database", checkDbExistence); - this.influxDB.deleteDatabase(existentdbName); + Assertions.assertFalse(checkDbExistence, "It is expected that databaseExists return false for " + notExistentdbName + " database"); + this.influxDB.query(new Query("DROP 
DATABASE " + existentdbName)); } /** @@ -151,7 +263,7 @@ public void testDatabaseExists() { @Test public void testWrite() { String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); BatchPoints batchPoints = BatchPoints.database(dbName).tag("async", "true").retentionPolicy(rp).build(); Point point1 = Point @@ -167,58 +279,208 @@ public void testWrite() { this.influxDB.write(batchPoints); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); - this.influxDB.deleteDatabase(dbName); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - + + /** + * Test that writing to the new lineprotocol, using {@link InfluxDB#setDatabase(String)} and not + * {@link BatchPoints#database(String)}. 
+ */ + @Test + public void testWriteNoDatabase() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.builder().tag("async", "true").retentionPolicy(rp).build(); + Point point1 = Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build(); + Point point2 = Point.measurement("disk").tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + this.influxDB.write(batchPoints); + Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); + QueryResult result = this.influxDB.query(query); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + /** - * Test the implementation of {@link InfluxDB#write(int, Point)}'s sync support. + * Tests that database information is used from {@link InfluxDB} when database information + * is not present in query. */ - @Test - public void testSyncWritePointThroughUDP() { - this.influxDB.disableBatch(); - String measurement = TestUtils.getRandomMeasurement(); - Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); - this.influxDB.write(UDP_PORT, point); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + @Test + public void testQueryWithNoDatabase() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); // Set db here, then after write query should pass. 
+ this.influxDB.write(Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build()); + + Query query = new Query("SELECT * FROM cpu GROUP BY *"); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertEquals( + result.getResults().get(0).getSeries().get(0).getTags(), + Collections.singletonMap("atag", "test") + ); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - + /** - * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. + * Tests that database information is used from {@link InfluxDB} when database information + * is not present in query. */ @Test - public void testAsyncWritePointThroughUDP() { - this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); - try{ - Assert.assertTrue(this.influxDB.isBatchEnabled()); - String measurement = TestUtils.getRandomMeasurement(); - Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); - this.influxDB.write(UDP_PORT, point); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); - QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); - }finally{ - this.influxDB.disableBatch(); + public void testQueryWithNoDatabaseWithConsumer() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); // Set db here, then after write query should pass. 
+ this.influxDB.write(Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .build()); + + Query query = new Query("SELECT * FROM cpu GROUP BY *"); + this.influxDB.query(query, + queryResult -> + Assertions.assertEquals( + queryResult.getResults().get(0).getSeries().get(0).getTags(), + Collections.singletonMap("atag", "test") + ) + , + throwable -> Assertions.fail() + ); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + + /** + * Tests that database information is used from {@link InfluxDB} when database information + * is not present in query. + */ + // Note: this test is copied from InfluxDBTest#testChunking but changed so that database + // information is present in client not query. Combined both tests test situations with + // and without database information present in query, hence no need for additional test + // for situation where database info is not set in the client. + public void testQueryNoDatabaseWithChunking() throws Exception { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); // Set database -> no need to use it as query parameter. 
+ String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); + + Thread.sleep(2000); + final BlockingQueue queue = new LinkedBlockingQueue<>(); + Query query = new Query("SELECT * FROM disk"); + this.influxDB.query(query, 2, queue::add); + + Thread.sleep(2000); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + + QueryResult result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals("DONE", result.getError()); + } + + /** + * Tests that database information is used from {@link InfluxDB} when database information + * is not present in query and when different time format is requested from db. + */ + @Test + public void testQueryNoDatabaseWithTimeFormat() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + long time = 1559027876L; + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.setDatabase(dbName); // Set db here, then after write query should pass. 
+ this.influxDB.write(Point + .measurement("cpu") + .tag("atag", "test") + .addField("idle", 90L) + .addField("usertime", 9L) + .addField("system", 1L) + .time(time, TimeUnit.MILLISECONDS) // Set time. + .build()); + + Query query = new Query("SELECT * FROM cpu GROUP BY *"); + + // Test milliseconds + QueryResult result = this.influxDB.query(query, TimeUnit.MILLISECONDS); + Series series = result.getResults().get(0).getSeries().get(0); + Assertions.assertEquals( + ((Number)series.getValues().get(0).get(series.getColumns().indexOf("time"))).longValue() , + time + ); + + // Test nanoseconds + result = this.influxDB.query(query, TimeUnit.NANOSECONDS); + series = result.getResults().get(0).getSeries().get(0); + Assertions.assertEquals( + ((Number)series.getValues().get(0).get(series.getColumns().indexOf("time"))).longValue(), + TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS) + ); + + // Test seconds + result = this.influxDB.query(query, TimeUnit.SECONDS); + series = result.getResults().get(0).getSeries().get(0); + Assertions.assertEquals( + ((Number)series.getValues().get(0).get(series.getColumns().indexOf("time"))).longValue(), + TimeUnit.SECONDS.convert(time, TimeUnit.MILLISECONDS) + + ); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - - + /** * Test the implementation of {@link InfluxDB#write(int, Point)}'s async support. 
*/ - @Test(expected = RuntimeException.class) + @Test public void testAsyncWritePointThroughUDPFail() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ - Assert.assertTrue(this.influxDB.isBatchEnabled()); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); String measurement = TestUtils.getRandomMeasurement(); Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); Thread.currentThread().interrupt(); - this.influxDB.write(UDP_PORT, point); + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, point); + }); }finally{ this.influxDB.disableBatch(); } @@ -230,68 +492,31 @@ public void testAsyncWritePointThroughUDPFail() { @Test public void testWriteStringData() { String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, "cpu,atag=test idle=90,usertime=9,system=1"); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); - this.influxDB.deleteDatabase(dbName); - } - - /** - * Test writing to the database using string protocol through UDP. - */ - @Test - public void testWriteStringDataThroughUDP() { - String measurement = TestUtils.getRandomMeasurement(); - this.influxDB.write(UDP_PORT, measurement + ",atag=test idle=90,usertime=9,system=1"); - //write with UDP may be executed on server after query with HTTP. 
so sleep 2s to handle this case - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); - QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - /** - * Test writing multiple records to the database using string protocol through UDP. - */ - @Test - public void testWriteMultipleStringDataThroughUDP() { - String measurement = TestUtils.getRandomMeasurement(); - this.influxDB.write(UDP_PORT, measurement + ",atag=test1 idle=100,usertime=10,system=1\n" + - measurement + ",atag=test2 idle=200,usertime=20,system=2\n" + - measurement + ",atag=test3 idle=300,usertime=30,system=3"); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); - QueryResult result = this.influxDB.query(query); - - Assert.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); - } - - /** - * Test writing multiple separate records to the database using string protocol through UDP. 
- */ - @Test - public void testWriteMultipleStringDataLinesThroughUDP() { - String measurement = TestUtils.getRandomMeasurement(); - this.influxDB.write(UDP_PORT, Arrays.asList( - measurement + ",atag=test1 idle=100,usertime=10,system=1", - measurement + ",atag=test2 idle=200,usertime=20,system=2", - measurement + ",atag=test3 idle=300,usertime=30,system=3" - )); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); - QueryResult result = this.influxDB.query(query); - - Assert.assertEquals(3, result.getResults().get(0).getSeries().size()); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); - } + /** + * Test writing to the database using string protocol with simpler interface. 
+ */ + @Test + public void testWriteStringDataSimple() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + this.influxDB.setDatabase(dbName); + this.influxDB.setRetentionPolicy(rp); + this.influxDB.write("cpu,atag=test idle=90,usertime=9,system=1"); + Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); + QueryResult result = this.influxDB.query(query); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } /** * When batch of points' size is over UDP limit, the expected exception @@ -299,8 +524,8 @@ public void testWriteMultipleStringDataLinesThroughUDP() { * The message is larger than the maximum supported by the underlying transport: Datagram send failed * @throws Exception */ - @Test(expected = RuntimeException.class) - public void writeMultipleStringDataLinesOverUDPLimit() throws Exception { + @Test + public void testWriteMultipleStringDataLinesOverUDPLimit() throws Exception { //prepare data List lineProtocols = new ArrayList(); int i = 0; @@ -315,7 +540,9 @@ public void writeMultipleStringDataLinesOverUDPLimit() throws Exception { } } //write batch of string which size is over 64K - this.influxDB.write(UDP_PORT, lineProtocols); + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, lineProtocols); + }); } /** @@ -324,27 +551,49 @@ public void writeMultipleStringDataLinesOverUDPLimit() throws Exception { @Test public void testWriteMultipleStringData() { String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, 
"cpu,atag=test1 idle=100,usertime=10,system=1\ncpu,atag=test2 idle=200,usertime=20,system=2\ncpu,atag=test3 idle=300,usertime=30,system=3"); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); - this.influxDB.deleteDatabase(dbName); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } + /** + * Test writing multiple records to the database using string protocol with simpler interface. 
+ */ + @Test + public void testWriteMultipleStringDataSimple() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + this.influxDB.setDatabase(dbName); + this.influxDB.setRetentionPolicy(rp); + + this.influxDB.write("cpu,atag=test1 idle=100,usertime=10,system=1\ncpu,atag=test2 idle=200,usertime=20,system=2\ncpu,atag=test3 idle=300,usertime=30,system=3"); + Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); + QueryResult result = this.influxDB.query(query); + + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + /** * Test writing multiple separate records to the database using string protocol. 
*/ @Test public void testWriteMultipleStringDataLines() { String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, Arrays.asList( @@ -355,33 +604,221 @@ public void testWriteMultipleStringDataLines() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); - this.influxDB.deleteDatabase(dbName); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } + /** + * Tests writing points using the time precision feature + * @throws Exception + */ + @Test + public void testWriteBatchWithPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + + // GIVEN a batch of points using second precision + DateTimeFormatter formatter = 
DateTimeFormatter + .ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'") + .withZone(ZoneId.of("UTC")); + int t1 = 1485273600; + Point p1 = Point + .measurement(measurement) + .addField("foo", 1d) + .tag("device", "one") + .time(t1, TimeUnit.SECONDS).build(); // 2017-01-27T16:00:00 + String timeP1 = formatter.format(Instant.ofEpochSecond(t1)); + + int t2 = 1485277200; + Point p2 = Point + .measurement(measurement) + .addField("foo", 2d) + .tag("device", "two") + .time(t2, TimeUnit.SECONDS).build(); // 2017-01-27T17:00:00 + String timeP2 = formatter.format(Instant.ofEpochSecond(t2)); + + int t3 = 1485280800; + Point p3 = Point + .measurement(measurement) + .addField("foo", 3d) + .tag("device", "three") + .time(t3, TimeUnit.SECONDS).build(); // 2017-01-27T18:00:00 + String timeP3 = formatter.format(Instant.ofEpochSecond(t3)); + + BatchPoints batchPoints = BatchPoints + .database(dbName) + .retentionPolicy(rp) + .precision(TimeUnit.SECONDS) + .points(p1, p2, p3) + .build(); + + // WHEN I write the batch + this.influxDB.write(batchPoints); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName)); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0), timeP1); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0), timeP2); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0), timeP3); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Test + public void testWriteBatchWithoutPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = 
TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + + // GIVEN a batch of points that has no specific precision + long t1 = 1485273600000000100L; + Point p1 = Point + .measurement(measurement) + .addField("foo", 1d) + .tag("device", "one") + .time(t1, TimeUnit.NANOSECONDS).build(); // 2017-01-27T16:00:00.000000100Z + Double timeP1 = Double.valueOf(t1); + + long t2 = 1485277200000000200L; + Point p2 = Point + .measurement(measurement) + .addField("foo", 2d) + .tag("device", "two") + .time(t2, TimeUnit.NANOSECONDS).build(); // 2017-01-27T17:00:00.000000200Z + Double timeP2 = Double.valueOf(t2); + + long t3 = 1485280800000000300L; + Point p3 = Point + .measurement(measurement) + .addField("foo", 3d) + .tag("device", "three") + .time(t3, TimeUnit.NANOSECONDS).build(); // 2017-01-27T18:00:00.000000300Z + Double timeP3 = Double.valueOf(t3); + + BatchPoints batchPoints = BatchPoints + .database(dbName) + .retentionPolicy(rp) + .points(p1, p2, p3) + .build(); + + // WHEN I write the batch + this.influxDB.write(batchPoints); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName), TimeUnit.NANOSECONDS); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0), timeP1); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0), timeP2); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0), timeP3); + + // WHEN I use the post query + queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName, true), TimeUnit.NANOSECONDS); + + // THEN result will be same + 
Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0), timeP1); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0), timeP2); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0), timeP3); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Test + public void testWriteRecordsWithPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + + // GIVEN a set of records using second precision + DateTimeFormatter formatter = DateTimeFormatter + .ofPattern("yyyy-MM-dd'T'HH:mm:ss'Z'") + .withZone(ZoneId.of("UTC")); + List records = new ArrayList<>(); + records.add(measurement + ",atag=test1 idle=100,usertime=10,system=1 1485273600"); + String timeP1 = formatter.format(Instant.ofEpochSecond(1485273600)); + + records.add(measurement + ",atag=test2 idle=200,usertime=20,system=2 1485277200"); + String timeP2 = formatter.format(Instant.ofEpochSecond(1485277200)); + + records.add(measurement + ",atag=test3 idle=300,usertime=30,system=3 1485280800"); + String timeP3 = formatter.format(Instant.ofEpochSecond(1485280800)); + + // WHEN I write the batch + this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, TimeUnit.SECONDS, records); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName)); + 
Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0), timeP1); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0), timeP2); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0), timeP3); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + /** + * Test writing multiple separate records to the database using string protocol with simpler interface. + */ + @Test + public void testWriteMultipleStringDataLinesSimple() { + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + this.influxDB.setDatabase(dbName); + this.influxDB.setRetentionPolicy(rp); + + this.influxDB.write(Arrays.asList( + "cpu,atag=test1 idle=100,usertime=10,system=1", + "cpu,atag=test2 idle=200,usertime=20,system=2", + "cpu,atag=test3 idle=300,usertime=30,system=3" + )); + Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); + QueryResult result = this.influxDB.query(query); + + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + /** * Test that creating database which name is composed of numbers only works */ @Test public void testCreateNumericNamedDatabase() { - String numericDbName = "123"; + this.influxDB.query(new Query("CREATE DATABASE \"123\"")); - 
this.influxDB.createDatabase(numericDbName); List result = this.influxDB.describeDatabases(); - Assert.assertTrue(result.contains(numericDbName)); - this.influxDB.deleteDatabase(numericDbName); + Assertions.assertTrue(result.contains("123")); + this.influxDB.query(new Query("DROP DATABASE \"123\"")); } - + /** * Test that creating database which name is empty will throw expected exception */ - @Test(expected = IllegalArgumentException.class) + @Test public void testCreateEmptyNamedDatabase() { - String emptyName = ""; - this.influxDB.createDatabase(emptyName); + Assertions.assertThrows(org.influxdb.InfluxDBException.class, () -> { + this.influxDB.query(new Query(Query.encode("CREATE DATABASE \"\""))); + }); } /** @@ -389,13 +826,12 @@ public void testCreateEmptyNamedDatabase() { */ @Test() public void testCreateDatabaseWithNameContainHyphen() { - String databaseName = "123-456"; - this.influxDB.createDatabase(databaseName); + this.influxDB.query(new Query("CREATE DATABASE \"123-456\"")); try { List result = this.influxDB.describeDatabases(); - Assert.assertTrue(result.contains(databaseName)); + Assertions.assertTrue(result.contains("123-456")); } finally { - this.influxDB.deleteDatabase(databaseName); + this.influxDB.query(new Query("DROP DATABASE \"123-456\"")); } } @@ -404,15 +840,15 @@ public void testCreateDatabaseWithNameContainHyphen() { */ @Test public void testIsBatchEnabled() { - Assert.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); - Assert.assertTrue(this.influxDB.isBatchEnabled()); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); this.influxDB.disableBatch(); - Assert.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); } - + /** * Test the implementation of {@link InfluxDB#enableBatch(int, int, TimeUnit, ThreadFactory)}. 
*/ @@ -420,7 +856,7 @@ public void testIsBatchEnabled() { public void testBatchEnabledWithThreadFactory() { final String threadName = "async_influxdb_write"; this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, new ThreadFactory() { - + @Override public Thread newThread(Runnable r) { Thread thread = new Thread(r); @@ -435,31 +871,53 @@ public Thread newThread(Runnable r) { existThreadWithSettedName = true; break; } - + } - Assert.assertTrue(existThreadWithSettedName); + Assertions.assertTrue(existThreadWithSettedName); this.influxDB.disableBatch(); } - @Test(expected = NullPointerException.class) + @Test public void testBatchEnabledWithThreadFactoryIsNull() { - this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, null); + Assertions.assertThrows(NullPointerException.class, () -> { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, null); + }); } - + /** * Test the implementation of {@link InfluxDBImpl#InfluxDBImpl(String, String, String, okhttp3.OkHttpClient.Builder)}. */ - @Test(expected = RuntimeException.class) + @Test public void testWrongHostForInfluxdb(){ String errorHost = "10.224.2.122_error_host"; - InfluxDBFactory.connect("http://" + errorHost + ":" + TestUtils.getInfluxPORT(true)); + Assertions.assertThrows(RuntimeException.class, () -> { + InfluxDBFactory.connect("http://" + errorHost + ":" + TestUtils.getInfluxPORT(true)); + }); + + String unresolvableHost = "a.b.c"; + Assertions.assertThrows(InfluxDBIOException.class, () -> { + InfluxDBFactory.connect("http://" + unresolvableHost + ":" + TestUtils.getInfluxPORT(true)); + }); } - @Test(expected = IllegalStateException.class) + @Test + public void testInvalidUrlHandling(){ + Assertions.assertThrows(IllegalArgumentException.class, () -> { + InfluxDBFactory.connect("@@@http://@@@"); + }); + + Assertions.assertThrows(IllegalArgumentException.class, () -> { + InfluxDBFactory.connect("http://@@@abc"); + }); + } + + @Test public void testBatchEnabledTwice() { this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); try{ 
- this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + Assertions.assertThrows(IllegalStateException.class, () -> { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + }); } finally { this.influxDB.disableBatch(); } @@ -472,9 +930,9 @@ public void testBatchEnabledTwice() { public void testCloseInfluxDBClient() { InfluxDB influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); influxDB.enableBatch(1, 1, TimeUnit.SECONDS); - Assert.assertTrue(influxDB.isBatchEnabled()); + Assertions.assertTrue(influxDB.isBatchEnabled()); influxDB.close(); - Assert.assertFalse(influxDB.isBatchEnabled()); + Assertions.assertFalse(influxDB.isBatchEnabled()); } /** @@ -487,7 +945,7 @@ public void testWriteEnableGzip() { try { influxDBForTestGzip.setLogLevel(LogLevel.NONE); influxDBForTestGzip.enableGzip(); - influxDBForTestGzip.createDatabase(dbName); + influxDBForTestGzip.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); influxDBForTestGzip.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, Arrays.asList( @@ -498,12 +956,12 @@ public void testWriteEnableGzip() { Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); QueryResult result = influxDBForTestGzip.query(query); - Assert.assertEquals(result.getResults().get(0).getSeries().size(), 3); - Assert.assertEquals(result.getResults().get(0).getSeries().get(0).getTags().get("atag"), "test1"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(1).getTags().get("atag"), "test2"); - Assert.assertEquals(result.getResults().get(0).getSeries().get(2).getTags().get("atag"), "test3"); + Assertions.assertEquals(result.getResults().get(0).getSeries().size(), 3); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + 
Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); } finally { - influxDBForTestGzip.deleteDatabase(dbName); + influxDBForTestGzip.query(new Query("DROP DATABASE " + dbName)); influxDBForTestGzip.close(); } } @@ -517,11 +975,11 @@ public void testWriteEnableGzipAndDisableGzip() { InfluxDB influxDBForTestGzip = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); try { //test default: gzip is disable - Assert.assertFalse(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertFalse(influxDBForTestGzip.isGzipEnabled()); influxDBForTestGzip.enableGzip(); - Assert.assertTrue(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertTrue(influxDBForTestGzip.isGzipEnabled()); influxDBForTestGzip.disableGzip(); - Assert.assertFalse(influxDBForTestGzip.isGzipEnabled()); + Assertions.assertFalse(influxDBForTestGzip.isGzipEnabled()); } finally { influxDBForTestGzip.close(); } @@ -533,75 +991,121 @@ public void testWriteEnableGzipAndDisableGzip() { */ @Test public void testChunking() throws InterruptedException { - if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { - // do not test version 0.13 and 1.0 - return; - } - String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); - String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); - Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); - Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); - Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); - batchPoints.point(point1); - batchPoints.point(point2); - batchPoints.point(point3); - 
this.influxDB.write(batchPoints); + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - final BlockingQueue queue = new LinkedBlockingQueue<>(); - Query query = new Query("SELECT * FROM disk", dbName); - this.influxDB.query(query, 2, new Consumer() { - @Override - public void accept(QueryResult result) { - queue.add(result); - }}); - - Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS); - this.influxDB.deleteDatabase(dbName); - - QueryResult result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); - System.out.println(result); - Assert.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); - - result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); - System.out.println(result); - Assert.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); - - result = queue.poll(20, TimeUnit.SECONDS); - Assert.assertNotNull(result); - System.out.println(result); - Assert.assertEquals("DONE", result.getError()); - } + Thread.sleep(2000); + final BlockingQueue queue = new LinkedBlockingQueue<>(); 
+ Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, new Consumer() { + @Override + public void accept(QueryResult result) { + queue.add(result); + }}); - /** - * Test chunking edge case. - * @throws InterruptedException - */ - @Test - public void testChunkingFail() throws InterruptedException { - if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { - // do not test version 0.13 and 1.0 - return; - } - String dbName = "write_unittest_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); - final CountDownLatch countDownLatch = new CountDownLatch(1); - Query query = new Query("UNKNOWN_QUERY", dbName); - this.influxDB.query(query, 10, new Consumer() { - @Override - public void accept(QueryResult result) { - countDownLatch.countDown(); - } - }); - this.influxDB.deleteDatabase(dbName); - Assert.assertFalse(countDownLatch.await(10, TimeUnit.SECONDS)); - } + Thread.sleep(2000); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); - /** + QueryResult result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals("DONE", result.getError()); + } + + /** + * Test chunking edge case. 
+ * + * @throws InterruptedException + */ + @Test + public void testChunkingFail() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + final CountDownLatch countDownLatch = new CountDownLatch(1); + final CountDownLatch countDownLatchFailure = new CountDownLatch(1); + Query query = new Query("UNKNOWN_QUERY", dbName); + this.influxDB.query(query, 10, + (cancellable, queryResult) -> { + countDownLatch.countDown(); + }, () -> { + }, + throwable -> { + countDownLatchFailure.countDown(); + }); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + Assertions.assertTrue(countDownLatchFailure.await(10, TimeUnit.SECONDS)); + Assertions.assertFalse(countDownLatch.await(10, TimeUnit.SECONDS)); + } + + @Test + public void testChunkingFailInConsumer() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); + + final CountDownLatch countDownLatch = new 
CountDownLatch(1); + final CountDownLatch countDownLatchFailure = new CountDownLatch(1); + final CountDownLatch countDownLatchComplete = new CountDownLatch(1); + Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, + (cancellable, queryResult) -> { + countDownLatch.countDown(); + throw new RuntimeException("my error"); + }, () -> { + countDownLatchComplete.countDown(); + System.out.println("onComplete()"); + }, + throwable -> { + Assertions.assertEquals(throwable.getMessage(), "my error"); + countDownLatchFailure.countDown(); + }); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + Assertions.assertTrue(countDownLatchFailure.await(10, TimeUnit.SECONDS)); + Assertions.assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + Assertions.assertFalse(countDownLatchComplete.await(10, TimeUnit.SECONDS)); + } + + /** * Test chunking on 0.13 and 1.0. * @throws InterruptedException */ @@ -610,22 +1114,241 @@ public void testChunkingOldVersion() throws InterruptedException { if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { - this.exception.expect(RuntimeException.class); + Assertions.assertThrows(RuntimeException.class, () -> { String dbName = "write_unittest_" + System.currentTimeMillis(); Query query = new Query("SELECT * FROM cpu GROUP BY *", dbName); this.influxDB.query(query, 10, new Consumer() { @Override public void accept(QueryResult result) { } - }); + }); + }); } } - @Test + @Test + public void testChunkingOnComplete() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); 
+ Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); + + CountDownLatch countDownLatch = new CountDownLatch(1); + + Thread.sleep(2000); + Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, result -> {}, countDownLatch::countDown); + + Thread.sleep(2000); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + + boolean await = countDownLatch.await(10, TimeUnit.SECONDS); + Assertions.assertTrue(await, "The onComplete action did not arrive!"); + } + + @Test + public void testChunkingFailOnComplete() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + final CountDownLatch countDownLatch = new CountDownLatch(1); + Query query = new Query("UNKNOWN_QUERY", dbName); + this.influxDB.query(query, 10, result -> {}, countDownLatch::countDown); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + + boolean await = countDownLatch.await(5, TimeUnit.SECONDS); + Assertions.assertFalse(await, "The onComplete action arrive!"); + } + + @Test + public void testChunkingCancelQuery() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); 
+ String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + for (int i = 0; i < 10; i++) + { + Point point = Point.measurement("disk") + .tag("atag", "a") + .addField("used", 60L + (i * 10)) + .addField("free", 1L + i) + .time(i, TimeUnit.SECONDS) + .build(); + + batchPoints.point(point); + } + + Assertions.assertEquals(batchPoints.getPoints().size(), 10); + this.influxDB.write(batchPoints); + Thread.sleep(2000); + + LongAdder chunkCount = new LongAdder(); + + Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, (cancellable, queryResult) -> { + + chunkCount.increment(); + + // after three chunks stop stream ("free" field == 5) + Number free = (Number) queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(2); + if (free.intValue() == 5) { + + cancellable.cancel(); + } + }); + + Thread.sleep(5_000); + + Assertions.assertEquals(3, chunkCount.intValue()); + } + + @Test + public void testChunkingCancelQueryOnComplete() throws InterruptedException { + + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + 
this.influxDB.write(batchPoints); + + CountDownLatch countDownLatch = new CountDownLatch(1); + + Thread.sleep(2000); + Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, (cancellable, queryResult) -> cancellable.cancel(), countDownLatch::countDown); + + Thread.sleep(2000); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + + boolean await = countDownLatch.await(5, TimeUnit.SECONDS); + Assertions.assertFalse(await, "The onComplete action arrive!"); + } + + @Test + public void testChunkingOnFailure() throws InterruptedException { + + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + + CountDownLatch countDownLatch = new CountDownLatch(1); + Query query = new Query("XXXSELECT * FROM disk", "not-existing-db"); + this.influxDB.query(query, 2, + //onNext - process result + (cancellable, queryResult) -> { + //assert that this is not executed in this test case + Assertions.fail("onNext() is executed!"); + }, + //onComplete + () -> Assertions.fail("onComplete() is executed !"), + //onFailure + throwable -> { + Assertions.assertTrue(throwable.getLocalizedMessage().contains("error parsing query: found XXXSELECT")); + countDownLatch.countDown(); + }); + + Assertions.assertTrue(countDownLatch.await(2, TimeUnit.SECONDS)); + + } + + @Test + public void testChunkingOnFailureConnectionError() throws InterruptedException { + + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + //connect to non existing port + InfluxDB influxDB = InfluxDBFactory.connect("http://"+TestUtils.getInfluxIP()+":12345"); + + CountDownLatch countDownLatch = new CountDownLatch(1); + Query query = new Query("SELECT * FROM disk", "not-existing-db"); + influxDB.query(query, 2, + //onNext - process result + (cancellable, queryResult) -> { + //assert that this is not executed 
in this test case + Assertions.fail("onNext() is executed!"); + }, + //onComplete + () -> Assertions.fail("onComplete() is executed !"), + //onFailure + throwable -> { + Assertions.assertTrue(throwable instanceof ConnectException); + countDownLatch.countDown(); + }); + + Assertions.assertTrue(countDownLatch.await(2, TimeUnit.SECONDS)); + + } + + @Test + public void testChunkingQueryPost() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", "c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); + + CountDownLatch countDownLatch = new CountDownLatch(2); + + Thread.sleep(2000); + Query query = new Query("SELECT * FROM disk", dbName, true); + this.influxDB.query(query, 2, result -> countDownLatch.countDown()); + + boolean await = countDownLatch.await(10, TimeUnit.SECONDS); + Assertions.assertTrue(await, "The QueryResults did not arrive!"); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Test public void testFlushPendingWritesWhenBatchingEnabled() { String dbName = "flush_tests_" + System.currentTimeMillis(); try { - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); // Enable batching with a 
very large buffer and flush interval so writes will be triggered by our call to flush(). this.influxDB.enableBatch(Integer.MAX_VALUE, Integer.MAX_VALUE, TimeUnit.HOURS); @@ -637,17 +1360,142 @@ public void testFlushPendingWritesWhenBatchingEnabled() { Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", dbName); QueryResult result = this.influxDB.query(query); - Assert.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); } finally { - this.influxDB.deleteDatabase(dbName); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); this.influxDB.disableBatch(); } } - @Test(expected = IllegalStateException.class) + @Test public void testFlushThrowsIfBatchingIsNotEnabled() { - Assert.assertFalse(this.influxDB.isBatchEnabled()); - this.influxDB.flush(); + Assertions.assertFalse(this.influxDB.isBatchEnabled()); + Assertions.assertThrows(IllegalStateException.class, () -> { + this.influxDB.flush(); + }); } + /** + * Test creation and deletion of retention policies + */ + @Test + public void testCreateDropRetentionPolicies() { + String dbName = "rpTest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + this.influxDB.query(new Query("CREATE RETENTION POLICY testRP1 ON " + dbName + " DURATION 30h REPLICATION 2")); + this.influxDB.query(new Query("CREATE RETENTION POLICY testRP2 ON " + dbName + " DURATION 10d REPLICATION 2 SHARD DURATION 20m")); + this.influxDB.query(new Query("CREATE RETENTION POLICY testRP3 ON " + dbName + " DURATION 2d4w REPLICATION 2 SHARD DURATION 20m DEFAULT")); + + Query query = new Query("SHOW RETENTION POLICIES", dbName); + QueryResult result = this.influxDB.query(query); + Assertions.assertNull(result.getError()); + List> retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); + 
Assertions.assertTrue(retentionPolicies.get(1).contains("testRP1")); + Assertions.assertTrue(retentionPolicies.get(2).contains("testRP2")); + Assertions.assertTrue(retentionPolicies.get(3).contains("testRP3")); + + this.influxDB.query(new Query("DROP RETENTION POLICY testRP1 ON " + dbName)); + this.influxDB.query(new Query("DROP RETENTION POLICY testRP2 ON " + dbName)); + this.influxDB.query(new Query("DROP RETENTION POLICY testRP3 ON " + dbName)); + + result = this.influxDB.query(query); + Assertions.assertNull(result.getError()); + retentionPolicies = result.getResults().get(0).getSeries().get(0).getValues(); + Assertions.assertTrue(retentionPolicies.size() == 1); + } + + /** + * Test the implementation of {@link InfluxDB#isBatchEnabled() with consistency}. + */ + @Test + public void testIsBatchEnabledWithConsistency() { + Assertions.assertFalse(this.influxDB.isBatchEnabled()); + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS, Executors.defaultThreadFactory(), + (a, b) -> { + }, InfluxDB.ConsistencyLevel.ALL); + Assertions.assertTrue(this.influxDB.isBatchEnabled()); + } + + /** + * Test initialize InfluxDBImpl with MessagePack format for InfluxDB versions before 1.4 will throw exception + */ + @Test + @EnabledIfEnvironmentVariable(named = "INFLUXDB_VERSION", matches = "1\\.3|1\\.2|1\\.1") + public void testMessagePackOnOldDbVersion() { + Assertions.assertThrows(UnsupportedOperationException.class, () -> { + InfluxDB influxDB = TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK); + influxDB.describeDatabases(); + }); + } + + /** + * test for issue #445 + * make sure reusing of OkHttpClient.Builder causes no error + * @throws InterruptedException + */ + @Test + public void testIssue445() throws InterruptedException { + ExecutorService executor = Executors.newFixedThreadPool(100); + + final int maxCallables = 10_000; + List> callableList = new ArrayList<>(maxCallables); + for (int i = 0; i < maxCallables; i++) { + callableList.add(new Callable() { + @Override + 
public String call() throws Exception { + MyInfluxDBBean myBean = new MyInfluxDBBean(); + return myBean.connectAndDoNothing1(); + } + }); + } + executor.invokeAll(callableList); + executor.shutdown(); + if (!executor.awaitTermination(20, TimeUnit.SECONDS)) { + executor.shutdownNow(); + } + Assertions.assertTrue(MyInfluxDBBean.OK); + //assert that MyInfluxDBBean.OKHTTP_BUILDER stays untouched (no interceptor added) + Assertions.assertTrue(MyInfluxDBBean.OKHTTP_BUILDER.interceptors().isEmpty()); + } + + @Test + public void testQueryPostWithGZIPCompression() { + this.influxDB.enableGzip(); + String database = "db_gzip_" + System.currentTimeMillis(); + this.influxDB.query(new Query(String.format("CREATE DATABASE %s", database), null, true)); + QueryResult query = this.influxDB.query(new Query("SHOW DATABASES", null, true)); + assertThat(query.getResults()).hasSize(1); + assertThat(query.getResults().get(0).getSeries()).hasSize(1); + assertThat(query.getResults().get(0).getSeries().get(0).getValues()).contains(Collections.singletonList(database)); + this.influxDB.query(new Query(String.format("DROP DATABASE %s", database), null, true)); + } + + private static final class MyInfluxDBBean { + + static final OkHttpClient.Builder OKHTTP_BUILDER = new OkHttpClient.Builder(); + static Boolean OK = true; + static final String URL = "http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true); + + InfluxDB influxClient; + + String connectAndDoNothing1() { + synchronized (OK) { + if (!OK) { + return null; + } + } + try { + influxClient = InfluxDBFactory.connect(URL, "admin", "admin", OKHTTP_BUILDER); + influxClient.close(); + } catch (Exception e) { + synchronized (OK) { + if (OK) { + OK = false; + } + } + } + return null; + } + } } diff --git a/src/test/java/org/influxdb/LogLevelTest.java b/src/test/java/org/influxdb/LogLevelTest.java new file mode 100644 index 000000000..59205d579 --- /dev/null +++ b/src/test/java/org/influxdb/LogLevelTest.java @@ -0,0 +1,33 @@ 
+package org.influxdb; + +import java.util.HashMap; +import java.util.Map; + +import org.influxdb.InfluxDB.LogLevel; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * Test the InfluxDBimpl log level setting from system property. + * + * @author hoan.le [at] bonitoo.io + * + */ +@RunWith(JUnitPlatform.class) +public class LogLevelTest { + @Test + public void testParseLogLevel() { + Map logLevelMap = new HashMap<>(); + logLevelMap.put(null, LogLevel.NONE); + logLevelMap.put("NONE", LogLevel.NONE); + logLevelMap.put("BASIC", LogLevel.BASIC); + logLevelMap.put("HEADERS", LogLevel.HEADERS); + logLevelMap.put("FULL", LogLevel.FULL); + logLevelMap.put("abc", LogLevel.NONE); + logLevelMap.forEach((value, logLevel) -> { + Assertions.assertEquals(LogLevel.parseLogLevel(value), logLevel); + }); + } +} diff --git a/src/test/java/org/influxdb/MessagePackBatchOptionsTest.java b/src/test/java/org/influxdb/MessagePackBatchOptionsTest.java new file mode 100644 index 000000000..4cb8c0238 --- /dev/null +++ b/src/test/java/org/influxdb/MessagePackBatchOptionsTest.java @@ -0,0 +1,26 @@ +package org.influxdb; + +import java.io.IOException; + +import org.influxdb.InfluxDB.ResponseFormat; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * Test the BatchOptions with MessagePack format + * + * @author hoan.le [at] bonitoo.io + * + */ +@RunWith(JUnitPlatform.class) +@EnabledIfEnvironmentVariable(named = "INFLUXDB_VERSION", matches = "1\\.6|1\\.5|1\\.4") +public class MessagePackBatchOptionsTest extends BatchOptionsTest { + + @Override + @BeforeEach + public void setUp() throws InterruptedException, IOException { + influxDB = TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK); + } +} diff --git 
a/src/test/java/org/influxdb/MessagePackInfluxDBTest.java b/src/test/java/org/influxdb/MessagePackInfluxDBTest.java new file mode 100644 index 000000000..f19e2343b --- /dev/null +++ b/src/test/java/org/influxdb/MessagePackInfluxDBTest.java @@ -0,0 +1,305 @@ +package org.influxdb; + +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import org.influxdb.InfluxDB.ResponseFormat; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * Test the InfluxDB API over MessagePack format + * + * @author hoan.le [at] bonitoo.io + * + */ +@RunWith(JUnitPlatform.class) +@EnabledIfEnvironmentVariable(named = "INFLUXDB_VERSION", matches = "1\\.6|1\\.5|1\\.4") +public class MessagePackInfluxDBTest extends InfluxDBTest { + /** + * Create a influxDB connection before all tests start. 
+ * + * @throws InterruptedException + * @throws IOException + */ + @Override + @BeforeEach + public void setUp() throws InterruptedException, IOException { + influxDB = TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK); + influxDB.query(new Query("CREATE DATABASE " + UDP_DATABASE)); + } + + /** + * Tests writing points using the time precision feature + * @throws Exception + */ + @Override + @Test + public void testWriteBatchWithPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + + long t1 = 1485273600; + Point p1 = Point + .measurement(measurement) + .addField("foo", 1d) + .tag("device", "one") + .time(t1, TimeUnit.SECONDS).build(); // 2017-01-27T16:00:00 + + long t2 = 1485277200; + Point p2 = Point + .measurement(measurement) + .addField("foo", 2d) + .tag("device", "two") + .time(t2, TimeUnit.SECONDS).build(); // 2017-01-27T17:00:00 + + long t3 = 1485280800; + Point p3 = Point + .measurement(measurement) + .addField("foo", 3d) + .tag("device", "three") + .time(t3, TimeUnit.SECONDS).build(); // 2017-01-27T18:00:00 + + BatchPoints batchPoints = BatchPoints + .database(dbName) + .retentionPolicy(rp) + .precision(TimeUnit.SECONDS) + .points(p1, p2, p3) + .build(); + + // WHEN I write the batch + this.influxDB.write(batchPoints); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName)); + long bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0)); + Assertions.assertEquals(bySecond, t1); + + bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) 
queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0)); + Assertions.assertEquals(bySecond, t2); + + bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0)); + Assertions.assertEquals(bySecond, t3); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Override + @Test + public void testWriteBatchWithoutPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + + // GIVEN a batch of points that has no specific precision + long t1 = 1485273600000000100L; + Point p1 = Point + .measurement(measurement) + .addField("foo", 1d) + .tag("device", "one") + .time(t1, TimeUnit.NANOSECONDS).build(); // 2017-01-27T16:00:00.000000100Z + Double timeP1 = Double.valueOf(t1); + + long t2 = 1485277200000000200L; + Point p2 = Point + .measurement(measurement) + .addField("foo", 2d) + .tag("device", "two") + .time(t2, TimeUnit.NANOSECONDS).build(); // 2017-01-27T17:00:00.000000200Z + Double timeP2 = Double.valueOf(t2); + + long t3 = 1485280800000000300L; + Point p3 = Point + .measurement(measurement) + .addField("foo", 3d) + .tag("device", "three") + .time(t3, TimeUnit.NANOSECONDS).build(); // 2017-01-27T18:00:00.000000300Z + Double timeP3 = Double.valueOf(t3); + + BatchPoints batchPoints = BatchPoints + .database(dbName) + .retentionPolicy(rp) + .points(p1, p2, p3) + .build(); + + // WHEN I write the batch + this.influxDB.write(batchPoints); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName), TimeUnit.NANOSECONDS); + 
Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + Double value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0).toString()); + Assertions.assertEquals(value, timeP1); + value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0).toString()); + Assertions.assertEquals(value, timeP2); + value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0).toString()); + Assertions.assertEquals(value, timeP3); + + // WHEN I use the post query + queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName, true), TimeUnit.NANOSECONDS); + + // THEN result will be same + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0).toString()); + Assertions.assertEquals(value, timeP1); + value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0).toString()); + Assertions.assertEquals(value, timeP2); + value = Double.valueOf(queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0).toString()); + Assertions.assertEquals(value, timeP3); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Override + @Test + public void testWriteRecordsWithPrecision() throws Exception { + // GIVEN a database and a measurement + String dbName = "precision_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + + String measurement = TestUtils.getRandomMeasurement(); + List records = new ArrayList<>(); + records.add(measurement + ",atag=test1 idle=100,usertime=10,system=1 1485273600"); + long timeP1 = 1485273600; + + records.add(measurement + ",atag=test2 
idle=200,usertime=20,system=2 1485277200"); + long timeP2 = 1485277200; + + records.add(measurement + ",atag=test3 idle=300,usertime=30,system=3 1485280800"); + long timeP3 = 1485280800; + + // WHEN I write the batch + this.influxDB.write(dbName, rp, InfluxDB.ConsistencyLevel.ONE, TimeUnit.SECONDS, records); + + // THEN the measure points have a timestamp with second precision + QueryResult queryResult = this.influxDB.query(new Query("SELECT * FROM " + measurement, dbName)); + Assertions.assertEquals(queryResult.getResults().get(0).getSeries().get(0).getValues().size(), 3); + + long bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) queryResult.getResults().get(0).getSeries().get(0).getValues().get(0).get(0)); + Assertions.assertEquals(bySecond, timeP1); + + bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) queryResult.getResults().get(0).getSeries().get(0).getValues().get(1).get(0)); + Assertions.assertEquals(bySecond, timeP2); + + bySecond = TimeUnit.NANOSECONDS.toSeconds( + (Long) queryResult.getResults().get(0).getSeries().get(0).getValues().get(2).get(0)); + Assertions.assertEquals(bySecond, timeP3); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + } + + @Override + @Test + public void testChunking() throws InterruptedException { + if (this.influxDB.version().startsWith("0.") || this.influxDB.version().startsWith("1.0")) { + // do not test version 0.13 and 1.0 + return; + } + String dbName = "write_unittest_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); + BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy(rp).build(); + Point point1 = Point.measurement("disk").tag("atag", "a").addField("used", 60L).addField("free", 1L).build(); + Point point2 = Point.measurement("disk").tag("atag", "b").addField("used", 70L).addField("free", 2L).build(); + Point point3 = Point.measurement("disk").tag("atag", 
"c").addField("used", 80L).addField("free", 3L).build(); + batchPoints.point(point1); + batchPoints.point(point2); + batchPoints.point(point3); + this.influxDB.write(batchPoints); + + Thread.sleep(2000); + final BlockingQueue queue = new LinkedBlockingQueue<>(); + Query query = new Query("SELECT * FROM disk", dbName); + this.influxDB.query(query, 2, new Consumer() { + @Override + public void accept(QueryResult result) { + queue.add(result); + }}); + + Thread.sleep(2000); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + + QueryResult result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(2, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + System.out.println(result); + Assertions.assertEquals(1, result.getResults().get(0).getSeries().get(0).getValues().size()); + + result = queue.poll(5, TimeUnit.SECONDS); + Assertions.assertNull(result); + } + + @Test + public void testInfluxDBVersionChecking() throws InterruptedException, IOException { + + InfluxDB spy = spy(TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK)); + + doReturn("1.5.2").when(spy).version(); + spy.databaseExists("abc"); + spy.close(); + + spy = spy(TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK)); + doReturn("v1.6.0").when(spy).version(); + spy.databaseExists("abc"); + spy.close(); + + assertThrows(UnsupportedOperationException.class, () -> { + InfluxDB spy1 = spy(TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK)); + try { + doReturn("1.3.0").when(spy1).version(); + spy1.databaseExists("abc"); + } finally { + spy1.close(); + } + + }); + + assertThrows(UnsupportedOperationException.class, () -> { + InfluxDB spy1 = spy(TestUtils.connectToInfluxDB(ResponseFormat.MSGPACK)); + try { + doReturn("a.b.c").when(spy1).version(); + spy1.databaseExists("abc"); + } finally { + spy1.close(); + } + }); + + } +} diff 
--git a/src/test/java/org/influxdb/PerformanceTests.java b/src/test/java/org/influxdb/PerformanceTests.java index ec783abb8..a1389826e 100644 --- a/src/test/java/org/influxdb/PerformanceTests.java +++ b/src/test/java/org/influxdb/PerformanceTests.java @@ -1,50 +1,58 @@ package org.influxdb; -import com.google.common.base.Stopwatch; import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; - +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; + +import static org.mockito.Mockito.*; + +import java.net.SocketTimeoutException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; +@RunWith(JUnitPlatform.class) public class PerformanceTests { private InfluxDB influxDB; private final static int COUNT = 1; private final static int POINT_COUNT = 100000; private final static int SINGLE_POINT_COUNT = 10000; - + private final static int UDP_PORT = 8089; private final static String UDP_DATABASE = "udp"; - @Before + @BeforeEach public void setUp() { this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "root", "root"); this.influxDB.setLogLevel(LogLevel.NONE); - this.influxDB.createDatabase(UDP_DATABASE); + this.influxDB.query(new Query("CREATE DATABASE " + UDP_DATABASE)); } - + /** * delete UDP database after all tests end. 
*/ - @After - public void clearup(){ - this.influxDB.deleteDatabase(UDP_DATABASE); + @AfterEach + public void cleanup(){ + this.influxDB.query(new Query("CREATE DATABASE " + UDP_DATABASE)); } @Test - public void writeSinglePointPerformance() { + public void testWriteSinglePointPerformance() { String dbName = "write_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); this.influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); for (int j = 0; j < SINGLE_POINT_COUNT; j++) { Point point = Point.measurement("cpu") .addField("idle", (double) j) @@ -53,18 +61,18 @@ public void writeSinglePointPerformance() { this.influxDB.write(dbName, rp, point); } this.influxDB.disableBatch(); - System.out.println("Single Point Write for " + SINGLE_POINT_COUNT + " writes of Points took:" + watch); - this.influxDB.deleteDatabase(dbName); + System.out.println("Single Point Write for " + SINGLE_POINT_COUNT + " writes of Points took:" + (System.currentTimeMillis() - start)); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - @Ignore + @Disabled @Test - public void writePerformance() { + public void testWritePerformance() { String dbName = "writepoints_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); for (int i = 0; i < COUNT; i++) { BatchPoints batchPoints = BatchPoints @@ -84,51 +92,119 @@ public void writePerformance() { this.influxDB.write(batchPoints); } - System.out.println("WritePoints for " + COUNT + " writes of " + POINT_COUNT + " Points took:" + watch); - 
this.influxDB.deleteDatabase(dbName); + System.out.println("WritePoints for " + COUNT + " writes of " + POINT_COUNT + " Points took:" + (System.currentTimeMillis() - start)); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } @Test - public void maxWritePointsPerformance() { + public void testMaxWritePointsPerformance() { String dbName = "d"; - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); this.influxDB.enableBatch(100000, 60, TimeUnit.SECONDS); String rp = TestUtils.defaultRetentionPolicy(this.influxDB.version()); - Stopwatch watch = Stopwatch.createStarted(); + long start = System.currentTimeMillis(); for (int i = 0; i < 2000000; i++) { Point point = Point.measurement("s").addField("v", 1.0).build(); this.influxDB.write(dbName, rp, point); } - System.out.println("5Mio points:" + watch); - this.influxDB.deleteDatabase(dbName); + System.out.println("5Mio points:" + (System.currentTimeMillis() - start)); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } + /** + * states that String.join("\n", records)*/ @Test - public void writeCompareUDPPerformanceForBatchWithSinglePoints() { - //prepare data - List lineProtocols = new ArrayList(); - for (int i = 0; i < 1000; i++) { - Point point = Point.measurement("udp_single_poit").addField("v", i).build(); - lineProtocols.add(point.lineProtocol()); - } - - //write batch of 1000 single string. - Stopwatch watch = Stopwatch.createStarted(); - this.influxDB.write(UDP_PORT, lineProtocols); - long elapsedForBatchWrite = watch.elapsed(TimeUnit.MILLISECONDS); - System.out.println("performance(ms):write udp with batch of 1000 string:" + elapsedForBatchWrite); - - //write 1000 single string by udp. 
- watch = Stopwatch.createStarted(); - for (String lineProtocol: lineProtocols){ - this.influxDB.write(UDP_PORT, lineProtocol); - } - - long elapsedForSingleWrite = watch.elapsed(TimeUnit.MILLISECONDS); - System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); - - Assert.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); - } + public void testWriteCompareUDPPerformanceForBatchWithSinglePoints() { + //prepare data + List lineProtocols = new ArrayList(); + for (int i = 0; i < 2000; i++) { + Point point = Point.measurement("udp_single_poit").addField("v", i).build(); + lineProtocols.add(point.lineProtocol()); + } + + String dbName = "write_compare_udp_" + System.currentTimeMillis(); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + this.influxDB.enableBatch(10000, 100, TimeUnit.MILLISECONDS); + + int repetitions = 15; + long start = System.currentTimeMillis(); + for (int i = 0; i < repetitions; i++) { + //write batch of 2000 single string. + this.influxDB.write(UDP_PORT, lineProtocols); + } + long elapsedForBatchWrite = System.currentTimeMillis() - start; + System.out.println("performance(ms):write udp with batch of 1000 string:" + elapsedForBatchWrite); + + // write 2000 single string by udp. 
+ start = System.currentTimeMillis(); + for (int i = 0; i < repetitions; i++) { + for (String lineProtocol : lineProtocols) { + this.influxDB.write(UDP_PORT, lineProtocol); + } + } + + long elapsedForSingleWrite = System.currentTimeMillis() - start; + System.out.println("performance(ms):write udp with 1000 single strings:" + elapsedForSingleWrite); + + this.influxDB.query(new Query("DROP DATABASE " + dbName)); + Assertions.assertTrue(elapsedForSingleWrite - elapsedForBatchWrite > 0); + } + + @Test + public void testRetryWritePointsInBatch() throws InterruptedException { + String dbName = "d"; + + InfluxDB spy = spy(influxDB); + TestAnswer answer = new TestAnswer() { + boolean started = false; + InfluxDBException influxDBException = new InfluxDBException(new SocketTimeoutException()); + @Override + protected void check(InvocationOnMock invocation) { + if (started || System.currentTimeMillis() >= (Long) params.get("startTime")) { + System.out.println("call real"); + started = true; + } else { + System.out.println("throw"); + throw influxDBException; + } + } + }; + + answer.params.put("startTime", System.currentTimeMillis() + 8000); + doAnswer(answer).when(spy).write(any(BatchPoints.class)); + + spy.query(new Query("CREATE DATABASE " + dbName)); + BatchOptions batchOptions = BatchOptions.DEFAULTS.actions(10000).flushDuration(2000).bufferLimit(300000).exceptionHandler((points, throwable) -> { + System.out.println("+++++++++++ exceptionHandler +++++++++++"); + System.out.println(throwable); + System.out.println("++++++++++++++++++++++++++++++++++++++++"); + }); + + //this.influxDB.enableBatch(100000, 60, TimeUnit.SECONDS); + spy.enableBatch(batchOptions); + String rp = TestUtils.defaultRetentionPolicy(spy.version()); + + for (long i = 0; i < 40000; i++) { + Point point = Point.measurement("s").time(i, TimeUnit.MILLISECONDS).addField("v", 1.0).build(); + spy.write(dbName, rp, point); + } + + System.out.println("sleep"); + Thread.sleep(12000); + try { + QueryResult result 
= spy.query(new Query("select count(v) from s", dbName)); + double d = Double.parseDouble(result.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString()); + Assertions.assertEquals(40000, d); + } catch (Exception e) { + System.out.println("+++++++++++++++++count() +++++++++++++++++++++"); + System.out.println(e); + System.out.println("++++++++++++++++++++++++++++++++++++++++++++++"); + + } + + spy.disableBatch(); + spy.query(new Query("DROP DATABASE " + dbName)); + } } diff --git a/src/test/java/org/influxdb/TestAnswer.java b/src/test/java/org/influxdb/TestAnswer.java new file mode 100644 index 000000000..8b0a2cd41 --- /dev/null +++ b/src/test/java/org/influxdb/TestAnswer.java @@ -0,0 +1,27 @@ +package org.influxdb; + +import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.Map; + +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public abstract class TestAnswer implements Answer { + + Map params = new HashMap<>(); + + protected abstract void check(InvocationOnMock invocation); + + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + check(invocation); + //call only non-abstract real method + if (Modifier.isAbstract(invocation.getMethod().getModifiers())) { + return null; + } else { + return invocation.callRealMethod(); + } + } + +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/TestUtils.java b/src/test/java/org/influxdb/TestUtils.java index 7ad8dff24..25112ac44 100644 --- a/src/test/java/org/influxdb/TestUtils.java +++ b/src/test/java/org/influxdb/TestUtils.java @@ -1,47 +1,102 @@ package org.influxdb; +import okhttp3.OkHttpClient; + +import org.influxdb.InfluxDB.ResponseFormat; +import org.influxdb.dto.Pong; + +import java.io.IOException; import java.util.Map; public class TestUtils { - public static String getInfluxIP() { - String ip = "127.0.0.1"; - - Map getenv = System.getenv(); - if (getenv.containsKey("INFLUXDB_IP")) { - ip 
= getenv.get("INFLUXDB_IP"); - } - - return ip; - } - - public static String getRandomMeasurement() { - return "measurement_" + System.nanoTime(); - } - - public static String getInfluxPORT(boolean apiPort) { - String port = "8086"; - - Map getenv = System.getenv(); - if(apiPort) { - if (getenv.containsKey("INFLUXDB_PORT_API")) - port = getenv.get("INFLUXDB_PORT_API"); - } - else { - port = "8096"; - if (getenv.containsKey("INFLUXDB_PORT_COLLECTD")) - port = getenv.get("INFLUXDB_PORT_COLLECTD"); - } - - return port; - } - - public static String defaultRetentionPolicy(String version) { - if (version.startsWith("0.") ) { - return "default"; - } else { - return "autogen"; - } - } + private static String getEnv(String name, String defaultValue) { + Map getenv = System.getenv(); + + if (getenv.containsKey(name)) { + return getenv.get(name); + } else { + return defaultValue; + } + } + + public static String getInfluxIP() { + return getEnv("INFLUXDB_IP", "127.0.0.1"); + } + + public static String getRandomMeasurement() { + return "measurement_" + System.nanoTime(); + } + + public static String getInfluxPORT(boolean apiPort) { + if(apiPort) { + return getEnv("INFLUXDB_PORT_API", "8086"); + } + else { + return getEnv("INFLUXDB_PORT_COLLECTD", "8096"); + } + } + + public static String getProxyApiUrl() { + return getEnv("PROXY_API_URL", "http://127.0.0.1:8086/"); + } + + public static String getProxyUdpPort() { + return getEnv("PROXY_UDP_PORT", "8089"); + } + + public static String defaultRetentionPolicy(String version) { + if (version.startsWith("0.") ) { + return "default"; + } else { + return "autogen"; + } + } + + public static InfluxDB connectToInfluxDB() throws InterruptedException, IOException { + return connectToInfluxDB(null, null, ResponseFormat.JSON); + } + public static InfluxDB connectToInfluxDB(ResponseFormat responseFormat) throws InterruptedException, IOException { + return connectToInfluxDB(null, null, responseFormat); + } + public static InfluxDB 
connectToInfluxDB(String apiUrl) throws InterruptedException, IOException { + return connectToInfluxDB(new OkHttpClient.Builder(), apiUrl, ResponseFormat.JSON); + } + + public static InfluxDB connectToInfluxDB(final OkHttpClient.Builder client, String apiUrl, + ResponseFormat responseFormat) throws InterruptedException, IOException { + OkHttpClient.Builder clientToUse; + if (client == null) { + clientToUse = new OkHttpClient.Builder(); + } else { + clientToUse = client; + } + String apiUrlToUse; + if (apiUrl == null) { + apiUrlToUse = "http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true); + } else { + apiUrlToUse = apiUrl; + } + InfluxDB influxDB = InfluxDBFactory.connect(apiUrlToUse, "admin", "admin", clientToUse, responseFormat); + boolean influxDBstarted = false; + do { + Pong response; + try { + response = influxDB.ping(); + if (response.isGood()) { + influxDBstarted = true; + } + } catch (Exception e) { + // NOOP intentional + e.printStackTrace(); + } + Thread.sleep(100L); + } while (!influxDBstarted); + influxDB.setLogLevel(InfluxDB.LogLevel.NONE); + System.out.println("##################################################################################"); + System.out.println("# Connected to InfluxDB Version: " + influxDB.version() + " #"); + System.out.println("##################################################################################"); + return influxDB; + } } diff --git a/src/test/java/org/influxdb/TicketTest.java b/src/test/java/org/influxdb/TicketTest.java index cd73b3ac0..936026d28 100644 --- a/src/test/java/org/influxdb/TicketTest.java +++ b/src/test/java/org/influxdb/TicketTest.java @@ -5,19 +5,22 @@ import java.util.Date; import java.util.concurrent.TimeUnit; -import org.influxdb.InfluxDB.LogLevel; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; -import org.influxdb.dto.Pong; -import org.junit.Before; -import org.junit.Test; - +import org.influxdb.dto.Query; +import org.junit.jupiter.api.BeforeEach; 
+import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; /** * Test the InfluxDB API. * * @author stefan.majer [at] gmail.com * */ +@DisplayName("Test for github issues") +@RunWith(JUnitPlatform.class) public class TicketTest { private InfluxDB influxDB; @@ -28,27 +31,9 @@ public class TicketTest { * @throws InterruptedException * @throws IOException */ - @Before + @BeforeEach public void setUp() throws InterruptedException, IOException { - this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); - boolean influxDBstarted = false; - do { - Pong response; - try { - response = this.influxDB.ping(); - if (!response.getVersion().equalsIgnoreCase("unknown")) { - influxDBstarted = true; - } - } catch (Exception e) { - // NOOP intentional - e.printStackTrace(); - } - Thread.sleep(100L); - } while (!influxDBstarted); - this.influxDB.setLogLevel(LogLevel.NONE); - System.out.println("##################################################################################"); - System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); - System.out.println("##################################################################################"); + this.influxDB = TestUtils.connectToInfluxDB(); } /** @@ -58,7 +43,7 @@ public void setUp() throws InterruptedException, IOException { @Test public void testTicket38() { String dbName = "ticket38_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); Point point1 = Point .measurement("metric") .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS) @@ -69,7 +54,7 @@ public void testTicket38() { .tag("region", "region") .build(); this.influxDB.write(dbName, TestUtils.defaultRetentionPolicy(this.influxDB.version()), point1); - 
this.influxDB.deleteDatabase(dbName); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } /** @@ -79,7 +64,7 @@ public void testTicket38() { @Test public void testTicket39() { String dbName = "ticket39_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); BatchPoints batchPoints = BatchPoints .database(dbName) .tag("async", "true") @@ -91,7 +76,7 @@ public void testTicket39() { Point point = builder.build(); batchPoints.point(point); this.influxDB.write(batchPoints); - this.influxDB.deleteDatabase(dbName); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } /** @@ -100,15 +85,15 @@ public void testTicket39() { @Test public void testTicket40() { String dbName = "ticket40_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); this.influxDB.enableBatch(100, 100, TimeUnit.MICROSECONDS); for (int i = 0; i < 1000; i++) { Point point = Point.measurement("cpu").addField("idle", 99.0).build(); this.influxDB.write(dbName, TestUtils.defaultRetentionPolicy(this.influxDB.version()), point); } - this.influxDB.deleteDatabase(dbName); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } - + /** * Test for ticket #303 * @@ -116,22 +101,22 @@ public void testTicket40() { @Test public void testTicket303() { String dbName = "ticket303_" + System.currentTimeMillis(); - this.influxDB.createDatabase(dbName); - - - Date rundate1 = new Date() ; + this.influxDB.query(new Query("CREATE DATABASE " + dbName)); + + + Date rundate1 = new Date() ; long rundate1Sec = rundate1.getTime() / 1000; - - - + + + Point point1 = Point .measurement("TestSlash") .time(rundate1Sec, TimeUnit.SECONDS) - .tag("precision", "Second") - .addField("MultipleSlash" , "echo \\\".ll 12.0i\\\";") - .build(); + .tag("precision", "Second") + .addField("MultipleSlash" , "echo \\\".ll 12.0i\\\";") + .build(); 
this.influxDB.write(dbName, TestUtils.defaultRetentionPolicy(this.influxDB.version()), point1); - this.influxDB.deleteDatabase(dbName); + this.influxDB.query(new Query("DROP DATABASE " + dbName)); } } diff --git a/src/test/java/org/influxdb/UDPInfluxDBTest.java b/src/test/java/org/influxdb/UDPInfluxDBTest.java new file mode 100644 index 000000000..c63fcae86 --- /dev/null +++ b/src/test/java/org/influxdb/UDPInfluxDBTest.java @@ -0,0 +1,214 @@ +package org.influxdb; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import org.influxdb.InfluxDB.LogLevel; +import org.influxdb.dto.Point; +import org.influxdb.dto.Pong; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +/** + * Test the InfluxDB API. + * + * @author stefan.majer [at] gmail.com + * + */ +@RunWith(JUnitPlatform.class) +public class UDPInfluxDBTest { + + private InfluxDB influxDB; + private final static int UDP_PORT = 8089; + private final static String UDP_DATABASE = "udp"; + + /** + * Create a influxDB connection before all tests start. 
+ * + * @throws InterruptedException + * @throws IOException + */ + @BeforeEach + public void setUp() throws InterruptedException, IOException { + this.influxDB = InfluxDBFactory.connect("http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true), "admin", "admin"); + boolean influxDBstarted = false; + do { + Pong response; + try { + response = this.influxDB.ping(); + if (!response.getVersion().equalsIgnoreCase("unknown")) { + influxDBstarted = true; + } + } catch (Exception e) { + // NOOP intentional + e.printStackTrace(); + } + Thread.sleep(100L); + } while (!influxDBstarted); + this.influxDB.setLogLevel(LogLevel.NONE); + this.influxDB.createDatabase(UDP_DATABASE); + System.out.println("################################################################################## "); + System.out.println("# Connected to InfluxDB Version: " + this.influxDB.version() + " #"); + System.out.println("##################################################################################"); + } + + /** + * delete UDP database after all tests end. + */ + @AfterEach + public void cleanup() { + this.influxDB.deleteDatabase(UDP_DATABASE); + } + + /** + * Test the implementation of {@link InfluxDB#write(int, Point)}'s sync + * support. + */ + @Test + public void testSyncWritePointThroughUDP() throws InterruptedException { + this.influxDB.disableBatch(); + String measurement = TestUtils.getRandomMeasurement(); + Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); + this.influxDB.write(UDP_PORT, point); + Thread.sleep(2000); + Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + QueryResult result = this.influxDB.query(query); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + } + + /** + * Test the implementation of {@link InfluxDB#write(int, Point)}'s async + * support. 
+ */ + @Test + public void testAsyncWritePointThroughUDP() throws InterruptedException { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + try { + Assertions.assertTrue(this.influxDB.isBatchEnabled()); + String measurement = TestUtils.getRandomMeasurement(); + Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); + this.influxDB.write(UDP_PORT, point); + Thread.sleep(2000); + Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + QueryResult result = this.influxDB.query(query); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + } finally { + this.influxDB.disableBatch(); + } + } + + /** + * Test the implementation of {@link InfluxDB#write(int, Point)}'s async + * support. + */ + @Test + public void testAsyncWritePointThroughUDPFail() { + this.influxDB.enableBatch(1, 1, TimeUnit.SECONDS); + try { + Assertions.assertTrue(this.influxDB.isBatchEnabled()); + String measurement = TestUtils.getRandomMeasurement(); + Point point = Point.measurement(measurement).tag("atag", "test").addField("used", 80L).addField("free", 1L).build(); + Thread.currentThread().interrupt(); + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, point); + }); + } finally { + this.influxDB.disableBatch(); + } + } + + /** + * Test writing to the database using string protocol through UDP. + */ + @Test + public void testWriteStringDataThroughUDP() throws InterruptedException { + String measurement = TestUtils.getRandomMeasurement(); + this.influxDB.write(UDP_PORT, measurement + ",atag=test idle=90,usertime=9,system=1"); + //write with UDP may be executed on server after query with HTTP. 
so sleep 2s to handle this case + Thread.sleep(2000); + Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + QueryResult result = this.influxDB.query(query); + Assertions.assertFalse(result.getResults().get(0).getSeries().get(0).getTags().isEmpty()); + } + + + + /** + * When batch of points' size is over UDP limit, the expected exception is + * java.lang.RuntimeException: java.net.SocketException: The message is + * larger than the maximum supported by the underlying transport: Datagram + * send failed + * + * @throws Exception + */ + @Test + public void testWriteMultipleStringDataLinesOverUDPLimit() throws Exception { + //prepare data + List lineProtocols = new ArrayList(); + int i = 0; + int length = 0; + while (true) { + Point point = Point.measurement("udp_single_poit").addField("v", i).build(); + String lineProtocol = point.lineProtocol(); + length += (lineProtocol.getBytes("utf-8")).length; + lineProtocols.add(lineProtocol); + if (length > 65535) { + break; + } + } + //write batch of string which size is over 64K + Assertions.assertThrows(RuntimeException.class, () -> { + this.influxDB.write(UDP_PORT, lineProtocols); + }); + } + + /** + * Test writing multiple records to the database using string protocol + * through UDP. 
+ */ + @Test + public void testWriteMultipleStringDataThroughUDP() throws InterruptedException { + String measurement = TestUtils.getRandomMeasurement(); + this.influxDB.write(UDP_PORT, measurement + ",atag=test1 idle=100,usertime=10,system=1\n" + + measurement + ",atag=test2 idle=200,usertime=20,system=2\n" + + measurement + ",atag=test3 idle=300,usertime=30,system=3"); + Thread.sleep(2000); + Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + QueryResult result = this.influxDB.query(query); + + Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + } + + /** + * Test writing multiple separate records to the database using string + * protocol through UDP. 
+ */ + @Test + public void testWriteMultipleStringDataLinesThroughUDP() throws InterruptedException { + String measurement = TestUtils.getRandomMeasurement(); + this.influxDB.write(UDP_PORT, Arrays.asList( + measurement + ",atag=test1 idle=100,usertime=10,system=1", + measurement + ",atag=test2 idle=200,usertime=20,system=2", + measurement + ",atag=test3 idle=300,usertime=30,system=3" + )); + Thread.sleep(2000); + Query query = new Query("SELECT * FROM " + measurement + " GROUP BY *", UDP_DATABASE); + QueryResult result = this.influxDB.query(query); + + Assertions.assertEquals(3, result.getResults().get(0).getSeries().size()); + Assertions.assertEquals("test1", result.getResults().get(0).getSeries().get(0).getTags().get("atag")); + Assertions.assertEquals("test2", result.getResults().get(0).getSeries().get(1).getTags().get("atag")); + Assertions.assertEquals("test3", result.getResults().get(0).getSeries().get(2).getTags().get("atag")); + } +} diff --git a/src/test/java/org/influxdb/dto/BatchPointTest.java b/src/test/java/org/influxdb/dto/BatchPointTest.java index cd6af1dd4..236e7165d 100644 --- a/src/test/java/org/influxdb/dto/BatchPointTest.java +++ b/src/test/java/org/influxdb/dto/BatchPointTest.java @@ -1,30 +1,37 @@ package org.influxdb.dto; import static org.assertj.core.api.Assertions.assertThat; - +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.influxdb.InfluxDB; -import org.junit.Test; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +@RunWith(JUnitPlatform.class) 
public class BatchPointTest { @Test public void testEquals() throws Exception { // GIVEN two batchpoint objects with the same values - Map tags = Maps.newHashMap(); + Map tags = new HashMap<>(); tags.put("key", "value"); InfluxDB.ConsistencyLevel consistencyLevel = InfluxDB.ConsistencyLevel.ANY; String db = "my database"; - List points = Lists.newArrayList(); + List points = new ArrayList<>(); Point p = new Point(); p.setPrecision(TimeUnit.MILLISECONDS); p.setMeasurement("my measurements"); @@ -56,10 +63,10 @@ public void testEquals() throws Exception { @Test public void testUnEquals() throws Exception { // GIVEN two batchpoint objects with different values - Map tags1 = Maps.newHashMap(); + Map tags1 = new HashMap<>(); tags1.put("key", "value1"); - Map tags2 = Maps.newHashMap(); + Map tags2 = new HashMap<>(); tags2.put("key", "value2"); InfluxDB.ConsistencyLevel consistencyLevel1 = InfluxDB.ConsistencyLevel.ANY; @@ -68,7 +75,7 @@ public void testUnEquals() throws Exception { String db1 = "my database 1"; String db2 = "my database 2"; - List points = Lists.newArrayList(); + List points = new ArrayList<>(); Point p = new Point(); p.setPrecision(TimeUnit.MILLISECONDS); p.setMeasurement("my measurements"); @@ -97,4 +104,21 @@ public void testUnEquals() throws Exception { // THEN equals returns true assertThat(equals).isEqualTo(false); } + + @Test + public void emptyDatabase() throws Exception { + BatchPoints b = BatchPoints.builder().build(); + assertNull(b.getDatabase()); + } + + @Test + public void pointsCollection() { + Point p1 = Point.measurement("something").addField("one", 1).build(); + Point p2 = Point.measurement("something2").addField("two", 2).build(); + Collection points = Arrays.asList(p1, p2); + BatchPoints b = BatchPoints.builder().points(points).build(); + List returned = b.getPoints(); + assertNotNull(returned); + assertEquals(2, returned.size()); + } } diff --git a/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java 
b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java new file mode 100644 index 000000000..3ab185272 --- /dev/null +++ b/src/test/java/org/influxdb/dto/BoundParameterQueryTest.java @@ -0,0 +1,91 @@ +package org.influxdb.dto; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; + +import org.influxdb.dto.BoundParameterQuery.QueryBuilder; +import org.junit.Assert; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.Moshi; + +/** + * Test for the BoundParameterQuery DTO. + */ +@RunWith(JUnitPlatform.class) +public class BoundParameterQueryTest { + + @Test + public void testGetParameterJsonWithUrlEncoded() throws IOException { + BoundParameterQuery query = QueryBuilder.newQuery("SELECT * FROM abc WHERE integer > $i" + + "AND double = $d AND bool = $bool AND string = $string AND other = $object") + .forDatabase("foobar") + .bind("i", 0) + .bind("d", 1.0) + .bind("bool", true) + .bind("string", "test") + .bind("object", new Object()) + .create(); + + Moshi moshi = new Moshi.Builder().build(); + JsonAdapter adapter = moshi.adapter(Point.class); + Point point = adapter.fromJson(decode(query.getParameterJsonWithUrlEncoded())); + Assert.assertEquals(0, point.i); + Assert.assertEquals(1.0, point.d, 0.0); + Assert.assertEquals(true, point.bool); + Assert.assertEquals("test", point.string); + Assert.assertTrue(point.object.matches("java.lang.Object@[a-z0-9]+")); + } + + @Test + public void testEqualsAndHashCode() { + String stringA0 = "SELECT * FROM foobar WHERE a = $a"; + String stringA1 = "SELECT * FROM foobar WHERE a = $a"; + String stringB0 = "SELECT * FROM foobar WHERE b = $b"; + + Query queryA0 = QueryBuilder.newQuery(stringA0) + .forDatabase(stringA0) + .bind("a", 0) + 
.create(); + Query queryA1 = QueryBuilder.newQuery(stringA1) + .forDatabase(stringA1) + .bind("a", 0) + .create(); + Query queryA2 = QueryBuilder.newQuery(stringA1) + .forDatabase(stringA1) + .bind("a", 10) + .create(); + Query queryB0 = QueryBuilder.newQuery(stringB0) + .forDatabase(stringB0) + .bind("b", 10) + .create(); + + assertThat(queryA0).isEqualTo(queryA0); + assertThat(queryA0).isEqualTo(queryA1); + assertThat(queryA0).isNotEqualTo(queryA2); + assertThat(queryA0).isNotEqualTo(queryB0); + assertThat(queryA0).isNotEqualTo("foobar"); + + assertThat(queryA0.hashCode()).isEqualTo(queryA1.hashCode()); + assertThat(queryA0.hashCode()).isNotEqualTo(queryB0.hashCode()); + } + + private static String decode(String str) throws UnsupportedEncodingException { + return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); + } + + private static class Point { + int i; + double d; + String string; + Boolean bool; + String object; + } +} diff --git a/src/test/java/org/influxdb/dto/PointTest.java b/src/test/java/org/influxdb/dto/PointTest.java old mode 100644 new mode 100755 index 62301db39..9148c64ba --- a/src/test/java/org/influxdb/dto/PointTest.java +++ b/src/test/java/org/influxdb/dto/PointTest.java @@ -1,320 +1,1224 @@ package org.influxdb.dto; -import static org.assertj.core.api.Assertions.assertThat; - +import org.influxdb.BuilderException; +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.impl.InfluxDBImpl; +import org.junit.Assert; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.Mockito; + +import java.lang.reflect.Field; import java.math.BigDecimal; import java.math.BigInteger; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import 
java.util.Collections; +import java.util.Date; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.collect.ImmutableMap; -import org.junit.Test; - -import com.google.common.collect.Maps; +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; /** * Test for the Point DTO. * * @author stefan.majer [at] gmail.com - * */ +@RunWith(JUnitPlatform.class) public class PointTest { - /** - * Test that lineprotocol is conformant to: - * - * https://github.com/influxdb/influxdb/blob/master/tsdb/README.md - * - */ - @Test - public void lineProtocol() { - Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1"); - - point = Point.measurement("test,1").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test\\,1 a=1.0 1"); - - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A").build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\" 1"); - - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A\"B").build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\" 1"); - - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A\"B\"C").build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\\\"C\" 1"); - - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A B C").build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A B C\" 1"); - - point = Point - .measurement("test") - .time(1, TimeUnit.NANOSECONDS) - .addField("a", "A\"B") - .addField("b", "D E \"F") - .build(); - 
assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\",b=\"D E \\\"F\" 1"); - - //Integer type - point = Point.measurement("inttest").time(1, TimeUnit.NANOSECONDS).addField("a", (Integer)1).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("inttest a=1i 1"); - - point = Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", (Integer)1).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=1i 1"); - - point = Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", 1L).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=1i 1"); - - point = Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", BigInteger.valueOf(100)).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=100i 1"); - } - - /** - * Test for ticket #44 - */ - @Test - public void testTicket44() { - Point point = Point.measurement("test").time(1, TimeUnit.MICROSECONDS).addField("a", 1.0).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1000"); - - point = Point.measurement("test").time(1, TimeUnit.MILLISECONDS).addField("a", 1.0).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1000000"); - - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); - BatchPoints batchPoints = BatchPoints.database("db").point(point).build(); - assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1\n"); - - point = Point.measurement("test").time(1, TimeUnit.MICROSECONDS).addField("a", 1.0).build(); - batchPoints = BatchPoints.database("db").point(point).build(); - assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000\n"); - - point = Point.measurement("test").time(1, TimeUnit.MILLISECONDS).addField("a", 1.0).build(); - batchPoints = BatchPoints.database("db").point(point).build(); - 
assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000000\n"); - - point = Point.measurement("test").addField("a", 1.0).time(1, TimeUnit.MILLISECONDS).build(); - batchPoints = BatchPoints.database("db").build(); - batchPoints = batchPoints.point(point); - assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000000\n"); - - } - - /** - * Test for ticket #54 - */ - @Test - public void testTicket54() { - Byte byteNumber = 100; - Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", byteNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100i 1"); - - int intNumber = 100000000; - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", intNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); - - Integer integerNumber = 100000000; - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", integerNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); - - AtomicInteger atomicIntegerNumber = new AtomicInteger(100000000); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", atomicIntegerNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); - - Long longNumber = 1000000000000000000L; - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", longNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1000000000000000000i 1"); - - AtomicLong atomicLongNumber = new AtomicLong(1000000000000000000L); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", atomicLongNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1000000000000000000i 1"); - - BigInteger bigIntegerNumber = BigInteger.valueOf(100000000); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 
bigIntegerNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); - - Double doubleNumber = Double.valueOf(100000000.0001); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", doubleNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000.0001 1"); - - Float floatNumber = Float.valueOf(0.1f); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", floatNumber).build(); - assertThat(point.lineProtocol()).asString().startsWith("test a=0.10"); - - BigDecimal bigDecimalNumber = BigDecimal.valueOf(100000000.00000001); - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", bigDecimalNumber).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000.00000001 1"); - } - - @Test - public void testEscapingOfKeysAndValues() { - // Test escaping of spaces - Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar baz").addField( "a", 1.0 ).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\ baz a=1.0 1"); - - // Test escaping of commas - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar,baz").addField( "a", 1.0 ).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\,baz a=1.0 1"); - - // Test escaping of equals sign - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar=baz").addField( "a", 1.0 ).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\=baz a=1.0 1"); - - // Test escaping of escape character - point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("foo", "test\\test").build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test foo=\"test\\\\test\" 1"); - } - - @Test - public void testDeprecatedFieldMethodOnlyProducesFloatingPointValues() { - - Object[] ints = {(byte) 1, (short) 1, (int) 1, (long) 
1, BigInteger.ONE}; - - for (Object intExample : ints) { - Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).field("a", intExample ).build(); - assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1"); - } - - } - /** - * Test for issue #117. - */ - @Test - public void testIgnoreNullPointerValue() { - // Test omission of null values - Point.Builder pointBuilder = Point.measurement("nulltest").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar"); - - pointBuilder.field("field1", "value1"); - pointBuilder.field("field2", (Number) null); - pointBuilder.field("field3", (Integer) 1); - - Point point = pointBuilder.build(); - - assertThat(point.lineProtocol()).asString().isEqualTo("nulltest,foo=bar field1=\"value1\",field3=1.0 1"); - } - - /** - * Tests for issue #110 - */ - @Test(expected = IllegalArgumentException.class) - public void testAddingTagsWithNullNameThrowsAnError() { - Point.measurement("dontcare").tag(null, "DontCare"); - } - - @Test(expected = IllegalArgumentException.class) - public void testAddingTagsWithNullValueThrowsAnError() { - Point.measurement("dontcare").tag("DontCare", null); - } - - @Test(expected = IllegalArgumentException.class) - public void testAddingMapOfTagsWithNullNameThrowsAnError() { - Map map = Maps.newHashMap(); - map.put(null, "DontCare"); - Point.measurement("dontcare").tag(map); - } - - @Test(expected = IllegalArgumentException.class) - public void testAddingMapOfTagsWithNullValueThrowsAnError() { - Map map = Maps.newHashMap(); - map.put("DontCare", null); - Point.measurement("dontcare").tag(map); - } - - @Test(expected = IllegalArgumentException.class) - public void testNullValueThrowsExceptionViaAddField() { - Point.measurement("dontcare").addField("field", (String) null); - } - - @Test - public void testEmptyValuesAreIgnored() { - Point point = Point.measurement("dontcare").tag("key","").addField("dontcare", true).build(); - assertThat(point.getTags().size()).isEqualTo(0); - - point = 
Point.measurement("dontcare").tag("","value").addField("dontcare", true).build(); - assertThat(point.getTags().size()).isEqualTo(0); - - point = Point.measurement("dontcare").tag(ImmutableMap.of("key","")).addField("dontcare", true).build(); - assertThat(point.getTags().size()).isEqualTo(0); - - point = Point.measurement("dontcare").tag(ImmutableMap.of("","value")).addField("dontcare", true).build(); - assertThat(point.getTags().size()).isEqualTo(0); - } - - /** - * Tests for issue #266 - */ - @Test - public void testEquals() throws Exception { - // GIVEN two point objects with identical data - Map fields = Maps.newHashMap(); - fields.put("foo", "bar"); - - String measurement = "measurement"; - - TimeUnit precision = TimeUnit.NANOSECONDS; - - Map tags = Maps.newHashMap(); - tags.put("bar", "baz"); - - Long time = System.currentTimeMillis(); - - Point p1 = new Point(); - p1.setFields(fields); - p1.setMeasurement(measurement); - p1.setPrecision(precision); - p1.setTags(tags); - p1.setTime(time); - - Point p2 = new Point(); - p2.setFields(fields); - p2.setMeasurement(measurement); - p2.setPrecision(precision); - p2.setTags(tags); - p2.setTime(time); - - // WHEN I call equals on one with the other as arg - boolean equals = p1.equals(p2); - - // THEN equals returns true - assertThat(equals).isEqualTo(true); - } - - @Test - public void testUnEquals() throws Exception { - // GIVEN two point objects with different data - Map fields1 = Maps.newHashMap(); - fields1.put("foo", "bar"); - - Map fields2 = Maps.newHashMap(); - fields2.put("foo", "baz"); - - String measurement = "measurement"; - - TimeUnit precision = TimeUnit.NANOSECONDS; - - Map tags = Maps.newHashMap(); - tags.put("bar", "baz"); - - Long time = System.currentTimeMillis(); - - Point p1 = new Point(); - p1.setFields(fields1); - p1.setMeasurement(measurement); - p1.setPrecision(precision); - p1.setTags(tags); - p1.setTime(time); - - Point p2 = new Point(); - p2.setFields(fields2); - p2.setMeasurement(measurement); 
- p2.setPrecision(precision); - p2.setTags(tags); - p2.setTime(time); - - // WHEN I call equals on one with the other as arg - boolean equals = p1.equals(p2); - - // THEN equals returns true - assertThat(equals).isEqualTo(false); - } + /** + * Test that lineprotocol is conformant to: + *

+ * https://github.com/influxdb/influxdb/blob/master/tsdb/README.md + */ + @Test + public void testLineProtocol() { + Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1"); + + point = Point.measurement("test,1").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test\\,1 a=1.0 1"); + + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A").build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\" 1"); + + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A\"B").build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\" 1"); + + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A\"B\"C").build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\\\"C\" 1"); + + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", "A B C").build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A B C\" 1"); + + point = Point + .measurement("test") + .time(1, TimeUnit.NANOSECONDS) + .addField("a", "A\"B") + .addField("b", "D E \"F") + .build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=\"A\\\"B\",b=\"D E \\\"F\" 1"); + + //Integer type + point = Point.measurement("inttest").time(1, TimeUnit.NANOSECONDS).addField("a", (Integer) 1).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("inttest a=1i 1"); + + point = Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", (Integer) 1).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=1i 1"); + + point = Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", 1L).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=1i 1"); + + point = 
Point.measurement("inttest,1").time(1, TimeUnit.NANOSECONDS).addField("a", BigInteger.valueOf(100)).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("inttest\\,1 a=100i 1"); + } + + /** + * Test for ticket #44 + */ + @Test + public void testTicket44() { + Point point = Point.measurement("test").time(1, TimeUnit.MICROSECONDS).addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1000"); + + point = Point.measurement("test").time(1, TimeUnit.MILLISECONDS).addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1000000"); + + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", 1.0).build(); + BatchPoints batchPoints = BatchPoints.database("db").point(point).build(); + assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1\n"); + + point = Point.measurement("test").time(1, TimeUnit.MICROSECONDS).addField("a", 1.0).build(); + batchPoints = BatchPoints.database("db").point(point).build(); + assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000\n"); + + point = Point.measurement("test").time(1, TimeUnit.MILLISECONDS).addField("a", 1.0).build(); + batchPoints = BatchPoints.database("db").point(point).build(); + assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000000\n"); + + point = Point.measurement("test").addField("a", 1.0).time(1, TimeUnit.MILLISECONDS).build(); + batchPoints = BatchPoints.database("db").build(); + batchPoints = batchPoints.point(point); + assertThat(batchPoints.lineProtocol()).asString().isEqualTo("test a=1.0 1000000\n"); + + } + + /** + * Test for ticket #54 + */ + @Test + public void testTicket54() { + Byte byteNumber = 100; + Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", byteNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100i 1"); + + int intNumber = 100000000; + point = 
Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", intNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); + + Integer integerNumber = 100000000; + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", integerNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); + + AtomicInteger atomicIntegerNumber = new AtomicInteger(100000000); + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", atomicIntegerNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); + + Long longNumber = 1000000000000000000L; + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", longNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1000000000000000000i 1"); + + AtomicLong atomicLongNumber = new AtomicLong(1000000000000000000L); + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", atomicLongNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1000000000000000000i 1"); + + BigInteger bigIntegerNumber = BigInteger.valueOf(100000000); + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", bigIntegerNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000i 1"); + + Double doubleNumber = Double.valueOf(100000000.0001); + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", doubleNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000.0001 1"); + + Float floatNumber = Float.valueOf(0.1f); + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("a", floatNumber).build(); + assertThat(point.lineProtocol()).asString().startsWith("test a=0.10"); + + BigDecimal bigDecimalNumber = BigDecimal.valueOf(100000000.00000001); + point = Point.measurement("test").time(1, 
TimeUnit.NANOSECONDS).addField("a", bigDecimalNumber).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=100000000.00000001 1"); + } + + @Test + public void testEscapingOfKeysAndValues() { + // Test escaping of spaces + Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar baz").addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\ baz a=1.0 1"); + + // Test escaping of commas + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar,baz").addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\,baz a=1.0 1"); + + // Test escaping of equals sign + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar=baz").addField("a", 1.0).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test,foo=bar\\=baz a=1.0 1"); + + // Test escaping of escape character + point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).addField("foo", "test\\test").build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test foo=\"test\\\\test\" 1"); + } + + @Test + public void testDeprecatedFieldMethodOnlyProducesFloatingPointValues() { + + Object[] ints = {(byte) 1, (short) 1, (int) 1, (long) 1, BigInteger.ONE}; + + for (Object intExample : ints) { + Point point = Point.measurement("test").time(1, TimeUnit.NANOSECONDS).field("a", intExample).build(); + assertThat(point.lineProtocol()).asString().isEqualTo("test a=1.0 1"); + } + + } + + /** + * Test for issue #117. 
+ */ + @Test + public void testIgnoreNullPointerValue() { + // Test omission of null values + Point.Builder pointBuilder = Point.measurement("nulltest").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar"); + + pointBuilder.field("field1", "value1"); + pointBuilder.field("field2", (Number) null); + pointBuilder.field("field3", 1); + + Point point = pointBuilder.build(); + + assertThat(point.lineProtocol()).asString().isEqualTo("nulltest,foo=bar field1=\"value1\",field3=1.0 1"); + } + + /** + * Tests for issue #110 + */ + @Test + public void testAddingTagsWithNullNameThrowsAnError() { + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(null, "DontCare"); + }); + } + + @Test + public void testAddingTagsWithNullValueThrowsAnError() { + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag("DontCare", null); + }); + } + + @Test + public void testAddingMapOfTagsWithNullNameThrowsAnError() { + Map map = new HashMap<>(); + map.put(null, "DontCare"); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(map); + }); + } + + @Test + public void testAddingMapOfTagsWithNullValueThrowsAnError() { + Map map = new HashMap<>(); + map.put("DontCare", null); + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").tag(map); + }); + } + + @Test + public void testNullValueThrowsExceptionViaAddField() { + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurement("dontcare").addField("field", (String) null); + }); + } + + @Test + public void testEmptyValuesAreIgnored() { + Point point = Point.measurement("dontcare").tag("key", "").addField("dontcare", true).build(); + assertThat(point.getTags().size()).isEqualTo(0); + + point = Point.measurement("dontcare").tag("", "value").addField("dontcare", true).build(); + assertThat(point.getTags().size()).isEqualTo(0); + + point = 
Point.measurement("dontcare").tag(Collections.singletonMap("key", "")).addField("dontcare", true).build(); + assertThat(point.getTags().size()).isEqualTo(0); + + point = Point.measurement("dontcare").tag(Collections.singletonMap("", "value")).addField("dontcare", true).build(); + assertThat(point.getTags().size()).isEqualTo(0); + } + + /** + * Tests for issue #266 + */ + @Test + public void testEquals() throws Exception { + // GIVEN two point objects with identical data + Map fields = new HashMap<>(); + fields.put("foo", "bar"); + + String measurement = "measurement"; + + TimeUnit precision = TimeUnit.NANOSECONDS; + + Map tags = new HashMap<>(); + tags.put("bar", "baz"); + + Long time = System.currentTimeMillis(); + + Point p1 = new Point(); + p1.setFields(fields); + p1.setMeasurement(measurement); + p1.setPrecision(precision); + p1.setTags(tags); + p1.setTime(time); + + Point p2 = new Point(); + p2.setFields(fields); + p2.setMeasurement(measurement); + p2.setPrecision(precision); + p2.setTags(tags); + p2.setTime(time); + + // WHEN I call equals on one with the other as arg + boolean equals = p1.equals(p2); + + // THEN equals returns true + assertThat(equals).isEqualTo(true); + } + + @Test + public void testUnEquals() throws Exception { + // GIVEN two point objects with different data + Map fields1 = new HashMap<>(); + fields1.put("foo", "bar"); + + Map fields2 = new HashMap<>(); + fields2.put("foo", "baz"); + + String measurement = "measurement"; + + TimeUnit precision = TimeUnit.NANOSECONDS; + + Map tags = new HashMap<>(); + tags.put("bar", "baz"); + + Long time = System.currentTimeMillis(); + + Point p1 = new Point(); + p1.setFields(fields1); + p1.setMeasurement(measurement); + p1.setPrecision(precision); + p1.setTags(tags); + p1.setTime(time); + + Point p2 = new Point(); + p2.setFields(fields2); + p2.setMeasurement(measurement); + p2.setPrecision(precision); + p2.setTags(tags); + p2.setTime(time); + + // WHEN I call equals on one with the other as arg + boolean 
equals = p1.equals(p2); + + // THEN equals returns true + assertThat(equals).isEqualTo(false); + } + + @Test + public void testBuilderHasFields() { + Point.Builder pointBuilder = Point.measurement("nulltest").time(1, TimeUnit.NANOSECONDS).tag("foo", "bar"); + assertThat(pointBuilder.hasFields()).isFalse(); + + pointBuilder.addField("testfield", 256); + assertThat(pointBuilder.hasFields()).isTrue(); + } + + /** + * Tests for #267 + * + * @throws Exception + */ + @Test + public void testLineProtocolBigInteger() throws Exception { + // GIVEN a point with nanosecond precision farther in the future than a long can hold + Instant instant = Instant.EPOCH.plus(600L * 365, ChronoUnit.DAYS); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(BigInteger.valueOf(instant.getEpochSecond()) + .multiply(BigInteger.valueOf(1000000000L)) + .add(BigInteger.valueOf(instant.getNano())), TimeUnit.NANOSECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.NANOSECONDS) + String nanosTime = p.lineProtocol(TimeUnit.NANOSECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in nanoseconds + assertThat(nanosTime).isEqualTo(BigInteger.valueOf(instant.getEpochSecond()) + .multiply(BigInteger.valueOf(1000000000L)) + .add(BigInteger.valueOf(instant.getNano())).toString()); + } + + /** + * Tests for #267 + * + * @throws Exception + */ + @Test + public void testLineProtocolBigIntegerSeconds() throws Exception { + // GIVEN a point with nanosecond precision farther in the future than a long can hold + Instant instant = Instant.EPOCH.plus(600L * 365, ChronoUnit.DAYS); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(BigInteger.valueOf(instant.getEpochSecond()) + .multiply(BigInteger.valueOf(1000000000L)) + .add(BigInteger.valueOf(instant.getNano())), TimeUnit.NANOSECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.SECONDS) + String secondTime = p.lineProtocol(TimeUnit.SECONDS).replace("measurement 
foo=\"bar\" ", ""); + + // THEN the timestamp is the seconds part of the Instant + assertThat(secondTime).isEqualTo(Long.toString(instant.getEpochSecond())); + } + + /** + * Tests for #267 + * + * @throws Exception + */ + @Test + public void testLineProtocolBigDecimal() throws Exception { + // GIVEN a point with nanosecond precision farther in the future than a long can hold + Instant instant = Instant.EPOCH.plus(600L * 365, ChronoUnit.DAYS); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(BigDecimal.valueOf(instant.getEpochSecond()) + .multiply(BigDecimal.valueOf(1000000000L)) + .add(BigDecimal.valueOf(instant.getNano())).add(BigDecimal.valueOf(1.9123456)), TimeUnit.NANOSECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.NANOSECONDS) + String nanosTime = p.lineProtocol(TimeUnit.NANOSECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is the integer part of the BigDecimal + assertThat(nanosTime).isEqualTo("18921600000000000001"); + } + + /** + * Tests for #267 + * + * @throws Exception + */ + @Test + public void testLineProtocolBigDecimalSeconds() throws Exception { + // GIVEN a point with nanosecond precision farther in the future than a long can hold + Instant instant = Instant.EPOCH.plus(600L * 365, ChronoUnit.DAYS); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(BigDecimal.valueOf(instant.getEpochSecond()) + .multiply(BigDecimal.valueOf(1000000000L)) + .add(BigDecimal.valueOf(instant.getNano())).add(BigDecimal.valueOf(1.9123456)), TimeUnit.NANOSECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.SECONDS) + String secondTime = p.lineProtocol(TimeUnit.SECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is the seconds part of the Instant + assertThat(secondTime).isEqualTo(Long.toString(instant.getEpochSecond())); + } + + /** + * Tests for #182 + * + * @throws Exception + */ + @Test + public void 
testLineProtocolNanosecondPrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.NANOSECONDS) + String nanosTime = p.lineProtocol(TimeUnit.NANOSECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in nanoseconds + assertThat(nanosTime).isEqualTo(String.valueOf(pDate.getTime() * 1000000)); + } + + @Test + public void testLineProtocolMicrosecondPrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.MICROSECONDS) + String microsTime = p.lineProtocol(TimeUnit.MICROSECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in microseconds + assertThat(microsTime).isEqualTo(String.valueOf(pDate.getTime() * 1000)); + } + + @Test + public void testLineProtocolMillisecondPrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.MILLISECONDS) + String millisTime = p.lineProtocol(TimeUnit.MILLISECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in microseconds + assertThat(millisTime).isEqualTo(String.valueOf(pDate.getTime())); + } + + @Test + public void testLineProtocolSecondPrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call 
lineProtocol(TimeUnit.SECONDS) + String secondTime = p.lineProtocol(TimeUnit.SECONDS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in seconds + String expectedSecondTimeStamp = String.valueOf(pDate.getTime() / 1000); + assertThat(secondTime).isEqualTo(expectedSecondTimeStamp); + } + + @Test + public void testLineProtocolMinutePrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.MINUTE) + String secondTime = p.lineProtocol(TimeUnit.MINUTES).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in seconds + String expectedSecondTimeStamp = String.valueOf(pDate.getTime() / 60000); + assertThat(secondTime).isEqualTo(expectedSecondTimeStamp); + } + + @Test + public void testLineProtocolHourPrecision() throws Exception { + // GIVEN a point with millisecond precision + Date pDate = new Date(); + Point p = Point + .measurement("measurement") + .addField("foo", "bar") + .time(pDate.getTime(), TimeUnit.MILLISECONDS) + .build(); + + // WHEN i call lineProtocol(TimeUnit.NANOSECONDS) + String hourTime = p.lineProtocol(TimeUnit.HOURS).replace("measurement foo=\"bar\" ", ""); + + // THEN the timestamp is in hours + String expectedHourTimeStamp = String.valueOf(Math.round(pDate.getTime() / 3600000)); // 1000ms * 60s * 60m + assertThat(hourTime).isEqualTo(expectedHourTimeStamp); + } + + /* + * Test if representation of tags in line protocol format should be sorted by tag key + */ + @Test + public void testTagKeyIsSortedInLineProtocol() { + Point p = Point + .measurement("cpu") + .time(1000000000L, TimeUnit.MILLISECONDS) + .addField("value", 1) + .tag("region", "us-west") + .tag("host", "serverA") + .tag("env", "prod") + .tag("target", "servers") + .tag("zone", "1c") + .tag("tag5", "value5") + .tag("tag1", "value1") + 
.tag("tag2", "value2") + .tag("tag3", "value3") + .tag("tag4", "value4") + .build(); + + String lineProtocol = p.lineProtocol(); + String correctOrder = "env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c"; + String tags = lineProtocol.substring(lineProtocol.indexOf(',') + 1, lineProtocol.indexOf(' ')); + assertThat(tags).isEqualTo(correctOrder); + } + + @Test + public void lineProtocolSkippingOfNanFields() { + String lineProtocol; + + lineProtocol = Point + .measurement("test") + .time(1, TimeUnit.MILLISECONDS) + .addField("float-valid", 1f) + .addField("float-nan", Float.NaN) + .addField("float-inf1", Float.NEGATIVE_INFINITY) + .addField("float-inf2", Float.POSITIVE_INFINITY) + .tag("host", "serverA") + .build() + .lineProtocol(TimeUnit.MILLISECONDS); + assertThat(lineProtocol).isEqualTo("test,host=serverA float-valid=1.0 1"); + + lineProtocol = Point + .measurement("test") + .time(1, TimeUnit.MILLISECONDS) + .addField("double-valid", 1d) + .addField("double-nan", Double.NaN) + .addField("double-inf1", Double.NEGATIVE_INFINITY) + .addField("double-inf2", Double.POSITIVE_INFINITY) + .tag("host", "serverA") + .build() + .lineProtocol(TimeUnit.MILLISECONDS); + assertThat(lineProtocol).isEqualTo("test,host=serverA double-valid=1.0 1"); + + lineProtocol = Point + .measurement("test") + .time(1, TimeUnit.MILLISECONDS) + .addField("double-nan", Double.NaN) + .tag("host", "serverA") + .build() + .lineProtocol(TimeUnit.MILLISECONDS); + assertThat(lineProtocol).isEqualTo(""); + } + + + @Test + public void testAddFieldsFromPOJONullCheck() { + Assertions.assertThrows(NullPointerException.class, () -> { + Point.measurementByPOJO(null); + }); + } + + @Test + public void testAddFieldsFromPOJOWithoutAnnotation() { + PojoWithoutAnnotation pojo = new PojoWithoutAnnotation(); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Point.measurementByPOJO(pojo.getClass()); + }); + } + + @Test + public 
void testAddFieldsFromPOJOWithoutColumnAnnotation() { + PojoWithMeasurement pojo = new PojoWithMeasurement(); + Assertions.assertThrows(BuilderException.class, () -> { + Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo); + }); + } + + @Test + public void testAddFieldsFromPOJOWithoutData() { + Pojo pojo = new Pojo(); + Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + } + + @Test + public void testAddFieldsFromPOJOWithTimeColumn() throws NoSuchFieldException, IllegalAccessException { + TimeColumnPojo pojo = new TimeColumnPojo(); + pojo.time = Instant.now(); + pojo.booleanPrimitive = true; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + Field timeField = p.getClass().getDeclaredField("time"); + Field precisionField = p.getClass().getDeclaredField("precision"); + timeField.setAccessible(true); + precisionField.setAccessible(true); + + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(TimeUnit.MILLISECONDS, precisionField.get(p)); + Assertions.assertEquals(TimeUnit.MILLISECONDS.convert(pojo.time.toEpochMilli(), TimeUnit.MILLISECONDS), timeField.get(p)); + + pojo.time = null; + } + + @Test + public void testAddFieldsFromPOJOWithTimeColumnNanoseconds() throws NoSuchFieldException, IllegalAccessException { + TimeColumnPojoNano pojo = new TimeColumnPojoNano(); + pojo.time = Instant.now().plusNanos(13213L).plus(365L * 12000, ChronoUnit.DAYS); + pojo.booleanPrimitive = true; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + Field timeField = p.getClass().getDeclaredField("time"); + Field precisionField = p.getClass().getDeclaredField("precision"); + timeField.setAccessible(true); + precisionField.setAccessible(true); + + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(TimeUnit.NANOSECONDS, precisionField.get(p)); + 
Assertions.assertEquals(BigInteger.valueOf(pojo.time.getEpochSecond()) + .multiply(BigInteger.valueOf(1000000000L)).add( + BigInteger.valueOf(pojo.time.getNano())), timeField.get(p)); + + pojo.time = null; + } + + @Test + public void testAddFieldsFromPOJOWithTimeColumnSeconds() throws NoSuchFieldException, IllegalAccessException { + TimeColumnPojoSec pojo = new TimeColumnPojoSec(); + pojo.time = Instant.now().plusSeconds(132L).plus(365L * 12000, ChronoUnit.DAYS); + pojo.booleanPrimitive = true; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + Field timeField = p.getClass().getDeclaredField("time"); + Field precisionField = p.getClass().getDeclaredField("precision"); + timeField.setAccessible(true); + precisionField.setAccessible(true); + + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(TimeUnit.SECONDS, precisionField.get(p)); + Assertions.assertEquals(pojo.time.getEpochSecond(), timeField.get(p)); + } + + @Test + public void testAddFieldsFromPOJOWithBadTimeColumn() { + BadTimeColumnPojo pojo = new BadTimeColumnPojo(); + Assertions.assertThrows(InfluxDBMapperException.class, + () -> Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build()); + } + + @Test + public void testAddFieldsFromPOJOWithTimeColumnNull() throws NoSuchFieldException, IllegalAccessException { + TimeColumnPojo pojo = new TimeColumnPojo(); + pojo.booleanPrimitive = true; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + Field timeField = p.getClass().getDeclaredField("time"); + Field precisionField = p.getClass().getDeclaredField("precision"); + timeField.setAccessible(true); + precisionField.setAccessible(true); + + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + + pojo.time = null; + } + + @Test + public void testAddFieldsFromPOJOWithData() { + Pojo pojo = new Pojo(); + pojo.booleanObject = 
true; + pojo.booleanPrimitive = false; + pojo.doubleObject = 2.0; + pojo.doublePrimitive = 3.1; + pojo.integerObject = 32; + pojo.integerPrimitive = 64; + pojo.longObject = 1L; + pojo.longPrimitive = 2L; + pojo.time = Instant.now(); + pojo.uuid = "TEST"; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + + Assertions.assertEquals(pojo.booleanObject, p.getFields().get("booleanObject")); + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(pojo.doubleObject, p.getFields().get("doubleObject")); + Assertions.assertEquals(pojo.doublePrimitive, p.getFields().get("doublePrimitive")); + Assertions.assertEquals(pojo.integerObject, p.getFields().get("integerObject")); + Assertions.assertEquals(pojo.integerPrimitive, p.getFields().get("integerPrimitive")); + Assertions.assertEquals(pojo.longObject, p.getFields().get("longObject")); + Assertions.assertEquals(pojo.longPrimitive, p.getFields().get("longPrimitive")); + Assertions.assertEquals(pojo.uuid, p.getTags().get("uuid")); + } + + @Test + public void testAddFieldsFromPOJOConsistentWithAddField() { + PojoNumberPrimitiveTypes pojo = new PojoNumberPrimitiveTypes(); + pojo.shortType = 128; + pojo.intType = 1_048_576; + pojo.longType = 1_073_741_824; + pojo.floatType = 246.8f; + pojo.doubleType = 123.4; + + Point pojo_point = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + mockInfluxDB.write(pojo_point); + + Point expected = Point.measurement("PojoNumberPrimitiveTypes") + .addField("shortType", pojo.shortType) + .addField("intType", pojo.intType) + .addField("longType", pojo.longType) + .addField("floatType", pojo.floatType) + .addField("doubleType", pojo.doubleType) + .build(); + + Assert.assertEquals(pojo_point.lineProtocol(), expected.lineProtocol()); + Mockito.verify(mockInfluxDB).write(expected); + } + + @Test + public void 
testAddFieldsFromPOJOWithPublicAttributes() { + + PojoWithPublicAttributes pojo = new PojoWithPublicAttributes(); + pojo.booleanObject = true; + pojo.booleanPrimitive = false; + pojo.doubleObject = 2.0; + pojo.doublePrimitive = 3.1; + pojo.integerObject = 32; + pojo.integerPrimitive = 64; + pojo.longObject = 1L; + pojo.longPrimitive = 2L; + pojo.time = Instant.now(); + pojo.uuid = "TEST"; + + Point p = Point.measurementByPOJO(pojo.getClass()).addFieldsFromPOJO(pojo).build(); + + Assertions.assertEquals(pojo.booleanObject, p.getFields().get("booleanObject")); + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(pojo.doubleObject, p.getFields().get("doubleObject")); + Assertions.assertEquals(pojo.doublePrimitive, p.getFields().get("doublePrimitive")); + Assertions.assertEquals(pojo.integerObject, p.getFields().get("integerObject")); + Assertions.assertEquals(pojo.integerPrimitive, p.getFields().get("integerPrimitive")); + Assertions.assertEquals(pojo.longObject, p.getFields().get("longObject")); + Assertions.assertEquals(pojo.longPrimitive, p.getFields().get("longPrimitive")); + Assertions.assertEquals(pojo.uuid, p.getTags().get("uuid")); + } + + @Test + public void testAddFieldsFromPojoWithAllFieldsAnnotation() { + + PojoWithAllFieldsAnnotation pojo = new PojoWithAllFieldsAnnotation(); + pojo.booleanObject = true; + pojo.booleanPrimitive = false; + pojo.doubleObject = 2.0; + pojo.doublePrimitive = 3.1; + pojo.integerObject = 32; + pojo.integerPrimitive = 64; + pojo.longObject = 1L; + pojo.longPrimitive = 2L; + pojo.time = Instant.now(); + pojo.uuid = "TEST"; + + Point p = Point.measurementByPOJO(PojoWithAllFieldsAnnotation.class).addFieldsFromPOJO(pojo).build(); + + assertThat(p.lineProtocol()).startsWith("mymeasurement"); + assertThat(p.getFields()).doesNotContainKey("staticField"); + Assertions.assertEquals(pojo.booleanObject, p.getFields().get("booleanObject")); + 
Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(pojo.doubleObject, p.getFields().get("doubleObject")); + Assertions.assertEquals(pojo.doublePrimitive, p.getFields().get("doublePrimitive")); + Assertions.assertEquals(pojo.integerObject, p.getFields().get("integerObject")); + Assertions.assertEquals(pojo.integerPrimitive, p.getFields().get("integerPrimitive")); + Assertions.assertEquals(pojo.longObject, p.getFields().get("longObject")); + Assertions.assertEquals(pojo.longPrimitive, p.getFields().get("longPrimitive")); + Assertions.assertEquals(pojo.uuid, p.getTags().get("uuid")); + } + + @Test + public void testAddFieldsFromPojoWithBlankColumnAnnotations() { + PojoWithBlankColumnAnnotations pojo = new PojoWithBlankColumnAnnotations(); + pojo.booleanObject = true; + pojo.booleanPrimitive = false; + pojo.doubleObject = 2.0; + pojo.doublePrimitive = 3.1; + pojo.integerObject = 32; + pojo.integerPrimitive = 64; + pojo.longObject = 1L; + pojo.longPrimitive = 2L; + pojo.time = Instant.now(); + pojo.uuid = "TEST"; + + Point p = Point.measurementByPOJO(PojoWithBlankColumnAnnotations.class).addFieldsFromPOJO(pojo).build(); + + Assertions.assertEquals(pojo.booleanObject, p.getFields().get("booleanObject")); + Assertions.assertEquals(pojo.booleanPrimitive, p.getFields().get("booleanPrimitive")); + Assertions.assertEquals(pojo.doubleObject, p.getFields().get("doubleObject")); + Assertions.assertEquals(pojo.doublePrimitive, p.getFields().get("doublePrimitive")); + Assertions.assertEquals(pojo.integerObject, p.getFields().get("integerObject")); + Assertions.assertEquals(pojo.integerPrimitive, p.getFields().get("integerPrimitive")); + Assertions.assertEquals(pojo.longObject, p.getFields().get("longObject")); + Assertions.assertEquals(pojo.longPrimitive, p.getFields().get("longPrimitive")); + Assertions.assertEquals(pojo.uuid, p.getTags().get("uuid")); + } + + @Test + public void testInheritMeasurement() { + Point 
expected = Point.measurementByPOJO(SubClassMeasurement.class) + .addField("superClassField", "super") + .addField("subClassField", "sub") + .build(); + SubClassMeasurement scm = new SubClassMeasurement(); + scm.subValue = "sub"; + scm.superValue = "super"; + + Point actual = Point.measurementByPOJO(SubClassMeasurement.class) + .addFieldsFromPOJO(scm) + .build(); + Assert.assertEquals(expected, actual); + } + + @Test + public void testGenericInheritMeasurement() { + Point expected = Point.measurementByPOJO(MyGenericSubMeasurement.class) + .addField("superValue", "super") + .addField("subValue", "sub") + .build(); + MyGenericSubMeasurement scm = new MyGenericSubMeasurement(); + scm.subValue = "sub"; + scm.superValue = "super"; + + Point actual = Point.measurementByPOJO(MyGenericSubMeasurement.class) + .addFieldsFromPOJO(scm) + .build(); + Assert.assertEquals(expected, actual); + } + + static class PojoWithoutAnnotation { + + private String id; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + } + + @Measurement(name = "mymeasurement") + static class PojoWithMeasurement { + + private String id; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + } + + @Measurement(name = "tcmeasurement") + static class TimeColumnPojo { + @Column(name = "booleanPrimitive") + private boolean booleanPrimitive; + + @TimeColumn + @Column(name = "time") + private Instant time; + } + + @Measurement(name = "tcmeasurement") + static class TimeColumnPojoNano { + @Column(name = "booleanPrimitive") + private boolean booleanPrimitive; + + @TimeColumn(timeUnit = TimeUnit.NANOSECONDS) + @Column(name = "time") + private Instant time; + } + + @Measurement(name = "tcmeasurement", allFields = true) + static class TimeColumnPojoSec { + boolean booleanPrimitive; + + @TimeColumn(timeUnit = TimeUnit.SECONDS) + Instant time; + } + + @Measurement(name = "tcmeasurement", allFields = true) + static class 
BadTimeColumnPojo { + boolean booleanPrimitive; + + @TimeColumn + String time; + } + + @Measurement(name = "mymeasurement") + static class Pojo { + + @Column(name = "booleanPrimitive") + private boolean booleanPrimitive; + + @Column(name = "time") + @TimeColumn + private Instant time; + + @Column(name = "uuid", tag = true) + private String uuid; + + @Column(name = "doubleObject") + private Double doubleObject; + + @Column(name = "longObject") + private Long longObject; + + @Column(name = "integerObject") + private Integer integerObject; + + @Column(name = "doublePrimitive") + private double doublePrimitive; + + @Column(name = "longPrimitive") + private long longPrimitive; + + @Column(name = "integerPrimitive") + private int integerPrimitive; + + @Column(name = "booleanObject") + private Boolean booleanObject; + + + public Instant getTime() { + return time; + } + + public void setTime(Instant time) { + this.time = time; + } + + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } + + public Double getDoubleObject() { + return doubleObject; + } + + public void setDoubleObject(Double doubleObject) { + this.doubleObject = doubleObject; + } + + public Long getLongObject() { + return longObject; + } + + public void setLongObject(Long longObject) { + this.longObject = longObject; + } + + public Integer getIntegerObject() { + return integerObject; + } + + public void setIntegerObject(Integer integerObject) { + this.integerObject = integerObject; + } + + public double getDoublePrimitive() { + return doublePrimitive; + } + + public void setDoublePrimitive(double doublePrimitive) { + this.doublePrimitive = doublePrimitive; + } + + public long getLongPrimitive() { + return longPrimitive; + } + + public void setLongPrimitive(long longPrimitive) { + this.longPrimitive = longPrimitive; + } + + public int getIntegerPrimitive() { + return integerPrimitive; + } + + public void setIntegerPrimitive(int integerPrimitive) { + 
this.integerPrimitive = integerPrimitive; + } + + public Boolean getBooleanObject() { + return booleanObject; + } + + public void setBooleanObject(Boolean booleanObject) { + this.booleanObject = booleanObject; + } + + public boolean isBooleanPrimitive() { + return booleanPrimitive; + } + + public void setBooleanPrimitive(boolean booleanPrimitive) { + this.booleanPrimitive = booleanPrimitive; + } + + } + + @Measurement(name = "mymeasurement") + static class PojoWithPublicAttributes { + + @Column(name = "booleanPrimitive") + public boolean booleanPrimitive; + + @Column(name = "time") + @TimeColumn + public Instant time; + + @Column(name = "uuid", tag = true) + public String uuid; + + @Column(name = "doubleObject") + public Double doubleObject; + + @Column(name = "longObject") + public Long longObject; + + @Column(name = "integerObject") + public Integer integerObject; + + @Column(name = "doublePrimitive") + public double doublePrimitive; + + @Column(name = "longPrimitive") + public long longPrimitive; + + @Column(name = "integerPrimitive") + public int integerPrimitive; + + @Column(name = "booleanObject") + public Boolean booleanObject; + } + + @Measurement(name = "mymeasurement", allFields = true) + static class PojoWithAllFieldsAnnotation { + public static final String staticField = "static"; + + public boolean booleanPrimitive; + + @TimeColumn + public Instant time; + + @Column(tag = true) + public String uuid; + + public Double doubleObject; + public Long longObject; + public Integer integerObject; + public double doublePrimitive; + public long longPrimitive; + public int integerPrimitive; + public Boolean booleanObject; + } + + @Measurement(name = "mymeasurement") + static class PojoWithBlankColumnAnnotations { + + @Column + public boolean booleanPrimitive; + + @Column + @TimeColumn + public Instant time; + + @Column(tag = true) + public String uuid; + + @Column + public Double doubleObject; + + @Column + public Long longObject; + + @Column + public Integer 
integerObject; + + @Column + public double doublePrimitive; + + @Column + public long longPrimitive; + + @Column + public int integerPrimitive; + + @Column + public Boolean booleanObject; + } + + @Measurement(name = "SuperMeasuremet") + static class SuperMeasurement { + @Column(name = "superClassField") + String superValue; + } + + @Measurement(name = "SubMeasurement") + static class SubClassMeasurement extends SuperMeasurement { + @Column(name = "subClassField") + String subValue; + } + + @Measurement(name = "SuperMeasurement") + static class MyGenericSuperMeasurement { + + @Column(name = "superValue") + protected T superValue; + } + + @Measurement(name = "SubMeasurement") + static class MyGenericSubMeasurement extends MyGenericSuperMeasurement { + + @Column(name = "subValue") + protected String subValue; + } + + @Measurement(name = "PojoNumberPrimitiveTypes") + static class PojoNumberPrimitiveTypes + { + @Column(name = "shortType") + public short shortType; + + @Column(name = "intType") + public int intType; + + @Column(name = "longType") + public long longType; + + @Column(name = "floatType") + public float floatType; + + @Column(name = "doubleType") + public double doubleType; + } } diff --git a/src/test/java/org/influxdb/dto/QueryTest.java b/src/test/java/org/influxdb/dto/QueryTest.java index 5231faa9b..faf4e11ea 100644 --- a/src/test/java/org/influxdb/dto/QueryTest.java +++ b/src/test/java/org/influxdb/dto/QueryTest.java @@ -1,8 +1,11 @@ package org.influxdb.dto; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertNull; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; @@ -15,6 +18,7 @@ * @author jord [at] moz.com * */ +@RunWith(JUnitPlatform.class) public class QueryTest { /** @@ -87,4 +91,17 @@ public void 
testGetCommandWithUrlEncoded() throws UnsupportedEncodingException { private static String decode(String str) throws UnsupportedEncodingException { return URLDecoder.decode(str, StandardCharsets.UTF_8.toString()); } + + /** + * Test that equals does what it is supposed to do. + */ + @Test + public void testDatabaseName() { + String command = "go"; + + Query query = new Query(command); + + assertNull(query.getDatabase()); + } + } diff --git a/src/test/java/org/influxdb/impl/BatchProcessorTest.java b/src/test/java/org/influxdb/impl/BatchProcessorTest.java index b43a6a322..df28da328 100644 --- a/src/test/java/org/influxdb/impl/BatchProcessorTest.java +++ b/src/test/java/org/influxdb/impl/BatchProcessorTest.java @@ -1,23 +1,38 @@ package org.influxdb.impl; -import static org.hamcrest.CoreMatchers.hasItems; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; import static org.mockito.hamcrest.MockitoHamcrest.argThat; import java.io.IOException; +import java.lang.reflect.Field; +import java.util.Collection; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import org.hamcrest.Matchers; +import org.influxdb.BatchOptions; import org.influxdb.InfluxDB; +import org.influxdb.TestUtils; import org.influxdb.dto.BatchPoints; import org.influxdb.dto.Point; -import org.junit.Test; +import 
org.influxdb.dto.Query; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +@RunWith(JUnitPlatform.class) public class BatchProcessorTest { @Test @@ -60,7 +75,7 @@ public void testSchedulerExceptionHandlingCallback() throws InterruptedException batchProcessor.put(batchEntry1); Thread.sleep(200); // wait for scheduler - verify(mockHandler, times(1)).accept(argThat(hasItems(point, point)), any(RuntimeException.class)); + verify(mockHandler, times(1)).accept(argThat(Matchers.hasItems(point, point)), any(RuntimeException.class)); } @Test @@ -107,24 +122,133 @@ public void testFlushWritesBufferedPointsAndDoesNotShutdownScheduler() throws In verifyNoMoreInteractions(mockInfluxDB); } - @Test(expected = IllegalArgumentException.class) + @Test public void testActionsIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); - BatchProcessor.builder(mockInfluxDB).actions(0) - .interval(1, TimeUnit.NANOSECONDS).build(); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(0) + .interval(1, TimeUnit.NANOSECONDS).build(); + }); } - @Test(expected = IllegalArgumentException.class) + @Test public void testIntervalIsZero() throws InterruptedException, IOException { InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); - BatchProcessor.builder(mockInfluxDB).actions(1) - .interval(0, TimeUnit.NANOSECONDS).build(); + Assertions.assertThrows(IllegalArgumentException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(1) + .interval(0, TimeUnit.NANOSECONDS).build(); + }); } - @Test(expected = NullPointerException.class) + @Test public void testInfluxDBIsNull() throws InterruptedException, IOException { InfluxDB mockInfluxDB = null; - BatchProcessor.builder(mockInfluxDB).actions(1) - .interval(1, 
TimeUnit.NANOSECONDS).build(); + Assertions.assertThrows(NullPointerException.class, () -> { + BatchProcessor.builder(mockInfluxDB).actions(1) + .interval(1, TimeUnit.NANOSECONDS).build(); + }); } + + @Test + public void testConsistencyLevelNull() throws InterruptedException, IOException { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).build(); + assertNull(batchProcessor.getConsistencyLevel()); + } + + @Test + public void testConsistencyLevelUpdated() throws InterruptedException, IOException { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).consistencyLevel(InfluxDB.ConsistencyLevel.ANY).build(); + assertThat(batchProcessor.getConsistencyLevel(), is(equalTo(InfluxDB.ConsistencyLevel.ANY))); + } + + @Test + public void testDropOnActionQueueExhaustionDefault() { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).build(); + Assertions.assertEquals(false, batchProcessor.isDropActionsOnQueueExhaustion()); + } + + + @Test + public void testDropOnActionQueueExhaustionUpdated() { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).dropActionsOnQueueExhaustion(true).build(); + Assertions.assertEquals(true, batchProcessor.isDropActionsOnQueueExhaustion()); + } + + @Test + @SuppressWarnings("unchecked") + public void precision() throws Exception { + String dbName = "write_unittest_" + System.currentTimeMillis(); + String rpName = "somePolicy"; + BatchWriter batchWriter; + try (InfluxDB influxDB = TestUtils.connectToInfluxDB()) { + try 
{ + influxDB.createDatabase(dbName); + influxDB.query(new Query("CREATE RETENTION POLICY " + rpName + " ON " + dbName + " DURATION 30h REPLICATION 2 DEFAULT")); + + influxDB.enableBatch(BatchOptions.DEFAULTS.actions(2000).precision(TimeUnit.SECONDS).flushDuration(100)); + + BatchProcessor batchProcessor = getPrivateField(influxDB, "batchProcessor"); + BatchWriter originalBatchWriter = getPrivateField(batchProcessor, "batchWriter"); + batchWriter = Mockito.spy(originalBatchWriter); + setPrivateField(batchProcessor, "batchWriter", batchWriter); + + Point point1 = Point.measurement("cpu") + .time(System.currentTimeMillis() /1000, TimeUnit.SECONDS) + .addField("idle", 90L) + .addField("user", 9L) + .addField("system", 1L) + .build(); + + influxDB.write(dbName, rpName, point1); + + } finally { + influxDB.deleteDatabase(dbName); + } + } + + ArgumentCaptor> argument = ArgumentCaptor.forClass(Collection.class); + + verify(batchWriter, atLeastOnce()).write(argument.capture()); + + for (Collection list : argument.getAllValues()) { + for (BatchPoints p : list) { + assertTrue(p.toString().contains("precision=SECONDS")); + assertFalse(p.toString().contains("precision=NANOSECONDS")); + } + } + } + + @Test + @SuppressWarnings("unchecked") + public void randomSupplier() { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BatchProcessor batchProcessor = BatchProcessor.builder(mockInfluxDB).actions(Integer.MAX_VALUE) + .interval(1, TimeUnit.NANOSECONDS).build(); + + Double random = batchProcessor.randomSupplier.get(); + assertTrue(random >= 0); + assertTrue(random < 1); + Assertions.assertNotEquals(random, batchProcessor.randomSupplier.get()); + } + + @SuppressWarnings("unchecked") + public static T getPrivateField(final Object obj, final String name) throws Exception { + Field field = obj.getClass().getDeclaredField(name); + field.setAccessible(true); + return (T) field.get(obj); + } + + public static void setPrivateField(final Object obj, final String name, final Object value) 
throws Exception { + Field field = obj.getClass().getDeclaredField(name); + field.setAccessible(true); + field.set(obj, value); + } } diff --git a/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java new file mode 100644 index 000000000..2d8f6acdd --- /dev/null +++ b/src/test/java/org/influxdb/impl/ChunkingExceptionTest.java @@ -0,0 +1,110 @@ +package org.influxdb.impl; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.EOFException; +import java.io.IOException; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import org.influxdb.InfluxDB; +import org.influxdb.TestUtils; +import org.influxdb.dto.Query; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; + +import com.squareup.moshi.JsonAdapter; +import com.squareup.moshi.JsonReader; + +import okhttp3.OkHttpClient; +import okhttp3.ResponseBody; +import okio.Buffer; +import retrofit2.Call; +import retrofit2.Callback; +import retrofit2.Response; + +@RunWith(JUnitPlatform.class) +public class ChunkingExceptionTest { + + @Test + public void testChunkingIOException() throws IOException, InterruptedException { + + testChunkingException(new IOException(), "java.io.IOException", null); + } + + @Test + public void testChunkingEOFException() throws IOException, InterruptedException { + + testChunkingException(new EOFException(), "DONE", null); + } + + @Test + public void testChunkingIOExceptionOnFailure() throws IOException, 
InterruptedException { + + testChunkingException(new IOException(), "java.io.IOException", Assertions::assertNotNull); + } + + @Test + public void testChunkingEOFExceptionOnFailure() throws IOException, InterruptedException { + + testChunkingException(new EOFException(), "DONE", Assertions::assertNotNull); + } + + public void testChunkingException(Exception ex, String message, Consumer onFailure) throws IOException, InterruptedException { + + InfluxDBService influxDBService = mock(InfluxDBService.class); + JsonAdapter adapter = mock(JsonAdapter.class); + Call call = mock(Call.class); + ResponseBody responseBody = mock(ResponseBody.class); + + when(influxDBService.query(any(String.class), any(String.class), anyInt())).thenReturn(call); + when(responseBody.source()).thenReturn(new Buffer()); + doThrow(ex).when(adapter).fromJson(any(JsonReader.class)); + + String url = "http://" + TestUtils.getInfluxIP() + ":" + TestUtils.getInfluxPORT(true); + try (InfluxDB influxDB = new InfluxDBImpl(url, "admin", "admin", new OkHttpClient.Builder(), influxDBService, adapter) { + @Override + public String version() { + return "9.99"; + } + }) { + + String dbName = "write_unittest_" + System.currentTimeMillis(); + final BlockingQueue queue = new LinkedBlockingQueue<>(); + Query query = new Query("SELECT * FROM disk", dbName); + + if (onFailure == null) { + influxDB.query(query, 2, queue::add); + } else { + //test with onComplete and onFailure consumer + influxDB.query(query, 2, (cancellable, result) -> queue.add(result), + //on complete + () -> { }, + onFailure + ); + } + + ArgumentCaptor> argumentCaptor = ArgumentCaptor.forClass(Callback.class); + verify(call).enqueue(argumentCaptor.capture()); + Callback callback = argumentCaptor.getValue(); + + callback.onResponse(call, Response.success(responseBody)); + + QueryResult result = queue.poll(20, TimeUnit.SECONDS); + Assertions.assertNotNull(result); + Assertions.assertEquals(message, result.getError()); + } + } + +} diff --git 
a/src/test/java/org/influxdb/impl/InfluxDBAndroidDesugaredRecordResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBAndroidDesugaredRecordResultMapperTest.java new file mode 100644 index 000000000..f1dc21dce --- /dev/null +++ b/src/test/java/org/influxdb/impl/InfluxDBAndroidDesugaredRecordResultMapperTest.java @@ -0,0 +1,819 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package org.influxdb.impl; + +import com.android.tools.r8.RecordTag; +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.dto.QueryResult; +import org.junit.Assert; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import java.time.Instant; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * Test measurement classes simulate Android desugared records. + * + * @author Eran Leshem + */ +@SuppressWarnings({"removal", "deprecation"}) +@RunWith(JUnitPlatform.class) +public class InfluxDBAndroidDesugaredRecordResultMapperTest { + + private final InfluxDBResultMapper mapper = new InfluxDBResultMapper(); + + @Test + public void testToRecord_HappyPath() { + // Given... + List columnList = Arrays.asList("time", "uuid"); + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setName("CustomMeasurement"); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... 
+ Assertions.assertEquals(1, myList.size(), "there must be one entry in the result list"); + } + + @Test + public void testThrowExceptionIfMissingAnnotation() { + Assertions.assertThrows(IllegalArgumentException.class, + () -> mapper.throwExceptionIfMissingAnnotation(String.class)); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultHasError() { + QueryResult queryResult = new QueryResult(); + queryResult.setError("main queryresult error"); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> mapper.throwExceptionIfResultWithError(queryResult)); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { + QueryResult.Result seriesResult = new QueryResult.Result(); + seriesResult.setError("series error"); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(seriesResult)); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> mapper.throwExceptionIfResultWithError(queryResult)); + } + + @Test + public void testGetMeasurementName_testStateMeasurement() { + Assertions.assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); + } + + @Test + public void testParseSeriesAs_testTwoValidSeries() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time", "uuid"); + + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + List secondSeriesResult = Arrays.asList(Instant.now().plusSeconds(1).toEpochMilli(), + UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult, secondSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... 
+ Assertions.assertTrue(result.size() == 2, "there must be two series in the result list"); + + Assertions.assertEquals(firstSeriesResult.get(0), result.get(0).time().toEpochMilli(), + "Field 'time' (1st series) is not valid"); + Assertions.assertEquals(firstSeriesResult.get(1), result.get(0).uuid(), "Field 'uuid' (1st series) is not valid"); + + Assertions.assertEquals(secondSeriesResult.get(0), result.get(1).time().toEpochMilli(), + "Field 'time' (2nd series) is not valid"); + Assertions.assertEquals(secondSeriesResult.get(1), result.get(1).uuid(), "Field 'uuid' (2nd series) is not valid"); + } + + @Test + public void testParseSeriesAs_testNonNullAndValidValues() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurementWithPrimitives.class); + + List columnList = Arrays.asList("time", "uuid", + "doubleObject", "longObject", "integerObject", + "doublePrimitive", "longPrimitive", "integerPrimitive", + "booleanObject", "booleanPrimitive"); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + // InfluxDB client returns any number as Double. + // See https://github.com/influxdata/influxdb-java/issues/153#issuecomment-259681987 + // for more information. + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + String uuidAsString = UUID.randomUUID().toString(); + List seriesResult = Arrays.asList(now, uuidAsString, + new Double("1.01"), new Double("2"), new Double("3"), + new Double("1.01"), new Double("4"), new Double("5"), + "false", "true"); + series.setValues(Arrays.asList(seriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurementWithPrimitives.class, result); + + //Then... 
+ MyCustomMeasurementWithPrimitives myObject = result.get(0); + Assertions.assertEquals(now.longValue(), myObject.time().toEpochMilli(), "field 'time' does not match"); + Assertions.assertEquals(uuidAsString, myObject.uuid(), "field 'uuid' does not match"); + + Assertions.assertEquals(asDouble(seriesResult.get(2)), myObject.doubleObject(), + "field 'doubleObject' does not match"); + Assertions.assertEquals(Long.valueOf(asDouble(seriesResult.get(3)).longValue()), myObject.longObject(), + "field 'longObject' does not match"); + Assertions.assertEquals(Integer.valueOf(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject(), + "field 'integerObject' does not match"); + + Assertions.assertTrue( + Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive()) == 0, + "field 'doublePrimitive' does not match"); + + Assertions.assertTrue(asDouble(seriesResult.get(6)).longValue() == myObject.longPrimitive(), + "field 'longPrimitive' does not match"); + + Assertions.assertTrue(asDouble(seriesResult.get(7)).intValue() == myObject.integerPrimitive(), + "field 'integerPrimitive' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject(), + "field 'booleanObject' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive(), + "field 'booleanPrimitive' does not match"); + } + + private static Double asDouble(Object obj) { + return (Double) obj; + } + + @Test + public void testFieldValueModified_DateAsISO8601() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList("2017-06-19T09:29:45.655123Z"); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... 
+ List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testFieldValueModified_DateAsInteger() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList(1_000); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testUnsupportedField() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyRecordWithUnsupportedField.class); + + List columnList = Arrays.asList("bar"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList("content representing a Date"); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + Assertions.assertThrows(InfluxDBMapperException.class, + () -> mapper.parseSeriesAs(series, MyRecordWithUnsupportedField.class, result)); + } + + /** + * for more information. + */ + @Test + public void testToRecord_SeriesFromQueryResultIsNull() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(null); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... 
+ Assertions.assertTrue(myList.isEmpty(), "there must NO entry in the result list"); + } + + @Test + public void testToRecord_QueryResultCreatedByGroupByClause() { + // Given... + InfluxDBResultMapper.cacheRecordClass(GroupByCarrierDeviceOS.class); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + // When the "GROUP BY" clause is used, "tags" are returned as Map + Map firstSeriesTagMap = new HashMap<>(2); + firstSeriesTagMap.put("CARRIER", "000/00"); + firstSeriesTagMap.put("DEVICE_OS_VERSION", "4.4.2"); + + Map secondSeriesTagMap = new HashMap<>(2); + secondSeriesTagMap.put("CARRIER", "000/01"); + secondSeriesTagMap.put("DEVICE_OS_VERSION", "9.3.5"); + + QueryResult.Series firstSeries = new QueryResult.Series(); + List columnList = Arrays.asList("time", "median", "min", "max"); + firstSeries.setColumns(columnList); + List firstSeriesResult = Arrays.asList(now, new Double("233.8"), new Double("0.0"), + new Double("3090744.0")); + firstSeries.setValues(Arrays.asList(firstSeriesResult)); + firstSeries.setTags(firstSeriesTagMap); + firstSeries.setName("tb_network"); + + QueryResult.Series secondSeries = new QueryResult.Series(); + secondSeries.setColumns(columnList); + List secondSeriesResult = Arrays.asList(now, new Double("552.0"), new Double("135.0"), + new Double("267705.0")); + secondSeries.setValues(Arrays.asList(secondSeriesResult)); + secondSeries.setTags(secondSeriesTagMap); + secondSeries.setName("tb_network"); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(firstSeries, secondSeries)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, GroupByCarrierDeviceOS.class); + + // Then... 
+ GroupByCarrierDeviceOS firstGroupByEntry = myList.get(0); + Assertions.assertEquals("000/00", firstGroupByEntry.carrier(), "field 'carrier' does not match"); + Assertions.assertEquals("4.4.2", firstGroupByEntry.deviceOsVersion(), "field 'deviceOsVersion' does not match"); + + GroupByCarrierDeviceOS secondGroupByEntry = myList.get(1); + Assertions.assertEquals("000/01", secondGroupByEntry.carrier(), "field 'carrier' does not match"); + Assertions.assertEquals("9.3.5", secondGroupByEntry.deviceOsVersion(), "field 'deviceOsVersion' does not match"); + } + + @Test + public void testToRecord_ticket363() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList("2000-01-01T00:00:00.000000001Z"); + series.setValues(Arrays.asList(firstSeriesResult)); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... + Assertions.assertEquals(1, result.size(), "incorrect number of elemets"); + Assertions.assertEquals(1, result.get(0).time().getNano(), "incorrect value for the nanoseconds field"); + } + + @Test + void testToRecord_Precision() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("CustomMeasurement"); + List columnList = Arrays.asList("time"); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList(1_500_000L); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... 
+ List result = mapper.toPOJO(queryResult, MyCustomMeasurement.class, TimeUnit.SECONDS); + + // Then... + Assertions.assertEquals(1, result.size(), "incorrect number of elements"); + Assertions.assertEquals(1_500_000_000L, result.get(0).time().toEpochMilli(), + "incorrect value for the millis field"); + } + + @Test + void testToRecord_SetMeasureName() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("MySeriesName"); + List columnList = Arrays.asList("uuid"); + series.setColumns(columnList); + List firstSeriesResult = Collections.singletonList(UUID.randomUUID().toString()); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List result = + mapper.toPOJO(queryResult, MyCustomMeasurement.class, "MySeriesName"); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testToRecord_HasTimeColumn() { + // Given... + InfluxDBResultMapper.cacheRecordClass(HasTimeColumnMeasurement.class); + + List columnList = Arrays.asList("time"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List> valuesList = Arrays.asList( + Arrays.asList("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + Arrays.asList("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + Arrays.asList("2000-01-01T00:00:00-00:00"), + Arrays.asList("2000-01-02T00:00:00+00:00") + ); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, HasTimeColumnMeasurement.class, result); + + // Then... 
+ Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time().equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time().equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time().equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time().equals(Instant.parse("2000-01-02T00:00:00Z"))); + + } + + @Test + public void testToRecord_ticket573() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List> valuesList = Arrays.asList( + Arrays.asList("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + Arrays.asList("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + Arrays.asList("2000-01-01T00:00:00-00:00"), + Arrays.asList("2000-01-02T00:00:00+00:00") + ); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... 
+ Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time().equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time().equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time().equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time().equals(Instant.parse("2000-01-02T00:00:00Z"))); + } + + @Test + public void testMultipleConstructors() { + // Given... + InfluxDBResultMapper.cacheRecordClass(MultipleConstructors.class); + + List columnList = Arrays.asList("i", "s"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + List firstSeriesResult = Arrays.asList(9.0, "str"); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MultipleConstructors.class, result); + + //Then... 
+ Assertions.assertTrue(result.size() == 1); + + Assert.assertEquals(9, result.get(0).i()); + Assert.assertEquals("str", result.get(0).s()); + } + + @Test + public void testConflictingConstructors() { + Assert.assertThrows(InfluxDBMapperException.class, + () -> InfluxDBResultMapper.cacheRecordClass(ConflictingConstructors.class)); + } + + @Measurement(name = "HasTimeColumnMeasurement") + static final class HasTimeColumnMeasurement extends RecordTag { + @TimeColumn + private final Instant time; + private final Integer value; + + HasTimeColumnMeasurement(Instant time, Integer value) { + this.time = time; + this.value = value; + } + + public Instant time() { + return time; + } + + public Integer value() { + return value; + } + } + + @Measurement(name = "CustomMeasurement") + static final class MyCustomMeasurement extends RecordTag { + private final Instant time; + private final String uuid; + private final Double doubleObject; + private final Long longObject; + private final Integer integerObject; + private final Boolean booleanObject; + + @SuppressWarnings("unused") + private final String nonColumn1; + + @SuppressWarnings("unused") + private final Random rnd; + + MyCustomMeasurement( + Instant time, + String uuid, + Double doubleObject, + Long longObject, + Integer integerObject, + Boolean booleanObject, + + @SuppressWarnings("unused") + String nonColumn1, + + @SuppressWarnings("unused") + Random rnd) { + this.time = time; + this.uuid = uuid; + this.doubleObject = doubleObject; + this.longObject = longObject; + this.integerObject = integerObject; + this.booleanObject = booleanObject; + this.nonColumn1 = nonColumn1; + this.rnd = rnd; + } + + public Instant time() { + return time; + } + + public String uuid() { + return uuid; + } + + public Double doubleObject() { + return doubleObject; + } + + public Long longObject() { + return longObject; + } + + public Integer integerObject() { + return integerObject; + } + + public Boolean booleanObject() { + return booleanObject; 
+ } + + @SuppressWarnings("unused") + public String nonColumn1() { + return nonColumn1; + } + + @SuppressWarnings("unused") + public Random rnd() { + return rnd; + } + } + + @Measurement(name = "CustomMeasurement") + static final class MyCustomMeasurementWithPrimitives extends RecordTag { + private final Instant time; + private final String uuid; + private final Double doubleObject; + private final Long longObject; + private final Integer integerObject; + private final double doublePrimitive; + private final long longPrimitive; + private final int integerPrimitive; + private final Boolean booleanObject; + private final boolean booleanPrimitive; + + @SuppressWarnings("unused") + private final String nonColumn1; + + @SuppressWarnings("unused") + private final Random rnd; + + MyCustomMeasurementWithPrimitives( + Instant time, + String uuid, + Double doubleObject, + Long longObject, + Integer integerObject, + double doublePrimitive, + long longPrimitive, + int integerPrimitive, + Boolean booleanObject, + boolean booleanPrimitive, + + @SuppressWarnings("unused") + String nonColumn1, + + @SuppressWarnings("unused") + Random rnd) { + this.time = time; + this.uuid = uuid; + this.doubleObject = doubleObject; + this.longObject = longObject; + this.integerObject = integerObject; + this.doublePrimitive = doublePrimitive; + this.longPrimitive = longPrimitive; + this.integerPrimitive = integerPrimitive; + this.booleanObject = booleanObject; + this.booleanPrimitive = booleanPrimitive; + this.nonColumn1 = nonColumn1; + this.rnd = rnd; + } + + public Instant time() { + return time; + } + + public String uuid() { + return uuid; + } + + public Double doubleObject() { + return doubleObject; + } + + public Long longObject() { + return longObject; + } + + public Integer integerObject() { + return integerObject; + } + + public double doublePrimitive() { + return doublePrimitive; + } + + public long longPrimitive() { + return longPrimitive; + } + + public int integerPrimitive() { + return 
integerPrimitive; + } + + public Boolean booleanObject() { + return booleanObject; + } + + public boolean booleanPrimitive() { + return booleanPrimitive; + } + + @SuppressWarnings("unused") + public String nonColumn1() { + return nonColumn1; + } + + @SuppressWarnings("unused") + public Random rnd() { + return rnd; + } + } + + @Measurement(name = "foo") + static final class MyRecordWithUnsupportedField extends RecordTag { + @Column(name = "bar") + private final Date myDate; + + MyRecordWithUnsupportedField(Date myDate) { + this.myDate = myDate; + } + + public Date myDate() { + return myDate; + } + } + + /** + * Class created based on example from this issue + */ + @Measurement(name = "tb_network") + static final class GroupByCarrierDeviceOS extends RecordTag { + private final Instant time; + + @Column(name = "CARRIER", tag = true) + private final String carrier; + + @Column(name = "DEVICE_OS_VERSION", tag = true) + private final String deviceOsVersion; + + private final Double median; + private final Double min; + private final Double max; + + GroupByCarrierDeviceOS( + Instant time, + String carrier, + String deviceOsVersion, + Double median, + Double min, + Double max) { + this.time = time; + this.carrier = carrier; + this.deviceOsVersion = deviceOsVersion; + this.median = median; + this.min = min; + this.max = max; + } + + public Instant time() { + return time; + } + + public String carrier() { + return carrier; + } + + public String deviceOsVersion() { + return deviceOsVersion; + } + + public Double median() { + return median; + } + + public Double min() { + return min; + } + + public Double max() { + return max; + } + } + + static final class MultipleConstructors extends RecordTag { + private final int i; + private final String s; + + MultipleConstructors(int i, String s) { + this.i = i; + this.s = s; + } + + MultipleConstructors(String i, String s) { + this(Integer.parseInt(i), s); + } + + MultipleConstructors(int i, String s, double d) { + this(i, s); + } + + 
int i() { + return i; + } + + String s() { + return s; + } + } + + + static final class ConflictingConstructors extends RecordTag { + private final int i; + private final String s; + + private ConflictingConstructors(int i, String s) { + this.i = i; + this.s = s; + } + + private ConflictingConstructors(String s, int i) { + this(i, s); + } + + public int i() { + return i; + } + + public String s() { + return s; + } + } +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/impl/InfluxDBImplTest.java b/src/test/java/org/influxdb/impl/InfluxDBImplTest.java new file mode 100644 index 000000000..9f3804c9c --- /dev/null +++ b/src/test/java/org/influxdb/impl/InfluxDBImplTest.java @@ -0,0 +1,51 @@ +package org.influxdb.impl; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.lang.reflect.Field; + +import org.influxdb.InfluxDB; +import org.influxdb.TestUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import okhttp3.OkHttpClient; + +@RunWith(JUnitPlatform.class) +public class InfluxDBImplTest { + + private InfluxDB influxDB; + + @BeforeEach + public void setUp() throws Exception { + this.influxDB = TestUtils.connectToInfluxDB(); + } + + @AfterEach + public void cleanup() { + influxDB.close(); + } + + @Test + public void closeOkHttpClient() throws Exception { + OkHttpClient client = getPrivateField(influxDB, "client"); + assertFalse(client.dispatcher().executorService().isShutdown()); + assertFalse(client.connectionPool().connectionCount() == 0); + influxDB.close(); + assertTrue(client.dispatcher().executorService().isShutdown()); + assertTrue(client.connectionPool().connectionCount() == 0); + } + + @SuppressWarnings("unchecked") + static T getPrivateField(final Object obj, final String name) + throws Exception { + 
Field field = obj.getClass().getDeclaredField(name); + field.setAccessible(true); + return (T) field.get(obj); + } + +} diff --git a/src/test/java/org/influxdb/impl/InfluxDBMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBMapperTest.java new file mode 100644 index 000000000..3d5bd86be --- /dev/null +++ b/src/test/java/org/influxdb/impl/InfluxDBMapperTest.java @@ -0,0 +1,350 @@ +package org.influxdb.impl; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.math.BigDecimal; +import java.time.Instant; +import java.util.List; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBMapperException; +import org.influxdb.TestUtils; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.dto.Query; +import org.junit.Assert; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +@RunWith(JUnitPlatform.class) +public class InfluxDBMapperTest { + + private InfluxDB influxDB; + private InfluxDBMapper influxDBMapper; + static final String UDP_DATABASE = "udp"; + + @BeforeEach + public void setUp() throws Exception { + this.influxDB = TestUtils.connectToInfluxDB(); + this.influxDB.query(new Query("CREATE DATABASE " + UDP_DATABASE, UDP_DATABASE)); + this.influxDB.setDatabase(UDP_DATABASE); + this.influxDBMapper = new InfluxDBMapper(influxDB); + } + + @Test + public void testSave() { + ServerMeasure serverMeasure = createMeasure(); + influxDBMapper.save(serverMeasure); + + ServerMeasure persistedMeasure = influxDBMapper.query(ServerMeasure.class).get(0); + Assert.assertEquals(serverMeasure.getName(), persistedMeasure.getName()); + Assert.assertEquals(serverMeasure.getCpu(), persistedMeasure.getCpu(), 0); + Assert.assertEquals(serverMeasure.isHealthy(), persistedMeasure.isHealthy()); + 
Assert.assertEquals(serverMeasure.getUptime(), persistedMeasure.getUptime()); + Assert.assertEquals(serverMeasure.getIp(),persistedMeasure.getIp()); + Assert.assertEquals( + serverMeasure.getMemoryUtilization(), persistedMeasure.getMemoryUtilization()); + } + + @Test + public void testQuery() { + ServerMeasure serverMeasure = createMeasure(); + influxDBMapper.save(serverMeasure); + + List persistedMeasures = influxDBMapper.query(new Query("SELECT * FROM server_measure",UDP_DATABASE),ServerMeasure.class); + Assert.assertTrue(persistedMeasures.size()>0); + } + + @Test + public void testQueryWhenCalledWithClassOnly() { + ServerMeasure serverMeasure = createMeasure(); + influxDBMapper.save(serverMeasure); + + List persistedMeasures = influxDBMapper.query(ServerMeasure.class); + Assert.assertTrue(persistedMeasures.size()>0); + } + + @Test + public void testQueryWhenCalledWithQuery_Class_MeasurementName() { + ServerMeasure serverMeasure = createMeasure(); + influxDBMapper.save(serverMeasure); + + List persistedMeasures = influxDBMapper.query(new Query("SELECT * FROM server_measure",UDP_DATABASE), NonAnnotatedServerMeasure.class, "server_measure"); + Assert.assertTrue(persistedMeasures.size()>0); + } + + @Test + public void testIllegalField() { + InvalidMeasure invalidMeasure = new InvalidMeasure(); + invalidMeasure.setVal(new BigDecimal("2.3")); + assertThrows( + InfluxDBMapperException.class, + () -> influxDBMapper.save(invalidMeasure), + "Non supported field"); + } + + @Test + public void testNoDatabaseSpecified() { + NoDatabaseMeasure noDatabaseMeasure = new NoDatabaseMeasure(); + noDatabaseMeasure.setField(Integer.valueOf(12)); + assertThrows( + IllegalArgumentException.class, + () -> influxDBMapper.query(NoDatabaseMeasure.class), + "Should specify database for this query" + ); + } + + @Test + public void testNonInstantTime() { + NonInstantTime nonInstantTime = new NonInstantTime(); + nonInstantTime.setTime(1234566L); + assertThrows( + InfluxDBMapperException.class, 
+ () -> influxDBMapper.save(nonInstantTime), + "time should be of type Instant" + ); + } + + @Test + public void testInstantOnTime() { + ServerMeasure serverMeasure = createMeasure(); + Instant instant = Instant.ofEpochMilli(System.currentTimeMillis()); + serverMeasure.setTime(instant); + influxDBMapper.save(serverMeasure); + ServerMeasure persistedMeasure = influxDBMapper.query(ServerMeasure.class).get(0); + Assert.assertEquals(instant,persistedMeasure.getTime()); + } + + + @AfterEach + public void cleanUp() throws Exception { + influxDB.query(new Query("DROP DATABASE udp", UDP_DATABASE)); + } + + @Measurement(name = "server_measure", database = UDP_DATABASE) + static class ServerMeasure { + + /** Check the instant conversions */ + @Column(name = "time") + @TimeColumn + private Instant time; + + @Column(name = "name", tag = true) + private String name; + + @Column(name = "cpu") + private double cpu; + + @Column(name = "healthy") + private boolean healthy; + + @Column(name = "min") + private long uptime; + + @Column(name = "memory_utilization") + private Double memoryUtilization; + + @Column(name = "ip") + private String ip; + + public Instant getTime() { + return time; + } + + public void setTime(Instant time) { + this.time = time; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public double getCpu() { + return cpu; + } + + public void setCpu(double cpu) { + this.cpu = cpu; + } + + public boolean isHealthy() { + return healthy; + } + + public void setHealthy(boolean healthy) { + this.healthy = healthy; + } + + public long getUptime() { + return uptime; + } + + public void setUptime(long uptime) { + this.uptime = uptime; + } + + public Double getMemoryUtilization() { + return memoryUtilization; + } + + public void setMemoryUtilization(Double memoryUtilization) { + this.memoryUtilization = memoryUtilization; + } + + public String getIp() { + return ip; + } + + public void setIp(String ip) { + 
this.ip = ip; + } + } + + static class NonAnnotatedServerMeasure { + + /** Check the instant conversions */ + @Column(name = "time") + private Instant time; + + @Column(name = "name", tag = true) + private String name; + + @Column(name = "cpu") + private double cpu; + + @Column(name = "healthy") + private boolean healthy; + + @Column(name = "min") + private long uptime; + + @Column(name = "memory_utilization") + private Double memoryUtilization; + + @Column(name = "ip") + private String ip; + + public Instant getTime() { + return time; + } + + public void setTime(Instant time) { + this.time = time; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public double getCpu() { + return cpu; + } + + public void setCpu(double cpu) { + this.cpu = cpu; + } + + public boolean isHealthy() { + return healthy; + } + + public void setHealthy(boolean healthy) { + this.healthy = healthy; + } + + public long getUptime() { + return uptime; + } + + public void setUptime(long uptime) { + this.uptime = uptime; + } + + public Double getMemoryUtilization() { + return memoryUtilization; + } + + public void setMemoryUtilization(Double memoryUtilization) { + this.memoryUtilization = memoryUtilization; + } + + public String getIp() { + return ip; + } + + public void setIp(String ip) { + this.ip = ip; + } + } + + @Measurement(name = "invalid_measure", database = UDP_DATABASE) + static class InvalidMeasure { + + /** Check the instant conversions */ + @Column(name = "illegal_val") + private BigDecimal val; + + public BigDecimal getVal() { + return val; + } + + public void setVal(BigDecimal val) { + this.val = val; + } + } + + @Measurement(name = "no_database_measure") + static class NoDatabaseMeasure { + + @Column(name = "field") + private Integer field; + + public Integer getField() { + return field; + } + + public void setField(Integer field) { + this.field = field; + } + } + + @Measurement(name = "non_instant_time") + 
static class NonInstantTime { + + @Column(name = "time") + @TimeColumn + private long time; + + public long getTime() { + return time; + } + + public void setTime(long time) { + this.time = time; + } + } + + private ServerMeasure createMeasure() { + ServerMeasure serverMeasure = new ServerMeasure(); + serverMeasure.setName("maverick"); + serverMeasure.setCpu(4.3d); + serverMeasure.setHealthy(true); + serverMeasure.setUptime(1234L); + serverMeasure.setMemoryUtilization(new Double(34.5)); + serverMeasure.setIp("19.087.4.5"); + return serverMeasure; + } + +} diff --git a/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java new file mode 100644 index 000000000..959678216 --- /dev/null +++ b/src/test/java/org/influxdb/impl/InfluxDBResultMapperTest.java @@ -0,0 +1,733 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2017 azeti Networks AG () + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, publish, distribute, + * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or + * substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package org.influxdb.impl; + +import org.influxdb.InfluxDBMapperException; +import org.influxdb.annotation.Column; +import org.influxdb.annotation.Exclude; +import org.influxdb.annotation.Measurement; +import org.influxdb.annotation.TimeColumn; +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import java.time.Instant; +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * @author fmachado + */ +@RunWith(JUnitPlatform.class) +public class InfluxDBResultMapperTest { + + InfluxDBResultMapper mapper = new InfluxDBResultMapper(); + + @Test + public void testToPOJO_HappyPath() { + // Given... + List columnList = Arrays.asList("time", "uuid"); + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setName("CustomMeasurement"); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... + Assertions.assertEquals(1, myList.size(), "there must be one entry in the result list"); + + //When... 
+ List myList1 = mapper.toPOJO(queryResult, MyAllFieldsCustomMeasurement.class); + + // Then... + Assertions.assertEquals(1, myList1.size(), "there must be one entry in the result list"); + } + + @Test + public void testThrowExceptionIfMissingAnnotation() { + Assertions.assertThrows(IllegalArgumentException.class, () -> { + mapper.throwExceptionIfMissingAnnotation(String.class); + }); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultHasError() { + QueryResult queryResult = new QueryResult(); + queryResult.setError("main queryresult error"); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.throwExceptionIfResultWithError(queryResult); + }); + } + + @Test + public void testThrowExceptionIfError_InfluxQueryResultSeriesHasError() { + QueryResult queryResult = new QueryResult(); + + QueryResult.Result seriesResult = new QueryResult.Result(); + seriesResult.setError("series error"); + + queryResult.setResults(Arrays.asList(seriesResult)); + + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.throwExceptionIfResultWithError(queryResult); + }); + } + + @Test + public void testGetMeasurementName_testStateMeasurement() { + Assertions.assertEquals("CustomMeasurement", mapper.getMeasurementName(MyCustomMeasurement.class)); + } + + @Test + public void testParseSeriesAs_testTwoValidSeries() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time", "uuid"); + + List firstSeriesResult = Arrays.asList(Instant.now().toEpochMilli(), UUID.randomUUID().toString()); + List secondSeriesResult = Arrays.asList(Instant.now().plusSeconds(1).toEpochMilli(), UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult, secondSeriesResult)); + + //When... 
+ List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 2, "there must be two series in the result list"); + + Assertions.assertEquals(firstSeriesResult.get(0), result.get(0).time.toEpochMilli(), "Field 'time' (1st series) is not valid"); + Assertions.assertEquals(firstSeriesResult.get(1), result.get(0).uuid, "Field 'uuid' (1st series) is not valid"); + + Assertions.assertEquals(secondSeriesResult.get(0), result.get(1).time.toEpochMilli(), "Field 'time' (2nd series) is not valid"); + Assertions.assertEquals(secondSeriesResult.get(1), result.get(1).uuid, "Field 'uuid' (2nd series) is not valid"); + } + + @Test + public void testParseSeriesAs_testNonNullAndValidValues() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time", "uuid", + "doubleObject", "longObject", "integerObject", + "doublePrimitive", "longPrimitive", "integerPrimitive", + "booleanObject", "booleanPrimitive"); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + String uuidAsString = UUID.randomUUID().toString(); + + // InfluxDB client returns any number as Double. + // See https://github.com/influxdata/influxdb-java/issues/153#issuecomment-259681987 + // for more information. + List seriesResult = Arrays.asList(now, uuidAsString, + new Double("1.01"), new Double("2"), new Double("3"), + new Double("1.01"), new Double("4"), new Double("5"), + "false", "true"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(seriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... 
+ MyCustomMeasurement myObject = result.get(0); + Assertions.assertEquals(now.longValue(), myObject.time.toEpochMilli(), "field 'time' does not match"); + Assertions.assertEquals(uuidAsString, myObject.uuid, "field 'uuid' does not match"); + + Assertions.assertEquals(asDouble(seriesResult.get(2)), myObject.doubleObject, "field 'doubleObject' does not match"); + Assertions.assertEquals(new Long(asDouble(seriesResult.get(3)).longValue()), myObject.longObject, "field 'longObject' does not match"); + Assertions.assertEquals(new Integer(asDouble(seriesResult.get(4)).intValue()), myObject.integerObject, "field 'integerObject' does not match"); + + Assertions.assertTrue( + Double.compare(asDouble(seriesResult.get(5)).doubleValue(), myObject.doublePrimitive) == 0, + "field 'doublePrimitive' does not match"); + + Assertions.assertTrue( + Long.compare(asDouble(seriesResult.get(6)).longValue(), myObject.longPrimitive) == 0, + "field 'longPrimitive' does not match"); + + Assertions.assertTrue( + Integer.compare(asDouble(seriesResult.get(7)).intValue(), myObject.integerPrimitive) == 0, + "field 'integerPrimitive' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(8))), myObject.booleanObject, + "field 'booleanObject' does not match"); + + Assertions.assertEquals( + Boolean.valueOf(String.valueOf(seriesResult.get(9))).booleanValue(), myObject.booleanPrimitive, + "field 'booleanPrimitive' does not match"); + } + + Double asDouble(Object obj) { + return (Double) obj; + } + + @Test + public void testFieldValueModified_DateAsISO8601() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList("2017-06-19T09:29:45.655123Z"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... 
+ List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testFieldValueModified_DateAsInteger() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList(1_000); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + public void testUnsupportedField() { + // Given... + mapper.cacheMeasurementClass(MyPojoWithUnsupportedField.class); + + List columnList = Arrays.asList("bar"); + List firstSeriesResult = Arrays.asList("content representing a Date"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + //When... + List result = new LinkedList<>(); + Assertions.assertThrows(InfluxDBMapperException.class, () -> { + mapper.parseSeriesAs(series, MyPojoWithUnsupportedField.class, result); + }); + } + + /** + * https://github.com/influxdata/influxdb/issues/7596 for more information. + */ + @Test + public void testToPOJO_SeriesFromQueryResultIsNull() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(null); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, MyCustomMeasurement.class); + + // Then... 
+ Assertions.assertTrue( myList.isEmpty(), "there must NO entry in the result list"); + } + + @Test + public void testToPOJO_QueryResultCreatedByGroupByClause() { + // Given... + mapper.cacheMeasurementClass(GroupByCarrierDeviceOS.class); + + List columnList = Arrays.asList("time", "median", "min", "max"); + + // InfluxDB client returns the time representation as Double. + Double now = Long.valueOf(System.currentTimeMillis()).doubleValue(); + + List firstSeriesResult = Arrays.asList(now, new Double("233.8"), new Double("0.0"), + new Double("3090744.0")); + // When the "GROUP BY" clause is used, "tags" are returned as Map + Map firstSeriesTagMap = new HashMap<>(); + firstSeriesTagMap.put("CARRIER", "000/00"); + firstSeriesTagMap.put("DEVICE_OS_VERSION", "4.4.2"); + + List secondSeriesResult = Arrays.asList(now, new Double("552.0"), new Double("135.0"), + new Double("267705.0")); + Map secondSeriesTagMap = new HashMap<>(); + secondSeriesTagMap.put("CARRIER", "000/01"); + secondSeriesTagMap.put("DEVICE_OS_VERSION", "9.3.5"); + + QueryResult.Series firstSeries = new QueryResult.Series(); + firstSeries.setColumns(columnList); + firstSeries.setValues(Arrays.asList(firstSeriesResult)); + firstSeries.setTags(firstSeriesTagMap); + firstSeries.setName("tb_network"); + + QueryResult.Series secondSeries = new QueryResult.Series(); + secondSeries.setColumns(columnList); + secondSeries.setValues(Arrays.asList(secondSeriesResult)); + secondSeries.setTags(secondSeriesTagMap); + secondSeries.setName("tb_network"); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(firstSeries, secondSeries)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List myList = mapper.toPOJO(queryResult, GroupByCarrierDeviceOS.class); + + // Then... 
+ GroupByCarrierDeviceOS firstGroupByEntry = myList.get(0); + Assertions.assertEquals("000/00", firstGroupByEntry.carrier, "field 'carrier' does not match"); + Assertions.assertEquals("4.4.2", firstGroupByEntry.deviceOsVersion, "field 'deviceOsVersion' does not match"); + + GroupByCarrierDeviceOS secondGroupByEntry = myList.get(1); + Assertions.assertEquals("000/01", secondGroupByEntry.carrier, "field 'carrier' does not match"); + Assertions.assertEquals("9.3.5", secondGroupByEntry.deviceOsVersion, "field 'deviceOsVersion' does not match"); + } + + @Test + public void testToPOJO_ticket363() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList("2000-01-01T00:00:00.000000001Z"); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... + Assertions.assertEquals(1, result.size(), "incorrect number of elemets"); + Assertions.assertEquals(1, result.get(0).time.getNano(), "incorrect value for the nanoseconds field"); + } + + @Test + void testToPOJO_Precision() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List firstSeriesResult = Arrays.asList(1_500_000L); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("CustomMeasurement"); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + // When... + List result = mapper.toPOJO(queryResult, MyCustomMeasurement.class, TimeUnit.SECONDS); + + // Then... 
+ Assertions.assertEquals(1, result.size(), "incorrect number of elements"); + Assertions.assertEquals(1_500_000_000L, result.get(0).time.toEpochMilli(), "incorrect value for the millis field"); + } + + @Test + void testToPOJO_SetMeasureName() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("uuid"); + List firstSeriesResult = Arrays.asList(UUID.randomUUID().toString()); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("MySeriesName"); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List result = + mapper.toPOJO(queryResult, MyCustomMeasurement.class, "MySeriesName"); + + //Then... + Assertions.assertTrue(result.size() == 1); + } + + @Test + void testToPOJOInheritance() { + // Given... + mapper.cacheMeasurementClass(MySubMeasurement.class); + + String superValue = UUID.randomUUID().toString(); + String subValue = "my sub value"; + List columnList = Arrays.asList("superValue", "subValue"); + + List firstSeriesResult = Arrays.asList(superValue, subValue); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("MySeriesName"); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List result = + mapper.toPOJO(queryResult, MySubMeasurement.class, "MySeriesName"); + + //Then... 
+ Assertions.assertTrue(result.size() == 1); + Assertions.assertEquals(superValue, result.get(0).superValue); + Assertions.assertEquals(subValue, result.get(0).subValue); + } + + @Test + void testToPOJOGenericInheritance() { + // Given... + mapper.cacheMeasurementClass(MyGenericSubMeasurement.class); + + String superValue = UUID.randomUUID().toString(); + String subValue = "my sub value"; + List columnList = Arrays.asList("superValue", "subValue"); + + List firstSeriesResult = Arrays.asList(superValue, subValue); + + QueryResult.Series series = new QueryResult.Series(); + series.setName("MySeriesName"); + series.setColumns(columnList); + series.setValues(Arrays.asList(firstSeriesResult)); + + QueryResult.Result internalResult = new QueryResult.Result(); + internalResult.setSeries(Arrays.asList(series)); + + QueryResult queryResult = new QueryResult(); + queryResult.setResults(Arrays.asList(internalResult)); + + //When... + List result = + mapper.toPOJO(queryResult, MyGenericSubMeasurement.class, "MySeriesName"); + + //Then... + Assertions.assertTrue(result.size() == 1); + Assertions.assertEquals(superValue, result.get(0).superValue); + Assertions.assertEquals(subValue, result.get(0).subValue); + } + + @Test + public void testToPOJO_HasTimeColumn() { + // Given... + mapper.cacheMeasurementClass(HasTimeColumnMeasurement.class); + + List columnList = Arrays.asList("time"); + List> valuesList = Arrays.asList( + Arrays.asList("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + Arrays.asList("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + Arrays.asList("2000-01-01T00:00:00-00:00"), + Arrays.asList("2000-01-02T00:00:00+00:00") + ); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, HasTimeColumnMeasurement.class, result); + + // Then... 
+ Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time.equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time.equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time.equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time.equals(Instant.parse("2000-01-02T00:00:00Z"))); + + } + + @Test + public void testToPOJO_ticket573() { + // Given... + mapper.cacheMeasurementClass(MyCustomMeasurement.class); + + List columnList = Arrays.asList("time"); + List> valuesList = Arrays.asList( + Arrays.asList("2015-08-17T19:00:00-05:00"), // Chicago (UTC-5) + Arrays.asList("2015-08-17T19:00:00.000000001-05:00"), // Chicago (UTC-5) + Arrays.asList("2000-01-01T00:00:00-00:00"), + Arrays.asList("2000-01-02T00:00:00+00:00") + ); + + QueryResult.Series series = new QueryResult.Series(); + series.setColumns(columnList); + series.setValues(valuesList); + + // When... + List result = new LinkedList<>(); + mapper.parseSeriesAs(series, MyCustomMeasurement.class, result); + + // Then... 
+ Assertions.assertEquals(4, result.size(), "incorrect number of elemets"); + // Note: RFC3339 timestamp with TZ from InfluxDB are parsed into an Instant (UTC) + Assertions.assertTrue(result.get(0).time.equals(Instant.parse("2015-08-18T00:00:00Z"))); + Assertions.assertTrue(result.get(1).time.equals(Instant.parse("2015-08-18T00:00:00.000000001Z"))); + // RFC3339 section 4.3 https://tools.ietf.org/html/rfc3339#section-4.3 + Assertions.assertTrue(result.get(2).time.equals(Instant.parse("2000-01-01T00:00:00Z"))); + Assertions.assertTrue(result.get(3).time.equals(Instant.parse("2000-01-02T00:00:00Z"))); + } + + @Measurement(name = "HasTimeColumnMeasurement") + static class HasTimeColumnMeasurement { + @TimeColumn + @Column(name = "time") + private Instant time; + + @Column(name = "value") + private Integer value; + + public Instant getTime() { + return time; + } + + public void setTime(Instant time) { + this.time = time; + } + + public Integer getValue() { + return value; + } + + public void setValue(Integer value) { + this.value = value; + } + } + + + @Measurement(name = "CustomMeasurement") + static class MyCustomMeasurement { + @Column(name = "time") + private Instant time; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "doubleObject") + private Double doubleObject; + + @Column(name = "longObject") + private Long longObject; + + @Column(name = "integerObject") + private Integer integerObject; + + @Column(name = "doublePrimitive") + private double doublePrimitive; + + @Column(name = "longPrimitive") + private long longPrimitive; + + @Column(name = "integerPrimitive") + private int integerPrimitive; + + @Column(name = "booleanObject") + private Boolean booleanObject; + + @Column(name = "booleanPrimitive") + private boolean booleanPrimitive; + + @SuppressWarnings("unused") + private String nonColumn1; + + @SuppressWarnings("unused") + private Random rnd; + + @Override + public String toString() { + return "MyCustomMeasurement [time=" + time + ", 
uuid=" + uuid + ", doubleObject=" + doubleObject + ", longObject=" + longObject + + ", integerObject=" + integerObject + ", doublePrimitive=" + doublePrimitive + ", longPrimitive=" + longPrimitive + + ", integerPrimitive=" + integerPrimitive + ", booleanObject=" + booleanObject + ", booleanPrimitive=" + booleanPrimitive + "]"; + } + } + + @Measurement(name = "CustomMeasurement", allFields = true) + static class MyAllFieldsCustomMeasurement { + private Instant time; + private String uuid; + private Double doubleObject; + private Long longObject; + private Integer integerObject; + private double doublePrimitive; + private long longPrimitive; + private int integerPrimitive; + private Boolean booleanObject; + private boolean booleanPrimitive; + + @SuppressWarnings("unused") + @Exclude + private String nonColumn1; + + @SuppressWarnings("unused") + @Exclude + private Random rnd; + + @Override + public String toString() { + return "MyCustomMeasurement [time=" + time + ", uuid=" + uuid + ", doubleObject=" + doubleObject + ", longObject=" + longObject + + ", integerObject=" + integerObject + ", doublePrimitive=" + doublePrimitive + ", longPrimitive=" + longPrimitive + + ", integerPrimitive=" + integerPrimitive + ", booleanObject=" + booleanObject + ", booleanPrimitive=" + booleanPrimitive + "]"; + } + } + + @Measurement(name = "SuperMeasurement") + static class MySuperMeasurement { + + @Column(name = "superValue") + protected String superValue; + + @Override + public String toString() { + return "SuperMeasurement [superValue=" + superValue + "]"; + } + } + + @Measurement(name = "SubMeasurement") + static class MySubMeasurement extends MySuperMeasurement { + + @Column(name = "subValue") + protected String subValue; + + @Override + public String toString() { + return "MySubMeasurement [subValue=" + subValue + ", superValue=" + superValue + "]"; + } + } + + @Measurement(name = "SuperMeasurement") + static class MyGenericSuperMeasurement { + + @Column(name = "superValue") + 
protected T superValue; + + @Override + public String toString() { + return "SuperMeasurement [superValue=" + superValue + "]"; + } + } + + @Measurement(name = "SubMeasurement") + static class MyGenericSubMeasurement extends MyGenericSuperMeasurement { + + @Column(name = "subValue") + protected String subValue; + + @Override + public String toString() { + return "MySubMeasurement [subValue=" + subValue + ", superValue=" + superValue + "]"; + } + } + + @Measurement(name = "foo") + static class MyPojoWithUnsupportedField { + + @Column(name = "bar") + private Date myDate; + } + + /** + * Class created based on example from https://github.com/influxdata/influxdb-java/issues/343 + */ + @Measurement(name = "tb_network") + static class GroupByCarrierDeviceOS { + @Column(name = "time") + private Instant time; + + @Column(name = "CARRIER", tag = true) + private String carrier; + + @Column(name = "DEVICE_OS_VERSION", tag = true) + private String deviceOsVersion; + + @Column(name = "median") + private Double median; + + @Column(name = "min") + private Double min; + + @Column(name = "max") + private Double max; + + @Override + public String toString() { + return "GroupByCarrierDeviceOS [time=" + time + ", carrier=" + carrier + ", deviceOsVersion=" + deviceOsVersion + + ", median=" + median + ", min=" + min + ", max=" + max + "]"; + } + } +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/impl/PreconditionsTest.java b/src/test/java/org/influxdb/impl/PreconditionsTest.java new file mode 100644 index 000000000..adc88f25c --- /dev/null +++ b/src/test/java/org/influxdb/impl/PreconditionsTest.java @@ -0,0 +1,62 @@ +package org.influxdb.impl; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +@RunWith(JUnitPlatform.class) +public class PreconditionsTest { + + @Test + public void testCheckNonEmptyString1() { + final String string = "foo"; + 
Preconditions.checkNonEmptyString(string, "string"); + } + + @Test + public void testCheckNonEmptyString2() { + final String string = ""; + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkNonEmptyString(string, "string"); + }); + } + + @Test + public void testCheckNonEmptyString3() { + final String string = null; + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkNonEmptyString(string, "string"); + }); + } + + @Test + public void testCheckPositiveNumber1() { + final Number number = 42; + Preconditions.checkPositiveNumber(number, "number"); + } + + @Test + public void testCheckPositiveNumber2() { + final Number number = 0; + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkPositiveNumber(number, "number"); + }); + } + + @Test + public void testCheckPositiveNumber3() { + final Number number = null; + Assertions.assertThrows(IllegalArgumentException.class, () -> { + Preconditions.checkPositiveNumber(number, "number"); + }); + } + + @Test + public void testCheckDurationInf(){ + final String duration = "inf"; + Assertions.assertDoesNotThrow(()->{ + Preconditions.checkDuration(duration, "duration"); + }); + } +} \ No newline at end of file diff --git a/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java new file mode 100644 index 000000000..0a4f9de97 --- /dev/null +++ b/src/test/java/org/influxdb/impl/RetryCapableBatchWriterTest.java @@ -0,0 +1,299 @@ +package org.influxdb.impl; + +import org.influxdb.InfluxDB; +import org.influxdb.InfluxDBException; +import org.influxdb.TestAnswer; +import org.influxdb.dto.BatchPoints; +import org.influxdb.dto.Point; +import org.junit.Assert; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import 
org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; + +import java.text.MessageFormat; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.lang.reflect.Field; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.*; + +@RunWith(JUnitPlatform.class) +public class RetryCapableBatchWriterTest { + + BatchPoints getBP(int count) { + BatchPoints.Builder b = BatchPoints.database("d1"); + for (int i = 0; i < count; i++) { + b.point(Point.measurement("x1").addField("x", 1).build()).build(); + } + return b.build(); + } + + @Test + public void test() { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BiConsumer errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + BatchPoints bp0 = getBP(5); + BatchPoints bp1 = getBP(90); + BatchPoints bp2 = getBP(90); + BatchPoints bp3 = getBP(8); + BatchPoints bp4 = getBP(100); + + Exception nonRecoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"database not found: cvfdgf\" }"); + Exception recoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"cache-max-memory-size exceeded 104/1400\" }"); + Mockito.doThrow(nonRecoverable).when(mockInfluxDB).write(bp0); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp1); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp2); + Mockito.doThrow(recoverable).when(mockInfluxDB).write(bp3); + // first one will fail with non-recoverable error + rw.write(Collections.singletonList(bp0)); + // second one will fail with recoverable error + rw.write(Collections.singletonList(bp1)); + // will fail with recoverable error again, will remove data due to buffer limit + rw.write(Collections.singletonList(bp2)); + // will write fail with recoverable error + 
rw.write(Collections.singletonList(bp3)); + + ArgumentCaptor captor = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(4)).write(captor.capture()); + final List capturedArgument1 = captor.getAllValues(); + for (BatchPoints b : capturedArgument1) { + System.out.println("batchSize written " + b.getPoints().size()); + } + + Assert.assertEquals(capturedArgument1.get(0).getPoints().size(), 5); + Assert.assertEquals(capturedArgument1.get(1).getPoints().size(), 90); + Assert.assertEquals(capturedArgument1.get(2).getPoints().size(), 90); + Assert.assertEquals(capturedArgument1.get(3).getPoints().size(), 98); + + // error handler called twice; once for first unrecoverable write, se + verify(errorHandler, times(2)).accept(any(),any()); + + // will write data that previously were not sent, will send additional data + Mockito.reset(mockInfluxDB); + rw.write(Collections.singletonList(bp4)); + + ArgumentCaptor captor2 = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(2)).write(captor2.capture()); + final List capturedArgument2 = captor2.getAllValues(); + for (BatchPoints b : capturedArgument2) { + System.out.println("batchSize written " + b.getPoints().size()); + } + Assert.assertEquals(capturedArgument2.get(0).getPoints().size(), 98); + Assert.assertEquals(capturedArgument2.get(1).getPoints().size(), 100); + + } + + @Test + public void testAllNonRecoverableExceptions() { + + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BiConsumer errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + + InfluxDBException nonRecoverable1 = InfluxDBException.buildExceptionForErrorState(createErrorBody("database not found: cvfdgf")); + InfluxDBException nonRecoverable2 = InfluxDBException.buildExceptionForErrorState(createErrorBody("points beyond retention policy 'abc'")); + InfluxDBException nonRecoverable3 = 
InfluxDBException.buildExceptionForErrorState(createErrorBody("unable to parse 'abc'")); + InfluxDBException nonRecoverable4 = InfluxDBException.buildExceptionForErrorState(createErrorBody("hinted handoff queue not empty service='abc'")); + InfluxDBException nonRecoverable5 = InfluxDBException.buildExceptionForErrorState(createErrorBody("field type conflict 'abc'")); + InfluxDBException nonRecoverable6 = new InfluxDBException.RetryBufferOverrunException(createErrorBody("Retry BufferOverrun Exception")); + InfluxDBException nonRecoverable7 = InfluxDBException.buildExceptionForErrorState(createErrorBody("user is not authorized to write to database")); + InfluxDBException nonRecoverable8 = InfluxDBException.buildExceptionForErrorState(createErrorBody("authorization failed")); + InfluxDBException nonRecoverable9 = InfluxDBException.buildExceptionForErrorState(createErrorBody("username required")); + + List exceptions = Arrays.asList(nonRecoverable1, nonRecoverable2, nonRecoverable3, + nonRecoverable4, nonRecoverable5, nonRecoverable6, nonRecoverable7, nonRecoverable8, nonRecoverable9); + int size = exceptions.size(); + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + if (i < size) { + throw exceptions.get(i++); + } + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + BatchPoints bp = getBP(8); + for (int i = 0; i < size; i++) { + rw.write(Collections.singletonList(bp)); + } + verify(errorHandler, times(size)).accept(any(), any());; + } + + @Test + public void testClosingWriter() { + InfluxDB mockInfluxDB = mock(InfluxDB.class); + BiConsumer, Throwable> errorHandler = mock(BiConsumer.class); + + BatchPoints bp5 = getBP(5); + BatchPoints bp6 = getBP(6); + BatchPoints bp90 = getBP(90); + + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + //first 4 calls + if (i++ < 4) { + throw 
InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + } + return; + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 150, 100); + + rw.write(Collections.singletonList(bp5)); + rw.write(Collections.singletonList(bp6)); + rw.write(Collections.singletonList(bp90)); + //recoverable exception -> never errorHandler + verify(errorHandler, never()).accept(any(), any()); + verify(mockInfluxDB, times(3)).write(any(BatchPoints.class)); + + rw.close(); + + ArgumentCaptor captor4Write = ArgumentCaptor.forClass(BatchPoints.class); + ArgumentCaptor> captor4Accept = ArgumentCaptor.forClass(List.class); + verify(errorHandler, times(1)).accept(captor4Accept.capture(), any()); + verify(mockInfluxDB, times(5)).write(captor4Write.capture()); + + //bp5 and bp6 were merged and writing of the merged batch points on closing should be failed + Assertions.assertEquals(11, captor4Accept.getValue().size()); + //bp90 was written because no more exception thrown + Assertions.assertEquals(90, captor4Write.getAllValues().get(4).getPoints().size()); + } + + @Test + public void testRetryingKeepChronologicalOrder() { + + BatchPoints.Builder b = BatchPoints.database("d1"); + for (int i = 0; i < 200; i++) { + b.point(Point.measurement("x1").time(1,TimeUnit.HOURS). + addField("x", 1). + tag("t", "v1").build()).build(); + } + + BatchPoints bp1 = b.build(); + + b = BatchPoints.database("d1"); + + b.point(Point.measurement("x1").time(1,TimeUnit.HOURS). + addField("x", 2). + tag("t", "v2").build()).build(); + + for (int i = 0; i < 199; i++) { + b.point(Point.measurement("x1").time(2,TimeUnit.HOURS). + addField("x", 2). 
+ tag("t", "v2").build()).build(); + } + BatchPoints bp2 = b.build(); + + InfluxDB mockInfluxDB = mock(InfluxDB.class); + BiConsumer, Throwable> errorHandler = mock(BiConsumer.class); + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + 450, 150); + doAnswer(new TestAnswer() { + int i = 0; + @Override + protected void check(InvocationOnMock invocation) { + if (i++ < 1) { + throw InfluxDBException.buildExceptionForErrorState("cache-max-memory-size exceeded 104/1400"); + } + return; + } + }).when(mockInfluxDB).write(any(BatchPoints.class)); + + rw.write(Collections.singletonList(bp1)); + rw.write(Collections.singletonList(bp2)); + + ArgumentCaptor captor4Write = ArgumentCaptor.forClass(BatchPoints.class); + verify(mockInfluxDB, times(3)).write(captor4Write.capture()); + + //bp1 written but failed because of recoverable cache-max-memory-size error + Assertions.assertEquals(bp1, captor4Write.getAllValues().get(0)); + //bp1 rewritten on writing of bp2 + Assertions.assertEquals(bp1, captor4Write.getAllValues().get(1)); + //bp2 written + Assertions.assertEquals(bp2, captor4Write.getAllValues().get(2)); + } + + @Test + void defaultExceptionIsRecoverable() { + InfluxDBException unknownError = InfluxDBException.buildExceptionForErrorState(createErrorBody("unknown error")); + + Assertions.assertTrue(unknownError.isRetryWorth()); + } + + @Test + public void testBufferCountConsistency() throws Exception { + InfluxDB mockInfluxDB = mock(InfluxDBImpl.class); + BiConsumer errorHandler = mock(BiConsumer.class); + int MAX_BUFFER_CAPACITY = 3000; + RetryCapableBatchWriter rw = new RetryCapableBatchWriter(mockInfluxDB, errorHandler, + MAX_BUFFER_CAPACITY, 1000); + + Exception nonRecoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"database not found: cvfdgf\" }"); + Exception recoverable = InfluxDBException.buildExceptionForErrorState("{ \"error\": \"cache-max-memory-size exceeded 104/1400\" }"); + + // need access to private 
properties for quality testing + Field localUsedRetryBufferCapacity = RetryCapableBatchWriter.class. + getDeclaredField("usedRetryBufferCapacity"); + Field localBatchQueue = RetryCapableBatchWriter.class. + getDeclaredField("batchQueue"); + + localUsedRetryBufferCapacity.setAccessible(true); + localBatchQueue.setAccessible(true); + + // cycle test with all possible outcomes: non retry, with retry, write pass + // "with retry" will cover https://github.com/influxdata/influxdb-java/issues/541 + Exception[] tryExceptionList = new Exception[]{nonRecoverable, recoverable, null}; + + for (Exception exception : tryExceptionList) { + // try for 100 times with random number of points each time + for (int i=0; i < 100; i++) { + int count = 200 + ((i * 777) & 511); + BatchPoints bps = getBP(count); + if (exception != null) { + Mockito.doThrow(exception).when(mockInfluxDB).write(bps); + } + else { + Mockito.reset(mockInfluxDB); + } + rw.write(Collections.singletonList(bps)); + + // count actual number of points in batchQueue + @SuppressWarnings("unchecked") + LinkedList batchQueue = (LinkedList)localBatchQueue.get(rw); + int sum = 0; + for (BatchPoints b : batchQueue) { + sum += b.getPoints().size(); + } + + // compare with value of usedRetryBufferCapacity + int localUsedRetryBufferCapacityVal = (int) localUsedRetryBufferCapacity.get(rw); + + Assertions.assertTrue(sum == localUsedRetryBufferCapacityVal, + "batchSize usage counter mismatch UsedRetryBufferCapacityVal, " + + sum + " != " + localUsedRetryBufferCapacityVal); + Assertions.assertTrue(sum < MAX_BUFFER_CAPACITY, "batchSize usage outside of allowed range " + sum); + } + } + } + + private static String createErrorBody(String errorMessage) { + return MessageFormat.format("'{' \"error\": \"{0}\" '}'", errorMessage); + } +} diff --git a/src/test/java/org/influxdb/impl/TimeUtilTest.java b/src/test/java/org/influxdb/impl/TimeUtilTest.java index bb8d8e7b8..82efe041d 100644 --- 
a/src/test/java/org/influxdb/impl/TimeUtilTest.java +++ b/src/test/java/org/influxdb/impl/TimeUtilTest.java @@ -1,23 +1,24 @@ package org.influxdb.impl; -import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertThat; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; +@RunWith(JUnitPlatform.class) public class TimeUtilTest { @Test - public void toInfluxDBTimeFormatTest() throws Exception { - assertThat(TimeUtil.toInfluxDBTimeFormat(1477896740020L), is(equalTo("2016-10-31T06:52:20.020Z"))); - assertThat(TimeUtil.toInfluxDBTimeFormat(1477932740005L), is(equalTo("2016-10-31T16:52:20.005Z"))); + public void testToInfluxDBTimeFormatTest() throws Exception { + assertThat(TimeUtil.toInfluxDBTimeFormat(1477896740020L)).isEqualTo("2016-10-31T06:52:20.020Z"); + assertThat(TimeUtil.toInfluxDBTimeFormat(1477932740005L)).isEqualTo("2016-10-31T16:52:20.005Z"); } @Test - public void fromInfluxDBTimeFormatTest() throws Exception { - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20.020Z"), is(equalTo(1477896740020L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20.005Z"), is(equalTo(1477932740005L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20Z"), is(equalTo(1477932740000L))); - assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20Z"), is(equalTo(1477896740000L))); + public void testFromInfluxDBTimeFormatTest() throws Exception { + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20.020Z")).isEqualTo(1477896740020L); + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20.005Z")).isEqualTo(1477932740005L); + assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T16:52:20Z")).isEqualTo(1477932740000L); + 
assertThat(TimeUtil.fromInfluxDBTimeFormat("2016-10-31T06:52:20Z")).isEqualTo(1477896740000L); } } diff --git a/src/test/java/org/influxdb/msgpack/MessagePackTraverserTest.java b/src/test/java/org/influxdb/msgpack/MessagePackTraverserTest.java new file mode 100644 index 000000000..19a00a001 --- /dev/null +++ b/src/test/java/org/influxdb/msgpack/MessagePackTraverserTest.java @@ -0,0 +1,78 @@ +package org.influxdb.msgpack; + +import java.util.Iterator; +import java.util.List; + +import org.influxdb.dto.QueryResult; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +import static org.junit.jupiter.api.Assertions.*; + +@RunWith(JUnitPlatform.class) +@EnabledIfEnvironmentVariable(named = "INFLUXDB_VERSION", matches = "1\\.6|1\\.5|1\\.4") +public class MessagePackTraverserTest { + + @Test + public void testTraverseMethod() { + MessagePackTraverser traverser = new MessagePackTraverser(); + + /* a json-like view of msgpack_1.bin + + {"results":[{"statement_id":0,"series":[{"name":"disk","columns":["time","atag","free","used"], + "values":[[(5,0x00005b556c-252f-23-6438),"a",1,60],[(5,0x00005b556c-252f-23-6438),"b",2,70]],"partial":true}],"partial":true}]} + {"results":[{"statement_id":0,"series":[{"name":"disk","columns":["time","atag","free","used"],"values":[[(5,0x00005b556c-252f-23-6438),"c",3,80]]}]}]} + + */ + + Iterator iter = traverser.traverse(MessagePackTraverserTest.class.getResourceAsStream("msgpack_1.bin")).iterator(); + assertTrue(iter.hasNext()); + QueryResult result = iter.next(); + List> values = result.getResults().get(0).getSeries().get(0).getValues(); + Assertions.assertEquals(2, values.size()); + + assertEquals(1532325083803052600L, values.get(0).get(0)); + assertEquals("b", values.get(1).get(1)); + + assertTrue(iter.hasNext()); + result = iter.next(); + values = 
result.getResults().get(0).getSeries().get(0).getValues(); + Assertions.assertEquals(1, values.size()); + assertEquals(3, values.get(0).get(2)); + + assertFalse(iter.hasNext()); + } + + @Test + public void testParseMethodOnNonEmptyResult() { + MessagePackTraverser traverser = new MessagePackTraverser(); + /* a json-like view of msgpack_2.bin + + {"results":[{"statement_id":0,"series":[{"name":"measurement_957996674028300","columns":["time","device","foo"], + "values":[[(5,0x000058-797a00000),"one",1.0],[(5,0x000058-79-78100000),"two",2.0],[(5,0x000058-79-6a200000),"three",3.0]]}]}]} + */ + QueryResult queryResult = traverser.parse(MessagePackTraverserTest.class.getResourceAsStream("msgpack_2.bin")); + List> values = queryResult.getResults().get(0).getSeries().get(0).getValues(); + Assertions.assertEquals(3, values.size()); + assertEquals(1485273600000000000L, values.get(0).get(0)); + assertEquals("two", values.get(1).get(1)); + assertEquals(3.0, values.get(2).get(2)); + } + + @Test + public void testParseMethodOnEmptyResult() { + MessagePackTraverser traverser = new MessagePackTraverser(); + /* a json-like view of msgpack_3.bin + + {"results":[{"statement_id":0,"series":[]}]} + + */ + QueryResult queryResult = traverser.parse(MessagePackTraverserTest.class.getResourceAsStream("msgpack_3.bin")); + System.out.println(); + assertNull(queryResult.getResults().get(0).getSeries()); + + } +} diff --git a/src/test/java/org/influxdb/querybuilder/api/AppenderTest.java b/src/test/java/org/influxdb/querybuilder/api/AppenderTest.java new file mode 100644 index 000000000..c922a7743 --- /dev/null +++ b/src/test/java/org/influxdb/querybuilder/api/AppenderTest.java @@ -0,0 +1,20 @@ +package org.influxdb.querybuilder.api; + +import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq; +import static org.junit.jupiter.api.Assertions.assertEquals; + +import java.util.Collections; + +import org.influxdb.querybuilder.Appender; +import org.junit.jupiter.api.Test; + +public class 
AppenderTest { + + @Test + public void testJoinAndAppend() { + StringBuilder builder = new StringBuilder(); + builder.append("SELECT test1,test2 FROM foo WHERE "); + Appender.joinAndAppend(builder, " AND ", Collections.singletonList(eq("testval", "test1"))); + assertEquals("SELECT test1,test2 FROM foo WHERE testval = 'test1'", builder.toString()); + } +} diff --git a/src/test/java/org/influxdb/querybuilder/api/BuiltQueryTest.java b/src/test/java/org/influxdb/querybuilder/api/BuiltQueryTest.java new file mode 100644 index 000000000..1fcadfd74 --- /dev/null +++ b/src/test/java/org/influxdb/querybuilder/api/BuiltQueryTest.java @@ -0,0 +1,988 @@ +package org.influxdb.querybuilder.api; + +import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.*; +import static org.influxdb.querybuilder.Operations.ADD; +import static org.influxdb.querybuilder.Operations.MUL; +import static org.influxdb.querybuilder.Operations.SUB; +import static org.influxdb.querybuilder.time.DurationLiteral.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.influxdb.dto.Query; +import org.influxdb.querybuilder.FunctionFactory; +import org.influxdb.querybuilder.RawText; +import org.influxdb.querybuilder.Where; +import org.junit.jupiter.api.Test; +import org.junit.platform.runner.JUnitPlatform; +import org.junit.runner.RunWith; + +@RunWith(JUnitPlatform.class) +public class BuiltQueryTest { + + private static final String DATABASE = "testdb"; + + @Test + public void testCommandWithUrlEncoded() { + Query select = select().max("test1").as("hello").from(DATABASE, "foobar"); + String encoded = select.getCommandWithUrlEncoded(); + + assertEquals("SELECT+MAX%28test1%29+AS+hello+FROM+foobar%3B", encoded); + } + + @Test + public void testQueryWithoutTable() { + final String[] tables = null; + assertThrows( + IllegalArgumentException.class, + () -> 
select().max("test1").as("hello").from(DATABASE, tables)); + } + + @Test + public void testAlias() { + Query query = new Query("SELECT MAX(test1) AS hello FROM foobar;", DATABASE); + Query select = select().max("test1").as("hello").from(DATABASE, "foobar"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testRegex() { + Query query = new Query("SELECT MAX(test1) FROM foobar WHERE test1 =~ /[0-9]/;", DATABASE); + Query select = select().max("test1").from(DATABASE, "foobar").where(regex("test1", "/[0-9]/")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testInvalidRegex() { + assertThrows( + IllegalArgumentException.class, + () -> select().max("test1").from(DATABASE, "foobar").where(regex("test1", null)), + "Missing text for expression"); + } + + @Test + public void testNegativeRegex() { + Query query = new Query("SELECT MAX(test1) FROM foobar WHERE test1 !~ /[0-9]/;", DATABASE); + Query select = select().max("test1").from(DATABASE, "foobar").where(nregex("test1", "/[0-9]/")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testInvalidNegativeRegex() { + assertThrows( + IllegalArgumentException.class, + () -> select().max("test1").from(DATABASE, "foobar").where(nregex("test1", null)), + "Missing text for expression"); + } + + @Test + public void testContains() { + Query query = new Query("SELECT MAX(test1) FROM foobar WHERE test1 =~ /text/;", DATABASE); + Query select = select().max("test1").from(DATABASE, "foobar").where(contains("test1", "text")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDistinct() { + Query query = new Query("SELECT DISTINCT test1 FROM foobar;", 
DATABASE); + Query select = select().column("test1").distinct().from(DATABASE, "foobar"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMultipleTables() { + Query query = new Query("SELECT DISTINCT test1 FROM foobar,foobar2;", DATABASE); + Query select = + select().column("test1").distinct().from(DATABASE, new String[] {"foobar", "foobar2"}); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testRawTexTable() { + Query query = new Query("SELECT DISTINCT test1 FROM /*/;", DATABASE); + Query select = select().column("test1").distinct().fromRaw(DATABASE, "/*/"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDistinctWithExpression() { + Query query = + new Query("SELECT DISTINCT COUNT(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().count("test1").distinct().from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDistinctWithMultipleSelectedColumns() { + Query select = + select().column("test1").column("test2").distinct().from(DATABASE, "foobar").limit(1, 20); + + assertThrows( + IllegalStateException.class, + () -> select.getCommand(), + "DISTINCT function can only be used with one column"); + } + + @Test + public void testDistinctWithoutSelectedColumns() { + assertThrows( + IllegalStateException.class, + () -> select().distinct().from(DATABASE, "foobar").limit(1, 20), + "DISTINCT function can only be used with one column"); + } + + @Test + public void testMultipleColumns() { + Query query = select().column("test1").distinct().column("test2").from(DATABASE, "foobar"); + assertThrows( + 
IllegalStateException.class, + () -> query.getCommand(), + "Cannot mix all columns and specific columns"); + } + + @Test + public void testNonEqual() { + Query query = new Query("SELECT * FROM foobar WHERE test1 != 4;", DATABASE); + Query select = select().all().from(DATABASE, "foobar").where(ne("test1", 4)); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSelectAllWithColumn() { + assertThrows( + IllegalStateException.class, + () -> + select() + .column("test1") + .all() + .from(DATABASE, "foobar") + .where(ne("test1", raw("raw expression"))), + "Can't select all columns over columns selected previously"); + } + + @Test + public void testSelectAllWithColumns() { + assertThrows( + IllegalStateException.class, + () -> + select() + .column("test1") + .column("test2") + .all() + .from(DATABASE, "foobar") + .where(ne("test1", raw("raw expression"))), + "Can't select all columns over columns selected previously"); + } + + @Test + public void testSelectAllWithDistinct() { + assertThrows( + IllegalStateException.class, + () -> + select() + .column("test1") + .distinct() + .all() + .from(DATABASE, "foobar") + .where(ne("test1", raw("raw expression"))), + "Can't select all columns over columns selected previously"); + } + + @Test + public void testRawExpressionInWhere() { + Query query = new Query("SELECT * FROM foobar WHERE test1 != raw expression;", DATABASE); + Query select = + select().all().from(DATABASE, "foobar").where(ne("test1", raw("raw expression"))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testRawExpressionEmptyValue() { + String rawTextClause = null; + assertThrows( + IllegalArgumentException.class, + () -> select().all().from(DATABASE, "foobar").where(rawTextClause), + "Missing text for expression"); + } + + @Test + public void testOrderingAsc() { + Query 
query = + new Query( + "SELECT * FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z' ORDER BY time ASC;", + DATABASE); + Query select = + select() + .all() + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")) + .orderBy(asc()); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testOrderingDesc() { + Query query = + new Query( + "SELECT * FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z' ORDER BY time DESC;", + DATABASE); + Query select = + select() + .all() + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")) + .orderBy(desc()); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSelect() { + Query query = + new Query( + "SELECT * FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z';", DATABASE); + Query select = + select() + .all() + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSelectLtGte() { + Query query = new Query("SELECT * FROM foobar WHERE test1 < 4 AND test2 >= 'a';", DATABASE); + Query select = + select().all().from(DATABASE, "foobar").where(lt("test1", 4)).and(gte("test2", "a")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMean() { + Query query = + new Query( + "SELECT MEAN(test1) FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z';", + DATABASE); + Query select = + select() + .mean("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")); + + 
assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSum() { + Query query = + new Query( + "SELECT SUM(test1) FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z';", + DATABASE); + Query select = + select() + .sum("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMin() { + Query query = + new Query( + "SELECT MIN(test1) FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z';", + DATABASE); + Query select = + select() + .min("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMax() { + Query query = + new Query( + "SELECT MAX(test1) FROM foobar WHERE test1 = 4 AND test2 > 'a' AND test2 <= 'z';", + DATABASE); + Query select = + select() + .max("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 4)) + .and(gt("test2", "a")) + .and(lte("test2", "z")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSelectField() { + Query query = new Query("SELECT test1,test2 FROM foobar;", DATABASE); + Query select = select().column("test1").column("test2").from(DATABASE, "foobar"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testGroupBy() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY test2,test3;", DATABASE); + Query select = select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3"); + + assertEquals(query.getCommand(), 
select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testGroupByTime() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY time(1h);", DATABASE); + Query select = select().column("test1").from(DATABASE, "foobar").groupBy(time(1L, HOUR)); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testGroupByTimeOffset() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY time(1h,5w);", DATABASE); + Query select = + select().column("test1").from(DATABASE, "foobar").groupBy(time(1L, HOUR, 5L, WEEK)); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testGroupByTimeOffsetMultiples() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY time(1h,5w),test1;", DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .groupBy(time(1L, HOUR, 5L, WEEK), "test1"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testWhereConjunction() { + Query query = new Query("SELECT test1 FROM foobar WHERE test1 = 1 OR test2 = 'a';", DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 1)) + .or(eq("test2", "a")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMultipleOrConjunction() { + Query query = + new Query( + "SELECT test1 FROM foobar WHERE test1 = 1 OR test2 = 'a' OR test3 = 'b';", DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 1)) + .or(eq("test2", "a")) + .or(eq("test3", "b")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), 
select.getDatabase()); + } + + @Test + public void testOrAndConjunction() { + Query query = + new Query( + "SELECT test1 FROM foobar WHERE test1 = 1 OR test2 = 'a' OR test3 = 'b' AND test4 = 'c';", + DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 1)) + .or(eq("test2", "a")) + .or(eq("test3", "b")) + .and(eq("test4", "c")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testNestedClauses() { + Query query = + new Query( + "SELECT test1 FROM foobar WHERE test1 = 1 OR test2 = 'a' OR test3 = 'b' " + + "AND (test2 = 'b' OR test3 = 'a') " + + "OR (test1 = 2 AND test2 = 'y' AND test3 = 'z') " + + "AND (test1 = 8 OR test2 = 'g' OR test3 = 'j') " + + "AND test4 = 'c';", + DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .where(eq("test1", 1)) + .or(eq("test2", "a")) + .or(eq("test3", "b")) + .andNested() + .and(eq("test2", "b")) + .or(eq("test3", "a")) + .close() + .orNested() + .and(eq("test1", 2)) + .and(eq("test2", "y")) + .and(eq("test3", "z")) + .close() + .andNested() + .or(eq("test1", 8)) + .or(eq("test2", "g")) + .or(eq("test3", "j")) + .close() + .and(eq("test4", "c")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testWhere() { + Query query = new Query("SELECT test1 FROM foobar WHERE test4 = 1;", DATABASE); + Where where = select().column("test1").from(DATABASE, "foobar").where(); + Query select = where.and(eq("test4", 1)); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testWhereGroupBy() { + Query query = + new Query("SELECT test1 FROM foobar WHERE test4 = 1 GROUP BY test2,test3;", DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + 
.where(eq("test4", 1)) + .groupBy("test2", "test3"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testLimit() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY test2,test3 LIMIT 1;", DATABASE); + Query select = + select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").limit(1); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testLimitTwice() { + assertThrows( + IllegalStateException.class, + () -> + select() + .column("test1") + .from(DATABASE, "foobar") + .groupBy("test2", "test3") + .limit(1) + .limit(1), + "Cannot use limit twice"); + } + + @Test + public void testInvalidLimit() { + assertThrows( + IllegalArgumentException.class, + () -> select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").limit(-1), + "Invalid LIMIT value, must be strictly positive"); + } + + @Test + public void testLimitOffset() { + Query query = + new Query("SELECT test1 FROM foobar GROUP BY test2,test3 LIMIT 1 OFFSET 20;", DATABASE); + Query select = + select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSLimit() { + Query query = new Query("SELECT test1 FROM foobar GROUP BY test2,test3 SLIMIT 1;", DATABASE); + Query select = + select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").sLimit(1); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testInvalidSLimit() { + assertThrows( + IllegalArgumentException.class, + () -> + select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").sLimit(-1), + "Invalid LIMIT value, must be 
strictly positive"); + } + + @Test + public void testSLimitSOffset() { + Query query = + new Query("SELECT test1 FROM foobar GROUP BY test2,test3 SLIMIT 1 SOFFSET 20;", DATABASE); + Query select = + select().column("test1").from(DATABASE, "foobar").groupBy("test2", "test3").sLimit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testLimitAndSLimitOffset() { + Query query = + new Query( + "SELECT test1 FROM foobar GROUP BY test2,test3 LIMIT 1 OFFSET 20 SLIMIT 1 SOFFSET 20;", + DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .groupBy("test2", "test3") + .limit(1, 20) + .sLimit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testCount() { + Query query = new Query("SELECT COUNT(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().count("test1").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testTimezone() { + Query query = + new Query( + "SELECT test1 FROM foobar GROUP BY test2,test3 SLIMIT 1 tz('America/Chicago');", + DATABASE); + Query select = + select() + .column("test1") + .from(DATABASE, "foobar") + .groupBy("test2", "test3") + .sLimit(1) + .tz("America/Chicago"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMinWithLimit() { + Query query = new Query("SELECT MIN(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().min("test1").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testMaxWithLimit() { + 
Query query = new Query("SELECT MAX(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().max("test1").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSumWithLimit() { + Query query = new Query("SELECT SUM(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().sum("test1").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testAggregateCompination() { + Query query = + new Query( + "SELECT MAX(test1),MIN(test2),COUNT(test3),SUM(test4) FROM foobar LIMIT 1 OFFSET 20;", + DATABASE); + Query select = + select() + .max("test1") + .min("test2") + .count("test3") + .sum("test4") + .from(DATABASE, "foobar") + .limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testFunctionCall() { + Query query = new Query("SELECT MEDIAN(test1) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().function("MEDIAN", "test1").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testFunctionInsideFunction() { + Query query = new Query("SELECT MEDIAN(now()) FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().function("MEDIAN", now()).from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testRawTextOnSelection() { + Query query = + new Query("SELECT an expression on select FROM foobar LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().raw("an expression on 
select").from(DATABASE, "foobar").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testRawTextOnCondition() { + Query query = + new Query("SELECT * FROM foobar WHERE text as condition LIMIT 1 OFFSET 20;", DATABASE); + Query select = select().from(DATABASE, "foobar").where("text as condition").limit(1, 20); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testNowOnCondition() { + Query query = new Query("SELECT * FROM foobar WHERE time > now() AND time <= now();", DATABASE); + Query select = + select().from(DATABASE, "foobar").where(gt("time", now())).and(lte("time", now())); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSelectRegex() { + Query query = new Query("SELECT /k/ FROM foobar;", DATABASE); + Query select = select().regex("/k/").from(DATABASE, "foobar"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testNullFromClause() { + String from = null; + assertThrows( + IllegalArgumentException.class, + () -> select().from(DATABASE, from)); + } + + @Test + public void testCountAll() { + Query query = new Query("SELECT COUNT(*) FROM foobar;", DATABASE); + Query select = select().countAll().from(DATABASE, "foobar"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testCountAllWithColumn() { + assertThrows( + IllegalStateException.class, + () -> select().column("test1").countAll().from(DATABASE, "foobar"), + "Can't count all with previously selected columns"); + } + + @Test + public void testCountAllWithColumns() { + assertThrows( + IllegalStateException.class, + () -> 
select().column("test1").column("test2").countAll().from(DATABASE, "foobar"), + "Can't count all with previously selected columns"); + } + + @Test + public void testRequiresPost() { + Query select = select().requiresPost().countAll().from(DATABASE, "foobar"); + Query selectColumns = select("column1", "column2").requiresPost().from(DATABASE, "foobar"); + Query selectColumnsAndAggregations = + select(min("column1"), max("column2")).requiresPost().from(DATABASE, "foobar"); + + assertTrue(select.requiresPost()); + assertTrue(selectColumns.requiresPost()); + assertTrue(selectColumnsAndAggregations.requiresPost()); + } + + @Test + public void testInto() { + Query query = + new Query( + "SELECT * INTO \"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT FROM \"NOAA_water_database\".\"autogen\"./.*/ GROUP BY *;", + DATABASE); + Query select = + select() + .into("\"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT") + .from(DATABASE, "\"NOAA_water_database\".\"autogen\"./.*/") + .groupBy(new RawText("*")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testIntoWithSelection() { + Query query = + new Query( + "SELECT column1,MAX(column2),MAX(column3) INTO \"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT FROM \"NOAA_water_database\".\"autogen\"./.*/ GROUP BY *;", + DATABASE); + Query select = + select() + .column("column1") + .max("column2") + .max("column3") + .into("\"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT") + .from(DATABASE, "\"NOAA_water_database\".\"autogen\"./.*/") + .groupBy(new RawText("*")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubRelativeTimeSelection() { + Query query = new Query("SELECT water_level FROM h2o_feet WHERE time > now() - 1h;", DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + 
.where(gt("time", subTime(1L, HOUR))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testAddRelativeTimeSelection() { + Query query = new Query("SELECT water_level FROM h2o_feet WHERE time > now() + 1w;", DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", addTime(1L, WEEK))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDateTimeString() { + Query query = + new Query( + "SELECT water_level FROM h2o_feet WHERE time > '2015-08-18T00:00:00.000000000Z';", + DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", "2015-08-18T00:00:00.000000000Z")); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testNestedOperations() { + Query query = + new Query( + "SELECT water_level FROM h2o_feet WHERE column1 > 3 * ((column2 + 3) + 4);", DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("column1", op(3,MUL,op(cop("column2",ADD,3), ADD, 4)))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDateTimeStringOperation() { + Query query = + new Query( + "SELECT water_level FROM h2o_feet WHERE time > '2015-09-18T21:24:00Z' + 6m;", DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op("2015-09-18T21:24:00Z", ADD, ti(6L, MINUTE)))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testDateTimeEpochOperation() { + Query query = + new Query("SELECT water_level FROM 
h2o_feet WHERE time > 24043524m - 6m;", DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op(ti(24043524L, MINUTE), SUB, ti(6L, MINUTE)))); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testFill() { + Query query = + new Query( + "SELECT water_level FROM h2o_feet WHERE time > 24043524m - 6m GROUP BY water_level fill(100);", + DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op(ti(24043524L, MINUTE), SUB, ti(6L, MINUTE)))) + .groupBy("water_level") + .fill(100); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testFillLinear() { + Query query = + new Query( + "SELECT water_level FROM h2o_feet WHERE time > 24043524m - 6m GROUP BY water_level fill(linear);", + DATABASE); + Query select = + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op(ti(24043524L, MINUTE), SUB, ti(6L, MINUTE)))) + .groupBy("water_level") + .fill("linear"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testFillLinearIllegalArgument() { + assertThrows( + IllegalArgumentException.class, + () -> + select() + .column("water_level") + .from(DATABASE, "h2o_feet") + .where(gt("time", op(ti(24043524L, MINUTE), SUB, ti(6L, MINUTE)))) + .groupBy("water_level") + .fill("illegal argument"), + "Invalid argument for fill"); + } + + @Test + public void multipleDatabaseBackReferenceing() { + Query query = + new Query("SELECT MEAN(*) INTO \"where_else\".\"autogen\".:MEASUREMENT FROM /.*/ WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:06:00Z' GROUP BY time(12m);",DATABASE); + Query select = + select() + .mean(raw("*")) + 
.into("\"where_else\".\"autogen\".:MEASUREMENT") + .fromRaw(DATABASE, "/.*/") + .where(gte("time","2015-08-18T00:00:00Z")) + .and(lte("time","2015-08-18T00:06:00Z")) + .groupBy(time(12L, MINUTE)); + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testBoundParameters() { + Query query = select().column("a").from(DATABASE, "b") + .where(eq("c", FunctionFactory.placeholder("d"))).bindParameter("d", 3); + assertEquals("SELECT a FROM b WHERE c = $d;", query.getCommand()); + assertEquals(Query.encode("{\"d\":3}"), query.getParameterJsonWithUrlEncoded()); + assertEquals(DATABASE, query.getDatabase()); + } +} diff --git a/src/test/java/org/influxdb/querybuilder/api/SelectionSubQueryImplTest.java b/src/test/java/org/influxdb/querybuilder/api/SelectionSubQueryImplTest.java new file mode 100644 index 000000000..23e27c2c5 --- /dev/null +++ b/src/test/java/org/influxdb/querybuilder/api/SelectionSubQueryImplTest.java @@ -0,0 +1,625 @@ +package org.influxdb.querybuilder.api; + +import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.*; +import static org.influxdb.querybuilder.Operations.ADD; +import static org.influxdb.querybuilder.Operations.MUL; +import static org.influxdb.querybuilder.Operations.SUB; +import static org.influxdb.querybuilder.time.DurationLiteral.HOUR; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.influxdb.dto.Query; +import org.junit.jupiter.api.Test; + +public class SelectionSubQueryImplTest { + + private static final String DATABASE = "testdb"; + + @Test + public void testSubQueryWithoutTable() { + String[] tables = null; + assertThrows( + IllegalArgumentException.class, + () -> select().max("test1").as("hello").fromSubQuery(DATABASE).from(tables).close()); + } + + @Test + public void testSubQuery() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT 
* FROM foobar) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, "foobar") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQuerySelectOperations() { + Query query = + new Query( + "SELECT * FROM (SELECT column1,column1 * 2,2 + 2,/*/,COUNT(column3),MIN(column1),SUM((column2 + 1) - 4),testFunction(column1,column2) FROM foobar) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .fromSubQuery(DATABASE) + .column("column1") + .cop("column1",MUL,2) + .op(2,ADD,2) + .raw("/*/") + .count("column3") + .min("column1") + .sum(op(cop("column2",ADD,1),SUB,4)) + .function("testFunction","column1","column2") + .from("foobar") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryMultipleTables() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar,second_table) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, new String[] {"foobar", "second_table"}) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryRawTable() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM /*/) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQueryRaw(DATABASE, "/*/") + .close() + .where(eq("column1", 1)) + 
.groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithTextOnWhere() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar WHERE arbitrary text) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, "foobar") + .where("arbitrary text") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithLimit() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar LIMIT 1) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, "foobar") + .limit(1) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWhereNestedOrderByLimit() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar WHERE (test1 = 2) AND test1 = 1 ORDER BY time ASC LIMIT 1 OFFSET 1 SLIMIT 1 SOFFSET 1) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, "foobar") + .where() + .orNested() + .or(eq("test1", 2)) + .close() + .where(eq("test1", 1)) + .orderBy(asc()) + .limit(1) + .limit(1, 1) + .sLimit(1) + .sLimit(1, 1) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithLimitOFFSET() { + Query query = + 
new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar LIMIT 1 OFFSET 2) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE, "foobar") + .limit(1, 2) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryCountAll() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT COUNT(*) FROM foobar) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .countAll() + .from("foobar") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithTables() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT COUNT(*) FROM foobar,foobar2) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .countAll() + .from(new String[] {"foobar", "foobar2"}) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithRawString() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT COUNT(*) FROM /*/) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .countAll() + .fromRaw("/*/") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), 
select.getDatabase()); + } + + @Test + public void testSubQueryAs() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1 AS newname FROM foobar) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .as("newname") + .from("foobar") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryColumns() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1,column2 FROM foobar) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .column("column2") + .from("foobar") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + + @Test + public void testSubQueryWhereOr() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1,column2 FROM foobar WHERE column1 > 1 + 2 OR column2 < column1 - 3) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .column("column2") + .from("foobar") + .where() + .or(gt("column1",op(1,ADD,2))) + .or(lt("column2",cop("column1",SUB,3))) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryGroupByFill() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1,column2 FROM foobar GROUP BY column1 fill(100)) WHERE column1 = 1 GROUP BY 
time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .column("column2") + .from("foobar") + .groupBy("column1") + .fill(100) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryGroupByFillFromSelect() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar GROUP BY column1 fill(100)) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .from("foobar") + .groupBy("column1") + .fill(100) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryGroupByFillFromSelectString() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT * FROM foobar GROUP BY column1 fill(null)) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .from("foobar") + .groupBy("column1") + .fill("null") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithSLimit() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1,column2 FROM foobar GROUP BY column1 fill(100) SLIMIT 100 SOFFSET 120) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .column("column2") + .from("foobar") + .groupBy("column1") + 
.sLimit(100,120) + .fill(100) + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + /*test * */ + + @Test + public void testSubQueryWithSubQueries() { + Query query = + new Query( + "SELECT column1,column2 FROM (" + + "SELECT MAX(column1),MAX(column2) FROM (" + + "SELECT * FROM (" + + "SELECT MAX(column1),MEAN(column2) FROM (" + + "SELECT DISTINCT test1 FROM foobar WHERE column1 > 3 GROUP BY column2" + + ")" + + ") WHERE column1 = 5 GROUP BY column2 LIMIT 50 OFFSET 10" + + ") WHERE column1 = 4" + + ") WHERE column3 = 5 LIMIT 5 OFFSET 10;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .max("column1") + .max("column2") + .fromSubQuery() + .all() + .fromSubQuery() + .max("column1") + .mean("column2") + .fromSubQuery() + .column("test1") + .distinct() + .from("foobar") + .where(gt("column1", 3)) + .groupBy("column2") + .close() + .close() + .where(eq("column1", 5)) + .groupBy("column2") + .limit(50, 10) + .close() + .where(eq("column1", 4)) + .close() + .where(eq("column3", 5)) + .limit(5, 10); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryWithLimitAndSOffSET() { + Query query = + new Query( + "SELECT column1,column2 FROM (" + + "SELECT MAX(column1),MAX(column2) FROM (" + + "SELECT * FROM (" + + "SELECT MAX(column1),MEAN(column2) FROM (" + + "SELECT DISTINCT test1 FROM foobar WHERE column1 > 3 GROUP BY column2 LIMIT 1 OFFSET 20 SLIMIT 2 SOFFSET 10" + + ")" + + ") WHERE column1 = 5 GROUP BY column2 ORDER BY time DESC LIMIT 50 OFFSET 10" + + ") WHERE column1 = 4 OR column1 = 7 GROUP BY time(4h) ORDER BY time ASC SLIMIT 3" + + ") WHERE column3 = 5 LIMIT 5 OFFSET 10;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + 
.column("column2") + .fromSubQuery(DATABASE) + .max("column1") + .max("column2") + .fromSubQuery() + .all() + .fromSubQuery() + .max("column1") + .mean("column2") + .fromSubQuery() + .column("test1") + .distinct() + .from("foobar") + .where(gt("column1", 3)) + .groupBy("column2") + .limit(1, 20) + .sLimit(2, 10) + .close() + .close() + .where(eq("column1", 5)) + .groupBy("column2") + .limit(50, 10) + .orderBy(desc()) + .close() + .where(eq("column1", 4)) + .or(eq("column1", 7)) + .sLimit(3) + .orderBy(asc()) + .groupBy(time(4L, HOUR)) + .close() + .where(eq("column3", 5)) + .limit(5, 10); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryTimeZoneColumns() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT column1,column2 FROM foobar tz('America/Chicago')) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .column("column1") + .column("column2") + .from("foobar") + .tz("America/Chicago") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQuerySelectRegexColumns() { + Query query = + new Query( + "SELECT column1,column2 FROM (SELECT /k/ FROM foobar tz('America/Chicago')) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .regex("/k/") + .from("foobar") + .tz("America/Chicago") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } + + @Test + public void testSubQueryNested() { + Query query = + new Query( + "SELECT column1,column2 FROM 
(SELECT /k/ FROM foobar WHERE (column1 = 2 OR column1 = 3) OR (column2 = 5 AND column2 = 7) tz('America/Chicago')) WHERE column1 = 1 GROUP BY time;", + DATABASE); + Query select = + select() + .requiresPost() + .column("column1") + .column("column2") + .fromSubQuery(DATABASE) + .regex("/k/") + .from("foobar") + .where() + .andNested() + .and(eq("column1", 2)) + .or(eq("column1", 3)) + .close() + .orNested() + .and(eq("column2", 5)) + .and(eq("column2", 7)) + .close() + .tz("America/Chicago") + .close() + .where(eq("column1", 1)) + .groupBy("time"); + + assertEquals(query.getCommand(), select.getCommand()); + assertEquals(query.getDatabase(), select.getDatabase()); + } +} diff --git a/src/test/nginx/nginx.conf b/src/test/nginx/nginx.conf new file mode 100644 index 000000000..06e03d492 --- /dev/null +++ b/src/test/nginx/nginx.conf @@ -0,0 +1,24 @@ +worker_processes auto; +pid /run/nginx.pid; + +events { + worker_connections 768; +} + +http { + server { + listen 8080; + location / { + proxy_pass http://influxdb:8086/; + } + location /influx-api/ { + proxy_pass http://influxdb:8086/; + } + } +} +stream { + server { + listen 8080 udp; + proxy_pass influxdb:8089; + } +} diff --git a/src/test/resources/org/influxdb/invalid_msgpack_errorBody.bin b/src/test/resources/org/influxdb/invalid_msgpack_errorBody.bin new file mode 100644 index 000000000..d7f4ed04e --- /dev/null +++ b/src/test/resources/org/influxdb/invalid_msgpack_errorBody.bin @@ -0,0 +1 @@ +e not found: "abc" \ No newline at end of file diff --git a/src/test/resources/org/influxdb/msgpack/msgpack_1.bin b/src/test/resources/org/influxdb/msgpack/msgpack_1.bin new file mode 100644 index 000000000..48e8c65b2 Binary files /dev/null and b/src/test/resources/org/influxdb/msgpack/msgpack_1.bin differ diff --git a/src/test/resources/org/influxdb/msgpack/msgpack_2.bin b/src/test/resources/org/influxdb/msgpack/msgpack_2.bin new file mode 100644 index 000000000..8574f8270 Binary files /dev/null and 
b/src/test/resources/org/influxdb/msgpack/msgpack_2.bin differ diff --git a/src/test/resources/org/influxdb/msgpack/msgpack_3.bin b/src/test/resources/org/influxdb/msgpack/msgpack_3.bin new file mode 100644 index 000000000..a474f573e Binary files /dev/null and b/src/test/resources/org/influxdb/msgpack/msgpack_3.bin differ diff --git a/src/test/resources/org/influxdb/msgpack_errorBody.bin b/src/test/resources/org/influxdb/msgpack_errorBody.bin new file mode 100644 index 000000000..54153f725 --- /dev/null +++ b/src/test/resources/org/influxdb/msgpack_errorBody.bin @@ -0,0 +1 @@ +errordatabase not found: "abc" \ No newline at end of file