diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..7af8a4f3d
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "maven" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "weekly"
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
new file mode 100644
index 000000000..8571e5738
--- /dev/null
+++ b/.github/workflows/master.yml
@@ -0,0 +1,49 @@
+name: master
+
+on:
+ push:
+ branches:
+ - master
+
+jobs:
+ build:
+ runs-on: ubuntu-20.04
+
+ strategy:
+ matrix:
+ jdk: [3-openjdk-17-slim, 3-jdk-14, 3-jdk-8-slim]
+ influxdb: ['1.1', '1.6', '1.8', '2.3', '2.4', '2.5']
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Build project
+ env:
+ MAVEN_JAVA_VERSION: "${{ matrix.jdk }}"
+ INFLUXDB_VERSION: "${{ matrix.influxdb }}"
+ run: ./compile-and-test.sh
+
+ - name: codecov
+ run: |
+ sudo apt-get update
+ sudo apt-get install gpg libdigest-sha-perl -y
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ curl -s https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import
+ gpgv codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x ./codecov
+ ./codecov
+ if: matrix.influxdb != '2.3' && matrix.influxdb != '2.4' && matrix.influxdb != '2.5'
+
+
+ # deploy:
+ # runs-on: ubuntu-20.04
+
+ # steps:
+ # - name: deploy snapshot
+ # env:
+ # secure: dAJK41xM2dN3q3xJMqAOP6uvrOvpjjUzmHr8mYNyepER8Lpms9/GqVUxqJv12wzCBqv1XZk/CXxrv3iBc2XjlxlrzIJGQChTinwDEigv0BMl/Gh0821ja7gwzMEUmg9f79m5tJxIFQ306cWz1gyRDqM3fLzskvM2ayzvynsNc/w=
+ # run: ./deploy-snapshot.sh
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
new file mode 100644
index 000000000..2a7b043e7
--- /dev/null
+++ b/.github/workflows/pr.yml
@@ -0,0 +1,43 @@
+name: Build from pull request
+
+on:
+ pull_request:
+ branches:
+ - master
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ strategy:
+ matrix:
+ jdk: [3-openjdk-17-slim, 3-jdk-14, 3-jdk-8-slim]
+ influxdb: ['1.1', '1.6', '1.8', '2.3', '2.4', '2.5']
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+
+ - name: Figure out if running fork PR
+ id: fork
+ run: '[ "${{ secrets.DOCKER_REGISTRY_TOKEN }}" == "" ] && echo "::set-output name=is_fork_pr::true" || echo "::set-output name=is_fork_pr::false"'
+
+ - name: Build project
+ env:
+ MAVEN_JAVA_VERSION: "${{ matrix.jdk }}"
+ INFLUXDB_VERSION: "${{ matrix.influxdb }}"
+ run: ./compile-and-test.sh
+
+ - name: codecov
+ run: |
+ sudo apt-get update
+ sudo apt-get install gpg libdigest-sha-perl -y
+ curl -Os https://uploader.codecov.io/latest/linux/codecov
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM
+ curl -Os https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig
+ curl -s https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import
+ gpgv codecov.SHA256SUM.sig codecov.SHA256SUM
+ shasum -a 256 -c codecov.SHA256SUM
+ chmod +x ./codecov
+ ./codecov
+ if: matrix.influxdb != '2.3' && matrix.influxdb != '2.4' && matrix.influxdb != '2.5'
diff --git a/.gitignore b/.gitignore
index 4bfa435bc..eac1a868b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,5 @@ target/
test-output/
.idea/
*iml
+.m2/
+.checkstyle
diff --git a/.maven-settings.xml b/.maven-settings.xml
new file mode 100644
index 000000000..864a9ec04
--- /dev/null
+++ b/.maven-settings.xml
@@ -0,0 +1,27 @@
+
+
+
+
+ ossrh
+ ${env.SONATYPE_USERNAME}
+ ${env.SONATYPE_PASSWORD}
+
+
+
+
+
+ ossrh
+
+ true
+
+
+ ${env.GPG_EXECUTABLE}
+ ${env.GPG_PASSPHRASE}
+
+
+
+
+
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 434bf711d..000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: java
-sudo: required
-
-jdk:
- - oraclejdk8
-
-addons:
- apt:
- packages:
- - oracle-java8-installer # Updates JDK 8 to the latest available.
-
-services:
- - docker
-script: ./compile-and-test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
-
-after_failure:
- - cat target/surefire-reports/*.txt
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e593f0a49..0cdc03965 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,83 +1,287 @@
-## v2.6 [unreleased]
+# Changelog
-#### Features
+## 2.25 [2025-03-26]
- - Switch to Java 1.8
- - Support chunking
- - Add a databaseExists method to InfluxDB interface
- - [Issue #289] (https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`.
- - Add a listener to notify asynchronous errors during batch flushes (https://github.com/influxdata/influxdb-java/pull/318).
+### Improvements
+- Add support for parameter binding to built queries [PR #1010](https://github.com/influxdata/influxdb-java/pull/1010)
-#### Fixes
+## 2.24 [2023-12-14]
- - [Issue #263] (https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface.
+### Improvements
+- `allFields` mode to Measurement annotation [PR #972](https://github.com/influxdata/influxdb-java/pull/972)
+- Support generic POJO super classes [PR #980](https://github.com/influxdata/influxdb-java/pull/980)
-#### Improvements
+## 2.23 [2022-07-07]
- - Update retrofit from 2.1 to 2.2
- - Update slf4j from 1.7.22 to 1.7.24
- - Update okhttp3 from 3.5 to 3.6
- - automatically adjust batch processor capacity [PR #282]
+### Improvements
+- Add implementation information to `Jar` manifest [PR #847](https://github.com/influxdata/influxdb-java/pull/847)
+
+### Fixes
+- Only the request to /write endpoint should be compressed by GZIP [PR #851](https://github.com/influxdata/influxdb-java/pull/851)
+
+## 2.22 [2021-09-17]
+
+### Improvements
+
+- `POST` query variants serializes `'q'` parameter into HTTP body [PR #765](https://github.com/influxdata/influxdb-java/pull/765)
+
+## 2.21 [2020-12-04]
+
+### Fixes
+
+- Binary compatibility with old version [PR #692](https://github.com/influxdata/influxdb-java/pull/692)
+- Wrong statement in manual [PR #695](https://github.com/influxdata/influxdb-java/pull/695)
+
+## 2.20 [2020-08-14]
+
+### Features
+- Add an option in `BatchOption` to prevent `InfluxDB#write` from blocking when actions queue is exhausted. [Issue #688](https://github.com/influxdata/influxdb-java/issues/688)
+- Added new signature to InfluxDBMapper.query() with params final Query query, final Class clazz, final String measurementName to leverage InfluxDBResultMapper.toPojo method with identical signature.
+
+### Improvements
+
+- Test: Added test for new InfluxDBMapper.query() signature, as well as test for existing InfluxDBMapper.query(Class clazz) signature (previously only InfluxDBMapper.query(Query query, Class clazz) was tested).
+
+## 2.19 [2020-05-18]
+
+## 2.18 [2020-04-17]
+
+### Fixes
+
+- Update to okhttp 4.x [PR #644](https://github.com/influxdata/influxdb-java/pull/644)
+
+## 2.17 [2019-12-06]
+
+### Fixes
+
+- Fixed runtime exception propagation in chunked query [Issue #639](https://github.com/influxdata/influxdb-java/issues/639)
+
+## 2.16 [2019-10-25]
+
+### Fixes
+
+- Add new annotation called TimeColumn for timestamp field in POJO bean, this can set Point time and precision field correctly, also avoid UnableToParseException when flush Point to influx.
+- Skip fields with NaN and infinity values when writing to InfluxDB
+ [Issue #614](https://github.com/influxdata/influxdb-java/issues/614)
+
+## 2.15 [2019-02-22]
+
+### Fixes
+
+- Close underlying OkHttpClient when closing [Issue #359](https://github.com/influxdata/influxdb-java/issues/359)
+- Update OkHttp to 3.13.1 which disables TLSv1 and TLSv1.1 by default, if still required you can enable them:
+
+```java
+OkHttpClient client = new OkHttpClient.Builder()
+ .connectionSpecs(Arrays.asList(ConnectionSpec.COMPATIBLE_TLS))
+ .build();
+```
+
+### Features
+
+- Query and BatchPoints do not mandate a database name, in which case the InfluxDB database
+ would be used [Issue #548](https://github.com/influxdata/influxdb-java/issues/548)
+- Add BatchPoints.Builder.points(Collection)
+ [Issue #451](https://github.com/influxdata/influxdb-java/issues/451)
+- @Column supports class inheritance
+ [Issue #367](https://github.com/influxdata/influxdb-java/issues/367)
+- BatchOptions to have .precision()
+ [Issue #532](https://github.com/influxdata/influxdb-java/issues/532)
+- Point.Builder.addFieldsFromPOJO to add Column fields from super class
+ [Issue #613](https://github.com/influxdata/influxdb-java/issues/613)
+
+## 2.14 [2018-10-12]
+
+### Fixes
+
+- Fixed chunked query exception handling [Issue #523](https://github.com/influxdata/influxdb-java/issues/523)
+- Memory leak in StringBuilder cache for Point.lineprotocol() [Issue #521](https://github.com/influxdata/influxdb-java/issues/521)
+
+## 2.13 [2018-09-12]
+
+### Fixes
+- MessagePack queries: Exception during parsing InfluxDB version [macOS] [PR #487](https://github.com/influxdata/influxdb-java/issues/487)
+- The InfluxDBResultMapper is able to handle results with a different time precision [PR #501](https://github.com/influxdata/influxdb-java/pull/501)
+- UDP target host address is cached [PR #502](https://github.com/influxdata/influxdb-java/issues/502)
+- Error messages from server not parsed correctly when using msgpack [PR #506](https://github.com/influxdata/influxdb-java/issues/506)
+- Response body must be closed properly in case of JSON response [PR #514](https://github.com/influxdata/influxdb-java/issues/514)
+- Time is serialized not consistently in MsgPack and Json, missing millis and nanos in MsgPack[PR #517](https://github.com/influxdata/influxdb-java/issues/517)
+
+### Features
+
+- Support for Basic Authentication [PR #492](https://github.com/influxdata/influxdb-java/pull/492)
+- Added possibility to reuse client as a core part of [influxdb-java-reactive](https://github.com/bonitoo-io/influxdb-java-reactive) client [PR #493](https://github.com/influxdata/influxdb-java/pull/493)
+- Retry capability for writing of BatchPoints [PR #503](https://github.com/influxdata/influxdb-java/issues/503)
+- Added `BiConsumer` with capability to discontinue a streaming query [Issue #515](https://github.com/influxdata/influxdb-java/issues/515)
+- Added `onComplete` action that is invoked after successfully end of streaming query [Issue #515](https://github.com/influxdata/influxdb-java/issues/515)
+
+## 2.12 [2018-07-31]
+
+### Fixes
+
+- Remove code which checks for unsupported influxdb versions [PR #474](https://github.com/influxdata/influxdb-java/pull/474)
+- Unpredictable errors when OkHttpClient.Builder instance is reused [PR #478](https://github.com/influxdata/influxdb-java/pull/478)
+
+### Features
+
+- Support for MessagePack [PR #471](https://github.com/influxdata/influxdb-java/pull/471)
+- Cache version per influxdb instance and reduce ping() calls for every query call [PR #472](https://github.com/influxdata/influxdb-java/pull/472)
+- FAQ list for influxdb-java [PR #475](https://github.com/influxdata/influxdb-java/pull/475)
+
+### Improvements
+
+- Test: Unit test to ensure tags should be sorted by key in line protocol (to reduce db server overheads) [PR #476](https://github.com/influxdata/influxdb-java/pull/476)
+
+## 2.11 [2018-07-02]
+
+### Features
+
+- Allow write precision of TimeUnit other than Nanoseconds [PR #321](https://github.com/influxdata/influxdb-java/pull/321)
+- Support dynamic measurement name in InfluxDBResultMapper [PR #423](https://github.com/influxdata/influxdb-java/pull/423)
+- Debug mode which allows HTTP requests being sent to the database to be logged [PR #450](https://github.com/influxdata/influxdb-java/pull/450)
+- Fix problem of connecting to the influx api with URL which does not point to the URL root (e.g. localhost:80/influx-api/) [PR #400](https://github.com/influxdata/influxdb-java/pull/400)
+
+## 2.10 [2018-04-26]
+
+### Fixes
+- Fix IllegalAccessException on setting value to POJOs, InfluxDBResultMapper is now more thread-safe [PR #432](https://github.com/influxdata/influxdb-java/pull/432)
+
+### Features
+
+- Support for parameter binding in queries ("prepared statements") [PR #429](https://github.com/influxdata/influxdb-java/pull/429)
+- Allow to figure out whether the Point.Builder has any field or not [PR #434](https://github.com/influxdata/influxdb-java/pull/434)
+
+### Improvements
+
+- Performance: use chained StringBuilder calls instead of single calls [PR #426](https://github.com/influxdata/influxdb-java/pull/426)
+- Performance: Escape fields and keys more efficiently [PR #424](https://github.com/influxdata/influxdb-java/pull/424)
+- Build: Speed up travis build [PR #435](https://github.com/influxdata/influxdb-java/pull/435)
+- Test: Update junit from 5.1.0 to 5.1.1 [PR #441](https://github.com/influxdata/influxdb-java/pull/441)
+
+## 2.9 [2018-02-27]
+
+### Features
+
+- New extensible API to configure batching properties. [PR #409]
+- New configuration property 'jitter interval' to avoid multiple clients hit the server periodically at the same time. [PR #409]
+- New strategy on handling errors, client performs retries writes when server gets overloaded [PR #410]
+- New exceptions give the client user easier way to classify errors reported by the server. [PR #410]
+
+## 2.8 [2017-12-06]
+
+### Fixes
+
+- InfluxDBResultMapper now is able to process QueryResult created when a GROUP BY clause was used [PR #345](https://github.com/influxdata/influxdb-java/pull/345)
+- InfluxDB will now handle the timestamp on its own if none is provided [PR#350](https://github.com/influxdata/influxdb-java/pull/350)
+
+### Features
+
+- API: add InfluxDB#createRetentionPolicy and InfluxDB#dropRetentionPolicy to be able to create and drop Retention Policies [PR #351](https://github.com/influxdata/influxdb-java/pull/351)
+- API: add InfluxDB#query that uses callbacks
+
+### Improvements
+
+- Build: all unit and integration test are now running with jdk8 and jdk9.
+- Test: migration to junit5
+
+## v2.7 [2017-06-26]
+
+### Features
+
+- Simplify write() methods for use cases writing all points to same database and retention policy [PR #327](https://github.com/influxdata/influxdb-java/pull/327)
+- QueryResult to Object mapper added [PR #341](https://github.com/influxdata/influxdb-java/pull/341)
+
+### Fixes
+
+- Replace RuntimeException with InfluxDBException [Issue #323](https://github.com/influxdata/influxdb-java/issues/323)
+
+### Improvements
+
+- Significant (~35%) performance improvements for write speed with less memory footprint. [PR #330](https://github.com/influxdata/influxdb-java/pull/330)
+- Drop guava runtime dependency which reduces jar size from 1MB -> 49KB [PR #322](https://github.com/influxdata/influxdb-java/pull/322)
+
+## v2.6 [2017-06-08]
+
+### Features
+
+- Switch to Java 1.8
+- Support chunking
+- Add a databaseExists method to InfluxDB interface
+- [Issue #289](https://github.com/influxdata/influxdb-java/issues/289) Batching enhancements: Pending asynchronous writes can be explicitly flushed via `InfluxDB.flush()`.
+- Add a listener to notify asynchronous errors during batch flushes [PR #318](https://github.com/influxdata/influxdb-java/pull/318).
+
+### Fixes
+
+- [Issue #263](https://github.com/influxdata/influxdb-java/issues/263) Add databaseExists method to InfluxDB interface.
+
+### Improvements
+
+- Update retrofit from 2.1 to 2.2
+- Update slf4j from 1.7.22 to 1.7.24
+- Update okhttp3 from 3.5 to 3.6
+- automatically adjust batch processor capacity [PR #282](https://github.com/influxdata/influxdb-java/pull/282)
## v2.5 [2016-12-05]
-#### Features
+### Features
- - Support writing by UDP protocal.
- - Support gzip compress for http request body.
- - Support setting thread factory for batch processor.
- - Support chunking
+- Support writing by UDP protocol.
+- Support gzip compress for http request body.
+- Support setting thread factory for batch processor.
+- Support chunking
-#### Fixes
+### Fixes
- - [Issue #162] (https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp.
- - [Issue #214] (https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result.
- - Write can't be always async if batch is enabled.
+- [Issue #162](https://github.com/influxdb/influxdb-java/issues/162) Write point using async batch mode with different rp will use same rp.
+- [Issue #214](https://github.com/influxdb/influxdb-java/issues/214) Send multiple queries in one query statement will get only one result.
+- Write can't be always async if batch is enabled.
-#### Improvements
+### Improvements
- - Remove the limit for database name: not contain '-'.
- - Support creating influxdb instance without username and password.
- - Add time related util methods for converting influxdb timestamp or unix epoch time.
- - correct exception type when disable batch twice.
+- Remove the limit for database name: not contain '-'.
+- Support creating influxdb instance without username and password.
+- Add time related util methods for converting influxdb timestamp or unix epoch time.
+- correct exception type when disable batch twice.
## v2.4 [2016-10-24]
-#### Features
- - now uses okhttp3 and retrofit2. As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp.
- - in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET).
+### Features
+
+- now uses okhttp3 and retrofit2. As a result, you can now pass an OkHttpClient.Builder to the InfluxDBFactory.connect if you wish to add more interceptors, etc, to OkHttp.
+- in InfluxDB 1.0.0, some queries now require a POST instead of GET. There is a flag on Query that allow this to be specified (default is still GET).
## v2.2 [2016-04-11]
-#### Features
+### Features
+
+- Allow writing of pre-constructed line protocol strings
- - Allow writing of pre-constructed line protocol strings
+### Fixes
-#### Fixes
+- Correct escaping of database names for create and delete database actions
+- Many bug fixes / improvements in general
- - Correct escaping of database names for create and delete database actions
- - Many bug fixes / improvements in general
+### Other
-#### Other
- - Deprecated `field()` method in preference for `addField()` methods.
+- Deprecated `field()` method in preference for `addField()` methods.
## v2.1 [2015-12-05]
-#### Features
+### Features
- - Extensions to fluent builder classes
- - Convenience methods for building Points
- - Allow integer types as field values
+- Extensions to fluent builder classes
+- Convenience methods for building Points
+- Allow integer types as field values
-#### Fixes
+### Fixes
- - Fixed escaping of tag and field values
- - Always uses nanosecond precision for time
- - Uses NumberFormat class for safer formatting of large numbers.
+- Fixed escaping of tag and field values
+- Always uses nanosecond precision for time
+- Uses NumberFormat class for safer formatting of large numbers.
## v2.0 [2015-07-17]
-#### Features
+### Features
- Compatible with InfluxDB version 0.9+
- Support for lineprotocol
@@ -89,7 +293,7 @@ No major functional changes or improvements. Mainly library updates and code str
## v1.3 [2014-10-22]
-#### Features
+### Features
- Compatible with InfluxDB Version up to 0.8
- API: add a InfluxDB#createDatabase(DatabaseConfiguration) to be able to create a new Database with ShardSpaces defined.
@@ -102,26 +306,26 @@ No major functional changes or improvements. Mainly library updates and code str
## v1.2 [2014-06-28]
-#### Features
+### Features
-- [Issue #2] (https://github.com/influxdb/influxdb-java/issues/2) Implement the last missing api calls ( interfaces, sync, forceCompaction, servers, shards)
+- [Issue #2](https://github.com/influxdb/influxdb-java/issues/2) Implement the last missing api calls ( interfaces, sync, forceCompaction, servers, shards)
- use (http://square.github.io/okhttp/, okhttp) instead of java builtin httpconnection to get failover for the http endpoint.
-#### Tasks
+### Tasks
-- [Issue #8] (https://github.com/influxdb/influxdb-java/issues/8) Use com.github.docker-java which replaces com.kpelykh for Integration tests.
-- [Issue #6] (https://github.com/influxdb/influxdb-java/issues/6) Update Retrofit to 1.6.0
-- [Issue #7] (https://github.com/influxdb/influxdb-java/issues/7) Update Guava to 17.0
+- [Issue #8](https://github.com/influxdb/influxdb-java/issues/8) Use com.github.docker-java which replaces com.kpelykh for Integration tests.
+- [Issue #6](https://github.com/influxdb/influxdb-java/issues/6) Update Retrofit to 1.6.0
+- [Issue #7](https://github.com/influxdb/influxdb-java/issues/7) Update Guava to 17.0
- fix dependency to guava.
## v1.1 [2014-05-31]
-#### Features
+### Features
- Add InfluxDB#version() to get the InfluxDB Server version information.
-- Changed InfluxDB#createDatabase() to match (https://github.com/influxdb/influxdb/issues/489) without replicationFactor.
+- Changed InfluxDB#createDatabase() to match [Issue #489](https://github.com/influxdb/influxdb/issues/489) without replicationFactor.
- Updated Retrofit from 1.5.0 -> 1.5.1
## v1.0 [2014-05-6]
- * Initial Release
+- Initial Release
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 000000000..621c1526a
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,173 @@
+# Frequently Asked Questions
+
+## Functionality
+
+- [Frequently Asked Questions](#frequently-asked-questions)
+ - [Functionality](#functionality)
+ - [Security](#security)
+ - [Is the batch part of the client thread safe](#is-the-batch-part-of-the-client-thread-safe)
+ - [If multiple threads are accessing it, are they all adding Points to the same batch ?](#if-multiple-threads-are-accessing-it-are-they-all-adding-points-to-the-same-batch)
+ - [And if so, is there a single thread in the background that is emptying batch to the server ?](#and-if-so-is-there-a-single-thread-in-the-background-that-is-emptying-batch-to-the-server)
+ - [If there is an error during this background process, is it propagated to the rest of the client ?](#if-there-is-an-error-during-this-background-process-is-it-propagated-to-the-rest-of-the-client)
+ - [How the client responds to concurrent write backpressure from server ?](#how-the-client-responds-to-concurrent-write-backpressure-from-server)
+ - [Is there a way to tell that all query chunks have arrived ?](#is-there-a-way-to-tell-that-all-query-chunks-have-arrived)
+ - [How to handle exceptions while using async chunked queries ?](#how-to-handle-exceptions-while-using-async-chunked-queries)
+ - [Is there a way to tell the system to stop sending more chunks once I've found what I'm looking for ?](#is-there-a-way-to-tell-the-system-to-stop-sending-more-chunks-once-ive-found-what-im-looking-for)
+ - [Is default config security setup TLS 1.2 ?](#is-default-config-security-setup-tls-12)
+ - [How to use SSL client certificate authentication](#how-to-use-ssl-client-certificate-authentication)
+
+## Security
+
+- [Is default config security setup TLS 1.2 ?](#is-default-config-security-setup-tls-12-)
+- [How to use SSL client certificate authentication](#how-to-use-ssl-client-certificate-authentication-)
+
+## Is the batch part of the client thread safe
+
+Yes, the __BatchProcessor__ uses a __BlockingQueue__ and the __RetryCapableBatchWriter__ is synchronized on its __write__ method
+
+```java
+org.influxdb.impl.RetryCapableBatchWriter.write(Collection)
+
+```
+
+## If multiple threads are accessing it, are they all adding Points to the same batch ?
+
+If they share the same InfluxDbImpl instance, the answer is Yes (all writing points are put to the __BlockingQueue__)
+
+## And if so, is there a single thread in the background that is emptying batch to the server ?
+
+Yes, there is one worker thread that is scheduled to periodically flush the __BlockingQueue__
+
+## If there is an error during this background process, is it propagated to the rest of the client ?
+
+Yes, on initializing BatchOptions, you can pass an exceptionHandler, this handler is used to handle any batch writing that causes a non-recoverable exception or when a batch is evicted due to a retry buffer capacity
+(please refer to __BatchOptions.bufferLimit(int)__ for more details)
+(list of non-recoverable error : [Handling-errors-of-InfluxDB-under-high-load](https://github.com/influxdata/influxdb-java/wiki/Handling-errors-of-InfluxDB-under-high-load))
+
+## How the client responds to concurrent write backpressure from server ?
+
+Concurrent WRITE throttling at server side is controlled by the trio (__max-concurrent-write-limit__, __max-enqueued-write-limit__, __enqueued-write-timeout__)
+for example, you can have these in influxdb.conf
+
+```properties
+max-concurrent-write-limit = 2
+max-enqueued-write-limit = 1
+enqueued-write-timeout = 1000
+
+```
+
+(more info at this [PR #9888 HTTP Write Throttle](https://github.com/influxdata/influxdb/pull/9888/files))
+
+If the number of concurrent writes reaches the threshold, then any further write will be immediately returned with
+
+```bash
+org.influxdb.InfluxDBIOException: java.net.SocketException: Connection reset by peer: socket write error
+ at org.influxdb.impl.InfluxDBImpl.execute(InfluxDBImpl.java:692)
+ at org.influxdb.impl.InfluxDBImpl.write(InfluxDBImpl.java:428)
+
+```
+
+From version 2.9, influxdb-java introduces a new error handling feature: the client will try to back off and rewrite failed writes on some recoverable errors (list of recoverable errors: [Handling-errors-of-InfluxDB-under-high-load](https://github.com/influxdata/influxdb-java/wiki/Handling-errors-of-InfluxDB-under-high-load))
+
+So in case the number of write requests exceeds Concurrent write setting at server side, influxdb-java can try to make sure no writing points get lost (due to rejection from server)
+
+## Is there a way to tell that all query chunks have arrived ?
+
+Yes, there is __onComplete__ action that is invoked after successfully end of stream.
+
+```java
+influxDB.query(new Query("SELECT * FROM disk", "telegraf"), 10_000,
+ queryResult -> {
+ System.out.println("result = " + queryResult);
+ },
+ () -> {
+ System.out.println("The query successfully finished.");
+ });
+```
+
+## How to handle exceptions while using async chunked queries ?
+
+Exception handling for chunked queries can be handled by __onFailure__ error
+consumer.
+
+```java
+
+influxDB.query(query, chunksize,
+ //onNext result consumer
+ (cancellable, queryResult) -> {
+ System.out.println("Process queryResult - " + queryResult.toString());
+ }
+ //onComplete executable
+ , () -> {
+ System.out.println("On Complete - the query finished successfully.");
+ },
+ //onFailure error handler
+ throwable -> {
+ System.out.println("On Failure - " + throwable.getLocalizedMessage());
+ });
+```
+
+## Is there a way to tell the system to stop sending more chunks once I've found what I'm looking for ?
+
+Yes, there is __onNext__ bi-consumer with capability to discontinue a streaming query.
+
+```java
+influxDB.query(new Query("SELECT * FROM disk", "telegraf"), 10_000, (cancellable, queryResult) -> {
+
+ // found what I'm looking for ?
+ if (foundRequest(queryResult)) {
+ // yes => cancel query
+ cancellable.cancel();
+ }
+
+ // no => process next result
+ processResult(queryResult);
+});
+```
+
+## Is default config security setup TLS 1.2 ?
+
+(answer need to be verified)
+
+To construct an InfluxDBImpl you will need to pass a OkHttpClient.Builder instance.
+At this point you are able to set your custom SSLSocketFactory via method OkHttpClient.Builder.sslSocketFactory(…)
+
+In case you don’t set it, OkHttp will use the system default (Java platform dependent), I tested in Java 8 (influxdb-java has CI test in Java 8 and 10) and see the default SSLContext support these protocols
+SSLv3/TLSv1/TLSv1.1/TLSv1.2
+
+So if the server supports TLS1.2, the communication should be encrypted by TLS 1.2 (during the handshake the client will provide the list of accepted security protocols and the server will pick one, so this case the server would pick TLS 1.2)
+
+## How to use SSL client certificate authentication
+
+To use SSL certificate authentication you need to setup `SslSocketFactory` on OkHttpClient.Builder.
+
+Here is the example, how to create InfluxDB client with the new SSLContext with custom identity keystore (p12) and truststore (jks):
+
+```java
+KeyStore keyStore = KeyStore.getInstance("PKCS12");
+keyStore.load(new FileInputStream("conf/keystore.p12"), "changeme".toCharArray());
+
+KeyStore trustStore = KeyStore.getInstance("JKS");
+trustStore.load(new FileInputStream("conf/trustStore.jks"), "changeme".toCharArray());
+
+SSLContext sslContext = SSLContext.getInstance("SSL");
+
+KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
+keyManagerFactory.init(keyStore, "changeme".toCharArray());
+
+TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+trustManagerFactory.init(trustStore);
+
+TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
+
+sslContext.init(keyManagerFactory.getKeyManagers(), trustManagers, new SecureRandom());
+sslContext.getDefaultSSLParameters().setNeedClientAuth(true);
+
+OkHttpClient.Builder okhttpClientBuilder = new OkHttpClient.Builder();
+okhttpClientBuilder.sslSocketFactory(sslContext.getSocketFactory(), (X509TrustManager) trustManagers[0]);
+
+InfluxDB influxDB = InfluxDBFactory.connect("https://proxy_host:9086", okhttpClientBuilder);
+```
+
+InfluxDB (v1.6.2) does not have built-in support for client certificate ssl authentication.
+SSL must be handled by http proxy such as Haproxy, nginx...
diff --git a/INFLUXDB_MAPPER.md b/INFLUXDB_MAPPER.md
new file mode 100644
index 000000000..6540f389d
--- /dev/null
+++ b/INFLUXDB_MAPPER.md
@@ -0,0 +1,50 @@
+### InfluxDBMapper
+
+In case you want to use models only, you can use the InfluxDBMapper to save and load measurements.
+You can create models that specify the database the measurement and the retention policy.
+
+```Java
+@Measurement(name = "cpu",database="servers", retentionPolicy="autogen",timeUnit = TimeUnit.MILLISECONDS)
+public class Cpu {
+ @Column(name = "time")
+ private Instant time;
+ @Column(name = "host", tag = true)
+ private String hostname;
+ @Column(name = "region", tag = true)
+ private String region;
+ @Column(name = "idle")
+ private Double idle;
+ @Column(name = "happydevop")
+ private Boolean happydevop;
+ @Column(name = "uptimesecs")
+ private Long uptimeSecs;
+ // getters (and setters if you need)
+}
+```
+
+Save operation using a model.
+
+```Java
+Cpu cpu = .., create the cpu measure
+influxDBMapper.save(cpu);
+```
+
+Load data using a model.
+
+```java
+Cpu persistedCpu = influxDBMapper.query(Cpu.class).get(0);
+```
+
+Load data using a query and specify the model for mapping.
+
+```java
+Query query = ... create your query
+List persistedMeasure = influxDBMapper.query(query,Cpu.class);
+```
+
+#### InfluxDBMapper limitations
+
+Tags are automatically converted to strings, since tags are strings in InfluxDB.
+Supported values for fields are boolean, int, long, double, Boolean, Integer, Long, Double.
+The time field should be of type `Instant`.
+If you do not specify the time or set a value then the current system time shall be used with the timeunit specified.
diff --git a/LICENSE b/LICENSE
index 766a0a595..f21351ced 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) {{{year}}} {{{fullname}}}
+Copyright (c) 2014-2017 Stefan Majer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/MANUAL.md b/MANUAL.md
new file mode 100644
index 000000000..99e1248b5
--- /dev/null
+++ b/MANUAL.md
@@ -0,0 +1,460 @@
+# Manual
+
+## Quick start
+
+The code below is similar to the one found on the README.md file but with comments removed and rows numbered for better reference.
+
+```Java
+final String serverURL = "http://127.0.0.1:8086", username = "root", password = "root";
+final InfluxDB influxDB = InfluxDBFactory.connect(serverURL, username, password); // (1)
+
+String databaseName = "NOAA_water_database";
+influxDB.query(new Query("CREATE DATABASE " + databaseName));
+influxDB.setDatabase(databaseName); // (2)
+
+String retentionPolicyName = "one_day_only";
+influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
+ + " ON " + databaseName + " DURATION 1d REPLICATION 1 DEFAULT"));
+influxDB.setRetentionPolicy(retentionPolicyName); // (3)
+
+influxDB.enableBatch(
+ BatchOptions.DEFAULTS
+ .threadFactory(runnable -> {
+ Thread thread = new Thread(runnable);
+ thread.setDaemon(true);
+ return thread;
+ })
+); // (4)
+
+Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close)); // (5)
+
+influxDB.write(Point.measurement("h2o_feet") // (6)
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .tag("location", "santa_monica")
+ .addField("level description", "below 3 feet")
+ .addField("water_level", 2.064d)
+ .build());
+
+influxDB.write(Point.measurement("h2o_feet") // (6)
+ .tag("location", "coyote_creek")
+ .addField("level description", "between 6 and 9 feet")
+ .addField("water_level", 8.12d)
+ .build());
+
+Thread.sleep(5_000L);
+
+QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet")); // (7)
+
+System.out.println(queryResult);
+// It will print something like:
+// QueryResult [results=[Result [series=[Series [name=h2o_feet, tags=null,
+// columns=[time, level description, location, water_level],
+// values=[
+// [2020-03-22T20:50:12.929Z, below 3 feet, santa_monica, 2.064],
+// [2020-03-22T20:50:12.929Z, between 6 and 9 feet, coyote_creek, 8.12]
+// ]]], error=null]], error=null]
+```
+
+### Connecting to InfluxDB
+
+(1) The `InfluxDB` client is thread-safe and our recommendation is to have a single instance per application and reuse it, when possible. Every `InfluxDB` instance keeps multiple data structures, including those used to manage different pools like HTTP clients for reads and writes.
+
+It's possible to have just one client even when reading or writing to multiple InfluxDB databases and this will be shown later here.
+
+### Setting a default database (optional)
+
+(2) If you are not querying different databases with a single `InfluxDB` client, it's possible to set a default database name and all queries (reads and writes) from this `InfluxDB` client will be executed against the default database.
+
+If we only comment out the line (2) then all reads and writes queries would fail. To avoid this, we need to pass the database name as parameter to `BatchPoints` (writes) and to `Query` (reads). For example:
+
+```Java
+// ...
+String databaseName = "NOAA_water_database";
+// influxDB.setDatabase() won't be called...
+String retentionPolicyName = "one_day_only";
+// ...
+
+BatchPoints batchPoints = BatchPoints.database(databaseName).retentionPolicy(retentionPolicyName).build();
+
+batchPoints.point(Point.measurement("h2o_feet")
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .tag("location", "santa_monica")
+ .addField("level description", "below 3 feet")
+ .addField("water_level", 2.064d)
+ .build());
+
+// ...
+influxDB.write(batchPoints);
+// ...
+QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet", databaseName));
+// ...
+influxDB.close();
+```
+
+It's possible to use both approaches at the same time: set a default database using `influxDB.setDatabase` and read/write passing a `databaseName` as parameter. On this case, the `databaseName` passed as parameter will be used.
+
+### Setting a default retention policy (optional)
+
+(3) TODO: like setting a default database, explain here how it works with RP.
+
+### Enabling batch writes
+
+(4) TODO: explanation about BatchOption parameters:
+
+```Java
+ // default values here are consistent with Telegraf
+ public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000;
+ public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000;
+ public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0;
+ public static final int DEFAULT_BUFFER_LIMIT = 10000;
+ public static final TimeUnit DEFAULT_PRECISION = TimeUnit.NANOSECONDS;
+ public static final boolean DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION = false;
+```
+#### Configuring behaviour of batch writes when the action queue exhausts
+With batching enabled, the client provides two options on how to deal with **action queue** (where the points are accumulated as a batch) exhaustion.
+1. When `dropActionsOnQueueExhaustion` is `false` (default value), `InfluxDB#write` will be blocked till the space is created in the action queue.
+2. When `dropActionsOnQueueExhaustion` is `true`, new writes using `InfluxDB#write` will be dropped and `droppedActionHandler` will be called.
+ Example usage:
+ ```Java
+ influxDB.enableBatch(BatchOptions.DEFAULTS.dropActionsOnQueueExhaustion(true)
+ .droppedActionHandler((point) -> log.error("Point dropped due to action queue exhaustion.")));
+ ```
+
+
+#### Configuring the jitter interval for batch writes
+
+When using a large number of influxdb-java clients against a single server it may happen that all the clients
+submit their buffered points at the same time and possibly overload the server. This usually happens
+when all the clients are started at once - for instance as members of cloud hosted large cluster networks.
+If all the clients have the same flushDuration set, this situation will repeat periodically.
+
+To solve this situation, influxdb-java offers an option to offset the flushDuration by a random interval so that
+the clients will flush their buffers in different intervals:
+
+```Java
+influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(500));
+```
+
+#### Error handling with batch writes
+
+With batching enabled, the client provides two strategies for dealing with errors thrown by the InfluxDB server.
+
+ 1. 'One shot' write - on failed write request to InfluxDB server an error is reported to the client using the means mentioned above.
+ 2. 'Retry on error' write (used by default) - on failed write the request by the client is repeated after batchInterval elapses (if there is a chance the write will succeed - the error was caused by overloading the server, a network error etc.)
+ When new data points are written before the previous (failed) points are successfully written, those are queued inside the client and wait until older data points are successfully written.
+ Size of this queue is limited and configured by `BatchOptions.bufferLimit` property. When the limit is reached, the oldest points in the queue are dropped. 'Retry on error' strategy is used when individual write batch size defined by `BatchOptions.actions` is lower than `BatchOptions.bufferLimit`.
+
+#### Ensure application exit when batching is enabled
+`BatchOptions.DEFAULTS` creates a non-daemon thread pool which prevents the JVM from initiating shutdown in the case of
+exceptions or successful completion of the main thread. This will prevent shutdown hooks (many frameworks and plain JVM
+applications use these to close/ cleanup resources) from running, preventing graceful termination of the application.
+
+Thus, configuring batch options with a daemon thread pool will solve this issue and will for example ensure that the registered
+(5) shutdown hook is run to close the `InfluxDB` client properly (flushing and closing of resources will happen).
+
+### Close InfluxDB Client on JVM Termination
+(5) In order to ensure that in-flight points are flushed and resources are released properly, it is essential to call
+`influxDB.close()` on the client when it is no longer required.
+
+Registering a shutdown hook is a good way to ensure that this is done on application termination regardless of exceptions
+that are thrown in the main thread of the code. Note that if you are using a framework, do check the documentation for its
+way of configuring shutdown lifecycle hooks or if it might already be calling `close` automatically.
+
+
+### Writing to InfluxDB
+
+(6) ...
+
+`----8<----BEGIN DRAFT----8<----`
+
+Any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level.
+If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer<Iterable<Point>, Throwable>` using the overloaded `enableBatch` method:
+
+```Java
+influxDB.enableBatch(BatchOptions.DEFAULTS.exceptionHandler(
+ (failedPoints, throwable) -> { /* custom error handling here */ })
+);
+```
+
+`----8<----END DRAFT----8<----`
+
+#### Writing synchronously to InfluxDB (not recommended)
+
+If you want to write the data points synchronously to InfluxDB and handle the errors (as they may happen) with every write:
+
+`----8<----BEGIN DRAFT----8<----`
+
+```Java
+InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root");
+String dbName = "aTimeSeries";
+influxDB.query(new Query("CREATE DATABASE " + dbName));
+String rpName = "aRetentionPolicy";
+influxDB.query(new Query("CREATE RETENTION POLICY " + rpName + " ON " + dbName + " DURATION 30h REPLICATION 2 DEFAULT"));
+
+BatchPoints batchPoints = BatchPoints
+ .database(dbName)
+ .tag("async", "true")
+ .retentionPolicy(rpName)
+ .consistency(ConsistencyLevel.ALL)
+ .build();
+Point point1 = Point.measurement("cpu")
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .addField("idle", 90L)
+ .addField("user", 9L)
+ .addField("system", 1L)
+ .build();
+Point point2 = Point.measurement("disk")
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .addField("used", 80L)
+ .addField("free", 1L)
+ .build();
+batchPoints.point(point1);
+batchPoints.point(point2);
+influxDB.write(batchPoints);
+Query query = new Query("SELECT idle FROM cpu", dbName);
+influxDB.query(query);
+influxDB.query(new Query("DROP RETENTION POLICY " + rpName + " ON " + dbName));
+influxDB.query(new Query("DROP DATABASE " + dbName));
+```
+
+`----8<----END DRAFT----8<----`
+
+### Reading from InfluxDB
+
+(7) ...
+
+#### Query using Callbacks
+
+influxdb-java now supports returning results of a query via callbacks. Only one
+of the following consumers is going to be called once:
+
+```Java
+this.influxDB.query(new Query("SELECT idle FROM cpu", dbName), queryResult -> {
+ // Do something with the result...
+}, throwable -> {
+ // Do something with the error...
+});
+```
+
+#### Query using parameter binding (a.k.a. "prepared statements")
+
+If your Query is based on user input, it is good practice to use parameter binding to avoid [injection attacks](https://en.wikipedia.org/wiki/SQL_injection).
+You can create queries with parameter binding with the help of the QueryBuilder:
+
+```Java
+Query query = QueryBuilder.newQuery("SELECT * FROM cpu WHERE idle > $idle AND system > $system")
+ .forDatabase(dbName)
+ .bind("idle", 90)
+ .bind("system", 5)
+ .create();
+QueryResult results = influxDB.query(query);
+```
+
+The values of the bind() calls are bound to the placeholders in the query ($idle, $system).
+
+## Advanced Usage
+
+### Gzip's support
+
+The influxdb-java client doesn't enable gzip compression for the http request body by default. If you want to enable gzip to reduce the transferred data size, you can call:
+
+```Java
+influxDB.enableGzip()
+```
+
+### UDP's support
+
+The influxdb-java client now supports the UDP protocol. You can call the following methods directly to write through UDP.
+
+```Java
+public void write(final int udpPort, final String records);
+public void write(final int udpPort, final List<String> records);
+public void write(final int udpPort, final Point point);
+```
+
+Note: make sure the total size of the written content does not exceed the UDP protocol limit (64K); otherwise, use http instead of udp.
+
+### Chunking support
+
+influxdb-java client now supports influxdb chunking. The following example uses a chunkSize of 20 and invokes the specified Consumer (e.g. System.out.println) for each received QueryResult
+
+```Java
+Query query = new Query("SELECT idle FROM cpu", dbName);
+influxDB.query(query, 20, queryResult -> System.out.println(queryResult));
+```
+
+### QueryResult mapper to POJO
+
+An alternative way to handle the QueryResult object is now available.
+Supposing that you have a measurement _CPU_:
+
+```sql
+> INSERT cpu,host=serverA,region=us_west idle=0.64,happydevop=false,uptimesecs=123456789i
+>
+> select * from cpu
+name: cpu
+time happydevop host idle region uptimesecs
+---- ---------- ---- ---- ------ ----------
+2017-06-20T15:32:46.202829088Z false serverA 0.64 us_west 123456789
+```
+
+And the following tag keys:
+
+```sql
+> show tag keys from cpu
+name: cpu
+tagKey
+------
+host
+region
+```
+
+1. Create a POJO to represent your measurement. For example:
+
+```Java
+public class Cpu {
+ private Instant time;
+ private String hostname;
+ private String region;
+ private Double idle;
+ private Boolean happydevop;
+ private Long uptimeSecs;
+ // getters (and setters if you need)
+}
+```
+
+2. Add @Measurement, @TimeColumn and @Column annotations (column names default to field names unless otherwise specified):
+
+```Java
+@Measurement(name = "cpu")
+public class Cpu {
+ @TimeColumn
+ @Column
+ private Instant time;
+ @Column(name = "host", tag = true)
+ private String hostname;
+ @Column(tag = true)
+ private String region;
+ @Column
+ private Double idle;
+ @Column
+ private Boolean happydevop;
+ @Column(name = "uptimesecs")
+ private Long uptimeSecs;
+ // getters (and setters if you need)
+}
+```
+
+Alternatively, you can use:
+
+```Java
+@Measurement(name = "cpu", allFields = true)
+public class Cpu {
+ @TimeColumn
+ private Instant time;
+ @Column(name = "host", tag = true)
+ private String hostname;
+ @Column(tag = true)
+ private String region;
+ private Double idle;
+ private Boolean happydevop;
+ @Column(name = "uptimesecs")
+ private Long uptimeSecs;
+ // getters (and setters if you need)
+}
+```
+
+Or (if you're on JDK14+ and/or [Android SDK34+](https://android-developers.googleblog.com/2023/06/records-in-android-studio-flamingo.html)):
+
+```Java
+@Measurement(name = "cpu", allFields = true)
+public record Cpu(
+ @TimeColumn
+ Instant time,
+ @Column(name = "host", tag = true)
+ String hostname,
+ @Column(tag = true)
+ String region,
+ Double idle,
+ Boolean happydevop,
+ @Column(name = "uptimesecs")
+ Long uptimeSecs
+) {}
+```
+
+3. Call _InfluxDBResultMapper.toPOJO(...)_ to map the QueryResult to your POJO:
+
+```java
+InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "root");
+String dbName = "myTimeseries";
+QueryResult queryResult = influxDB.query(new Query("SELECT * FROM cpu", dbName));
+
+InfluxDBResultMapper resultMapper = new InfluxDBResultMapper(); // thread-safe - can be reused
+List<Cpu> cpuList = resultMapper.toPOJO(queryResult, Cpu.class);
+```
+
+### Writing using POJO
+
+The same way we use `annotations` to transform data to POJO, we can write data as POJO.
+Having the same POJO class Cpu
+
+```java
+String dbName = "myTimeseries";
+String rpName = "aRetentionPolicy";
+// Cpu has annotations @Measurement,@TimeColumn and @Column
+Cpu cpu = new Cpu();
+// ... setting data
+
+Point point = Point.measurementByPOJO(cpu.getClass()).addFieldsFromPOJO(cpu).build();
+
+influxDB.write(dbName, rpName, point);
+```
+
+#### QueryResult mapper limitations
+
+* If your InfluxDB query contains multiple SELECT clauses, you will have to call InfluxResultMapper#toPOJO() multiple times to map every measurement returned by QueryResult to the respective POJO;
+* If your InfluxDB query contains multiple SELECT clauses **for the same measurement**, InfluxResultMapper will process all results because there is no way to distinguish which one should be mapped to your POJO. It may result in an invalid collection being returned;
+* A Class field annotated with _@Column(..., tag = true)_ (i.e. a [InfluxDB Tag](https://docs.influxdata.com/influxdb/v1.2/concepts/glossary/#tag-value)) must be declared as _String_.
+
+#### QueryBuilder
+
+An alternative way to create InfluxDB queries is available. By using the [QueryBuilder](QUERY_BUILDER.md) you can create queries using java instead of providing the influxdb queries as strings.
+
+#### Generic POJO super classes
+
+POJO classes can have generic super classes, for cases where multiple measurements have a similar structure, and differ by type(s), as in:
+
+```java
+public class SuperMeasurement<T> {
+ @Column
+ @TimeColumn
+ private Instant time;
+ @Column
+ T value;
+ // Other common columns and tags
+}
+
+public class SubMeasurement extends SuperMeasurement<Double> {
+ // Any specific columns and tags
+}
+```
+
+### InfluxDBMapper
+
+In case you want to save and load data using models you can use the [InfluxDBMapper](INFLUXDB_MAPPER.md).
+
+### Other Usages
+
+For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java")
+
+### Publishing
+
+This is a
+[link](https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide)
+to the sonatype oss guide to publishing. I'll update this section once
+the [jira ticket](https://issues.sonatype.org/browse/OSSRH-9728) is
+closed and I'm able to upload artifacts to the sonatype repositories.
+
+### Frequently Asked Questions
+
+This is a [FAQ](FAQ.md) list for influxdb-java.
diff --git a/QUERY_BUILDER.md b/QUERY_BUILDER.md
new file mode 100644
index 000000000..d84e6d255
--- /dev/null
+++ b/QUERY_BUILDER.md
@@ -0,0 +1,606 @@
+# QueryBuilder
+
+Supposing that you have a measurement _h2o_feet_:
+
+```sqlite-psql
+> SELECT * FROM "h2o_feet"
+
+name: h2o_feet
+--------------
+time level description location water_level
+2015-08-18T00:00:00Z below 3 feet santa_monica 2.064
+2015-08-18T00:00:00Z between 6 and 9 feet coyote_creek 8.12
+[...]
+2015-09-18T21:36:00Z between 3 and 6 feet santa_monica 5.066
+2015-09-18T21:42:00Z between 3 and 6 feet santa_monica 4.938
+```
+
+## The basic SELECT statement
+
+Issue simple select statements
+
+```java
+Query query = select().from(DATABASE,"h2o_feet");
+```
+
+```sqlite-psql
+SELECT * FROM "h2o_feet"
+```
+
+Select specific tags and fields from a single measurement
+
+```java
+Query query = select("level description","location","water_level").from(DATABASE,"h2o_feet");
+```
+
+```sqlite-psql
+SELECT "level description",location,water_level FROM h2o_feet;
+```
+
+Select specific tags and fields from a single measurement, and provide their identifier type
+
+```java
+Query query = select().column("\"level description\"::field").column("\"location\"::tag").column("\"water_level\"::field").from(DATABASE,"h2o_feet");
+```
+
+```sqlite-psql
+SELECT "level description"::field,"location"::tag,"water_level"::field FROM h2o_feet;
+```
+
+Select all fields from a single measurement
+
+```java
+Query query = select().raw("*::field").from(DATABASE,"h2o_feet");
+```
+
+```sqlite-psql
+SELECT *::field FROM h2o_feet;
+```
+
+Select a specific field from a measurement and perform basic arithmetic
+
+```java
+Query query = select().op(op(cop("water_level",MUL,2),"+",4)).from(DATABASE,"h2o_feet");
+```
+
+```sqlite-psql
+SELECT (water_level * 2) + 4 FROM h2o_feet;
+```
+
+Select all data from more than one measurement
+
+```java
+Query query = select().from(DATABASE,"\"h2o_feet\",\"h2o_pH\"");
+```
+
+```sqlite-psql
+SELECT * FROM "h2o_feet","h2o_pH";
+```
+
+Select all data from a fully qualified measurement
+
+```java
+Query query = select().from(DATABASE,"\"NOAA_water_database\".\"autogen\".\"h2o_feet\"");
+```
+
+```sqlite-psql
+SELECT * FROM "NOAA_water_database"."autogen"."h2o_feet";
+```
+
+Select data that have specific field key-values
+
+```java
+Query query = select().from(DATABASE,"h2o_feet").where(gt("water_level",8));
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE water_level > 8;
+```
+
+Select data that have a specific string field key-value
+
+```java
+Query query = select().from(DATABASE,"h2o_feet").where(eq("level description","below 3 feet"));
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE "level description" = 'below 3 feet';
+```
+
+Select data that have a specific field key-value and perform basic arithmetic
+
+```java
+Query query = select().from(DATABASE,"h2o_feet").where(gt(cop("water_level",ADD,2),11.9));
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE (water_level + 2) > 11.9;
+```
+
+Select data that have a specific tag key-value
+
+```java
+Query query = select().column("water_level").from(DATABASE,"h2o_feet").where(eq("location","santa_monica"));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica';
+```
+
+Select data that have specific field key-values and tag key-values
+
+```java
+Query query = select().column("water_level").from(DATABASE,"h2o_feet")
+ .where(neq("location","santa_monica"))
+ .andNested()
+ .and(lt("water_level",-0.59))
+ .or(gt("water_level",9.95))
+ .close();
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location <> 'santa_monica' AND (water_level < -0.59 OR water_level > 9.95);
+```
+
+Select data that have specific timestamps
+
+```java
+Query query = select().from(DATABASE,"h2o_feet")
+ .where(gt("time",subTime(7,DAY)));
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE time > now() - 7d;
+```
+
+## The GROUP BY clause
+
+Group query results by a single tag
+
+```java
+Query query = select().mean("water_level").from(DATABASE,"h2o_feet") .groupBy("location");
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet GROUP BY location;
+```
+
+Group query results by more than one tag
+
+```java
+Query query = select().mean("index").from(DATABASE,"h2o_feet")
+ .groupBy("location","randtag");
+```
+
+```sqlite-psql
+SELECT MEAN(index) FROM h2o_feet GROUP BY location,randtag;
+```
+
+Group query results by all tags
+
+```java
+Query query = select().mean("index").from(DATABASE,"h2o_feet")
+ .groupBy(raw("*"));
+```
+
+```sqlite-psql
+SELECT MEAN(index) FROM h2o_feet GROUP BY *;
+```
+
+## GROUP BY time interval
+
+Group query results into 12 minute intervals
+
+```java
+Query query = select().count("water_level").from(DATABASE,"h2o_feet")
+ .where(eq("location","coyote_creek"))
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:30:00Z"))
+ .groupBy(time(12l,MINUTE));
+```
+
+```sqlite-psql
+SELECT COUNT(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z' GROUP BY time(12m);
+```
+
+Group query results into 12 minutes intervals and by a tag key
+
+```java
+ Query query = select().count("water_level").from(DATABASE,"h2o_feet")
+ .where()
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:30:00Z"))
+ .groupBy(time(12l,MINUTE),"location");
+```
+
+```sqlite-psql
+SELECT COUNT(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z' GROUP BY time(12m),location;
+```
+
+## Advanced GROUP BY time() syntax
+
+Group query results into 18 minute intervals and shift the preset time boundaries forward
+
+```java
+Query query = select().mean("water_level").from(DATABASE,"h2o_feet")
+ .where(eq("location","coyote_creek"))
+ .and(gte("time","2015-08-18T00:06:00Z"))
+ .and(lte("time","2015-08-18T00:54:00Z"))
+ .groupBy(time(18l,MINUTE,6l,MINUTE));
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:06:00Z' AND time <= '2015-08-18T00:54:00Z' GROUP BY time(18m,6m);
+```
+
+Group query results into 12 minute intervals and shift the preset time boundaries back
+
+```java
+Query query = select().mean("water_level").from(DATABASE,"h2o_feet")
+ .where(eq("location","coyote_creek"))
+ .and(gte("time","2015-08-18T00:06:00Z"))
+ .and(lte("time","2015-08-18T00:54:00Z"))
+ .groupBy(time(18l,MINUTE,-12l,MINUTE));
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:06:00Z' AND time <= '2015-08-18T00:54:00Z' GROUP BY time(18m,-12m);
+```
+
+## GROUP BY time intervals and fill()
+
+```java
+Query select = select()
+ .column("water_level")
+ .from(DATABASE, "h2o_feet")
+ .where(gt("time", op(ti(24043524l, MINUTE), SUB, ti(6l, MINUTE))))
+ .groupBy("water_level")
+ .fill(100);
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE time > 24043524m - 6m GROUP BY water_level fill(100);
+```
+
+## The INTO clause
+
+Rename a database
+
+```java
+Query select = select()
+ .into("\"copy_NOAA_water_database\".\"autogen\".:MEASUREMENT")
+ .from(DATABASE, "\"NOAA_water_database\".\"autogen\"./.*/")
+ .groupBy(new RawText("*"));
+```
+
+```sqlite-psql
+SELECT * INTO "copy_NOAA_water_database"."autogen".:MEASUREMENT FROM "NOAA_water_database"."autogen"./.*/ GROUP BY *;
+```
+
+Write the results of a query to a measurement
+
+```java
+Query select = select().column("water_level").into("h2o_feet_copy_1").from(DATABASE,"h2o_feet").where(eq("location","coyote_creek"));
+```
+
+```sqlite-psql
+SELECT water_level INTO h2o_feet_copy_1 FROM h2o_feet WHERE location = 'coyote_creek';
+```
+
+Write aggregated results to a measurement
+
+```java
+Query select = select()
+ .mean("water_level")
+ .into("all_my_averages")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","coyote_creek"))
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:30:00Z"))
+ .groupBy(time(12l,MINUTE));
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) INTO all_my_averages FROM h2o_feet WHERE location = 'coyote_creek' AND time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:30:00Z' GROUP BY time(12m);
+```
+
+Write aggregated results for more than one measurement to a different database (downsampling with backreferencing)
+
+```java
+Query select = select()
+ .mean(raw("*"))
+ .into("\"where_else\".\"autogen\".:MEASUREMENT")
+ .fromRaw(DATABASE, "/.*/")
+ .where(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:06:00Z"))
+ .groupBy(time(12l,MINUTE));
+```
+
+```sqlite-psql
+SELECT MEAN(*) INTO "where_else"."autogen".:MEASUREMENT FROM /.*/ WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:06:00Z' GROUP BY time(12m);
+```
+
+## ORDER BY time DESC
+
+Return the newest points first
+
+```java
+Query select = select().from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .orderBy(desc());
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE location = 'santa_monica' ORDER BY time DESC;
+```
+
+Return the newest points first and include a GROUP BY time() clause
+
+```java
+Query select = select().mean("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:42:00Z"))
+ .groupBy(time(12l,MINUTE))
+ .orderBy(desc());
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY time(12m) ORDER BY time DESC;
+```
+
+## The LIMIT clause
+
+Limit the number of points returned
+
+```java
+Query select = select("water_level","location")
+ .from(DATABASE,"h2o_feet").limit(3);
+```
+
+```sqlite-psql
+SELECT water_level,location FROM h2o_feet LIMIT 3;
+```
+
+Limit the number points returned and include a GROUP BY clause
+
+```java
+Query select = select().mean("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where()
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:42:00Z"))
+ .groupBy(raw("*"),time(12l,MINUTE))
+ .limit(2);
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) LIMIT 2;
+```
+
+## The SLIMIT clause
+
+Limit the number of series returned
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .groupBy(raw("*"))
+ .sLimit(1);
+```
+
+```sqlite-psql
+SELECT water_level FROM "h2o_feet" GROUP BY * SLIMIT 1
+```
+
+Limit the number of series returned and include a GROUP BY time() clause
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where()
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:42:00Z"))
+ .groupBy(raw("*"),time(12l,MINUTE))
+ .sLimit(1);
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) SLIMIT 1;
+```
+
+## The OFFSET clause
+
+Paginate points
+
+```java
+Query select = select("water_level","location").from(DATABASE,"h2o_feet").limit(3,3);
+```
+
+```sqlite-psql
+SELECT water_level,location FROM h2o_feet LIMIT 3 OFFSET 3;
+```
+
+## The SOFFSET clause
+
+Paginate series and include all clauses
+
+```java
+Query select = select().mean("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where()
+ .and(gte("time","2015-08-18T00:00:00Z"))
+ .and(lte("time","2015-08-18T00:42:00Z"))
+ .groupBy(raw("*"),time(12l,MINUTE))
+ .orderBy(desc())
+ .limit(2,2)
+ .sLimit(1,1);
+```
+
+```sqlite-psql
+SELECT MEAN(water_level) FROM h2o_feet WHERE time >= '2015-08-18T00:00:00Z' AND time <= '2015-08-18T00:42:00Z' GROUP BY *,time(12m) ORDER BY time DESC LIMIT 2 OFFSET 2 SLIMIT 1 SOFFSET 1;
+```
+
+## The Time Zone clause
+
+Return the UTC offset for Chicago’s time zone
+
+```java
+Query select = select()
+ .column("test1")
+ .from(DATABASE, "h2o_feet")
+ .groupBy("test2", "test3")
+ .sLimit(1)
+ .tz("America/Chicago");
+```
+
+```sqlite-psql
+SELECT test1 FROM h2o_feet GROUP BY test2,test3 SLIMIT 1 tz('America/Chicago');
+```
+
+## Time Syntax
+
+Specify a time range with RFC3339 date-time strings
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .and(gte("time","2015-08-18T00:00:00.000000000Z"))
+ .and(lte("time","2015-08-18T00:12:00Z"));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= '2015-08-18T00:00:00.000000000Z' AND time <= '2015-08-18T00:12:00Z';
+```
+
+Specify a time range with second-precision epoch timestamps
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .and(gte("time",ti(1439856000l,SECOND)))
+ .and(lte("time",ti(1439856720l,SECOND)));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= 1439856000s AND time <= 1439856720s;
+```
+
+Perform basic arithmetic on an RFC3339-like date-time string
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .and(gte("time",op("2015-09-18T21:24:00Z",SUB,ti(6l,MINUTE))));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= '2015-09-18T21:24:00Z' - 6m;
+```
+
+Perform basic arithmetic on an epoch timestamp
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .and(gte("time",op(ti(24043524l,MINUTE),SUB,ti(6l,MINUTE))));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= 24043524m - 6m;
+```
+
+Specify a time range with relative time
+
+```java
+Query select = select().column("water_level")
+ .from(DATABASE,"h2o_feet")
+ .where(eq("location","santa_monica"))
+ .and(gte("time",subTime(1l,HOUR)));
+```
+
+```sqlite-psql
+SELECT water_level FROM h2o_feet WHERE location = 'santa_monica' AND time >= now() - 1h;
+```
+
+## Regular expressions
+
+Use a regular expression to specify field keys and tag keys in the SELECT clause
+
+```java
+Query select = select().regex("l").from(DATABASE,"h2o_feet").limit(1);
+```
+
+```sqlite-psql
+SELECT /l/ FROM h2o_feet LIMIT 1;
+```
+
+Use a regular expression to specify field keys with a function in the SELECT clause
+
+```java
+Query select = select().regex("l").distinct().from(DATABASE,"h2o_feet").limit(1);
+```
+
+```sqlite-psql
+SELECT DISTINCT /l/ FROM h2o_feet LIMIT 1;
+```
+
+Use a regular expression to specify measurements in the FROM clause
+
+```java
+Query select = select().mean("degrees").fromRaw(DATABASE,"/temperature/");
+```
+
+```sqlite-psql
+SELECT MEAN(degrees) FROM /temperature/;
+```
+
+Use a regular expression to specify a field value in the WHERE clause
+
+```java
+Query select = select().regex("/l/").from(DATABASE,"h2o_feet").where(regex("level description","/between/")).limit(1);
+```
+
+```sqlite-psql
+SELECT /l/ FROM h2o_feet WHERE "level description" =~ /between/ LIMIT 1;
+```
+
+Use a regular expression to specify tag keys in the GROUP BY clause
+
+```java
+Query select = select().regex("/l/").from(DATABASE,"h2o_feet").where(regex("level description","/between/")).groupBy(raw("/l/")).limit(1);
+```
+
+```sqlite-psql
+SELECT /l/ FROM h2o_feet WHERE "level description" =~ /between/ GROUP BY /l/ LIMIT 1;
+```
+
+Function with no direct implementation can be supported by raw expressions
+
+```java
+Query select = select().raw("an expression on select").from(dbName, "cpu").where("an expression as condition");
+```
+
+```sqlite-psql
+SELECT an expression on select FROM cpu WHERE an expression as condition;
+```
+
+Binding parameters
+
+If your Query is based on user input, it is good practice to use parameter binding to avoid [injection attacks](https://en.wikipedia.org/wiki/SQL_injection).
+You can create queries with parameter binding:
+
+```java
+Query query = select().from(DATABASE,"h2o_feet").where(gt("water_level", FunctionFactory.placeholder("level")))
+ .bindParameter("level", 8);
+```
+
+```sqlite-psql
+SELECT * FROM h2o_feet WHERE water_level > $level;
+```
+
+The values of bindParameter() calls are bound to the placeholders in the query (`level`).
diff --git a/README.md b/README.md
index 94e1eb2e2..74f6a5ba1 100644
--- a/README.md
+++ b/README.md
@@ -1,172 +1,174 @@
-influxdb-java
-=============
+# influxdb-java
-[](https://travis-ci.org/influxdata/influxdb-java)
+[](https://github.com/influxdata/influxdb-java/actions)
[](http://codecov.io/github/influxdata/influxdb-java?branch=master)
[](https://codeclimate.com/github/influxdata/influxdb-java)
-This is the Java Client library which is only compatible with InfluxDB 0.9 and higher. Maintained by [@majst01](https://github.com/majst01).
-
-To connect to InfluxDB 0.8.x you need to use influxdb-java version 1.6.
-
-This implementation is meant as a Java rewrite of the influxdb-go package.
-All low level REST Api calls are available.
-
-## Usages
-
-### Basic Usages:
-
-```java
-InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root");
-String dbName = "aTimeSeries";
-influxDB.createDatabase(dbName);
-
-BatchPoints batchPoints = BatchPoints
- .database(dbName)
- .tag("async", "true")
- .retentionPolicy("autogen")
- .consistency(ConsistencyLevel.ALL)
- .build();
-Point point1 = Point.measurement("cpu")
- .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
- .addField("idle", 90L)
- .addField("user", 9L)
- .addField("system", 1L)
- .build();
-Point point2 = Point.measurement("disk")
- .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
- .addField("used", 80L)
- .addField("free", 1L)
- .build();
-batchPoints.point(point1);
-batchPoints.point(point2);
-influxDB.write(batchPoints);
-Query query = new Query("SELECT idle FROM cpu", dbName);
-influxDB.query(query);
-influxDB.deleteDatabase(dbName);
-```
-Note : If you are using influxdb < 1.0.0, you should use 'default' instead of 'autogen'
-
-If your application produces only single Points, you can enable the batching functionality of influxdb-java:
-
-```java
-InfluxDB influxDB = InfluxDBFactory.connect("http://172.17.0.2:8086", "root", "root");
-String dbName = "aTimeSeries";
-influxDB.createDatabase(dbName);
-
-// Flush every 2000 Points, at least every 100ms
-influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);
-
-Point point1 = Point.measurement("cpu")
- .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
- .addField("idle", 90L)
- .addField("user", 9L)
- .addField("system", 1L)
- .build();
-Point point2 = Point.measurement("disk")
- .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
- .addField("used", 80L)
- .addField("free", 1L)
- .build();
-
-influxDB.write(dbName, "autogen", point1);
-influxDB.write(dbName, "autogen", point2);
-Query query = new Query("SELECT idle FROM cpu", dbName);
-influxDB.query(query);
-influxDB.deleteDatabase(dbName);
-```
-Note that the batching functionality creates an internal thread pool that needs to be shutdown explicitly as part of a graceful application shut-down, or the application will not shut down properly. To do so simply call: ```influxDB.close()```
+This is the official (and community-maintained) Java client library for [InfluxDB](https://www.influxdata.com/products/influxdb-overview/) (1.x), the open source time series database that is part of the TICK (Telegraf, InfluxDB, Chronograf, Kapacitor) stack.
-Also note that any errors that happen during the batch flush won't leak into the caller of the `write` method. By default, any kind of errors will be just logged with "SEVERE" level.
+For InfluxDB 3.0 users, this library is succeeded by the lightweight [v3 client library](https://github.com/InfluxCommunity/influxdb3-java).
-If you need to be notified and do some custom logic when such asynchronous errors happen, you can add an error handler with a `BiConsumer, Throwable>` using the overloaded `enableBatch` method:
-
-```java
-// Flush every 2000 Points, at least every 100ms
-influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS, Executors.defaultThreadFactory(), (failedPoints, throwable) -> { /* custom error handling here */ });
-```
+_Note: This library is for use with InfluxDB 1.x and [2.x compatibility API](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/). For full supports of InfluxDB 2.x features, please use the [influxdb-client-java](https://github.com/influxdata/influxdb-client-java) client._
-### Advanced Usages:
+## Adding the library to your project
-#### Gzip's support (version 2.5+ required):
+The library artifact is published in Maven central, available at [https://search.maven.org/artifact/org.influxdb/influxdb-java](https://search.maven.org/artifact/org.influxdb/influxdb-java).
-influxdb-java client doesn't enable gzip compress for http request body by default. If you want to enable gzip to reduce transfer data's size , you can call:
-```java
-influxDB.enableGzip()
-```
+### Release versions
-#### UDP's support (version 2.5+ required):
+Maven dependency:
-influxdb-java client support udp protocol now. you can call followed methods directly to write through UDP.
-```java
-public void write(final int udpPort, final String records);
-public void write(final int udpPort, final List records);
-public void write(final int udpPort, final Point point);
+```xml
+
+ org.influxdb
+ influxdb-java
+ ${influxdbClient.version}
+
```
-note: make sure write content's total size should not > UDP protocol's limit(64K), or you should use http instead of udp.
+Gradle dependency:
-#### chunking support (version 2.6+ required, unreleased):
-
-influxdb-java client now supports influxdb chunking. The following example uses a chunkSize of 20 and invokes the specified Consumer (e.g. System.out.println) for each received QueryResult
-```java
-Query query = new Query("SELECT idle FROM cpu", dbName);
-influxDB.query(query, 20, queryResult -> System.out.println(queryResult));
+```bash
+compile group: 'org.influxdb', name: 'influxdb-java', version: "${influxdbClientVersion}"
```
+## Features
+
+* Querying data using:
+ * [Influx Query Language (InfluxQL)](https://docs.influxdata.com/influxdb/v1.7/query_language/), with support for [bind parameters](https://docs.influxdata.com/influxdb/v1.7/tools/api/#bind-parameters) (similar to [JDBC PreparedStatement parameters](https://docs.oracle.com/javase/tutorial/jdbc/basics/prepared.html#supply_values_ps));
+ * its own [QueryBuilder](https://github.com/influxdata/influxdb-java/blob/master/QUERY_BUILDER.md), as you would do with e.g. EclipseLink or Hibernate;
+ * Message Pack (requires InfluxDB [1.4+](https://www.influxdata.com/blog/whats-new-influxdb-oss-1-4/));
+* Writing data using:
+ * Data Point (an object provided by this library that represents a ... data point);
+ * Your own POJO (you need to add a few Java Annotations);
+ * [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/) (for the braves only);
+ * UDP, as [supported by InfluxDB](https://docs.influxdata.com/influxdb/v1.7/supported_protocols/udp/);
+* Support synchronous and asynchronous writes;
+* Batch support configurable with `jitter` interval, `buffer` size and `flush` interval.
+
+## Quick start
+
+```Java
+// Create an object to handle the communication with InfluxDB.
+// (best practice tip: reuse the 'influxDB' instance when possible)
+final String serverURL = "http://127.0.0.1:8086", username = "root", password = "root";
+final InfluxDB influxDB = InfluxDBFactory.connect(serverURL, username, password);
+
+// Create a database...
+// https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
+String databaseName = "NOAA_water_database";
+influxDB.query(new Query("CREATE DATABASE " + databaseName));
+influxDB.setDatabase(databaseName);
+
+// ... and a retention policy, if necessary.
+// https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
+String retentionPolicyName = "one_day_only";
+influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
+ + " ON " + databaseName + " DURATION 1d REPLICATION 1 DEFAULT"));
+influxDB.setRetentionPolicy(retentionPolicyName);
+
+// Enable batch writes to get better performance.
+influxDB.enableBatch(
+ BatchOptions.DEFAULTS
+ .threadFactory(runnable -> {
+ Thread thread = new Thread(runnable);
+ thread.setDaemon(true);
+ return thread;
+ })
+);
+
+// Close it if your application is terminating or you are not using it anymore.
+Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close));
+
+// Write points to InfluxDB.
+influxDB.write(Point.measurement("h2o_feet")
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .tag("location", "santa_monica")
+ .addField("level description", "below 3 feet")
+ .addField("water_level", 2.064d)
+ .build());
+
+influxDB.write(Point.measurement("h2o_feet")
+ .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
+ .tag("location", "coyote_creek")
+ .addField("level description", "between 6 and 9 feet")
+ .addField("water_level", 8.12d)
+ .build());
+
+// Wait a few seconds in order to let the InfluxDB client
+// write your points asynchronously (note: you can adjust the
+// internal time interval if you need via 'enableBatch' call).
+Thread.sleep(5_000L);
+
+// Query your data using InfluxQL.
+// https://docs.influxdata.com/influxdb/v1.7/query_language/data_exploration/#the-basic-select-statement
+QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet"));
+
+System.out.println(queryResult);
+// It will print something like:
+// QueryResult [results=[Result [series=[Series [name=h2o_feet, tags=null,
+// columns=[time, level description, location, water_level],
+// values=[
+// [2020-03-22T20:50:12.929Z, below 3 feet, santa_monica, 2.064],
+// [2020-03-22T20:50:12.929Z, between 6 and 9 feet, coyote_creek, 8.12]
+// ]]], error=null]], error=null]
+```
-### Other Usages:
-For additional usage examples have a look at [InfluxDBTest.java](https://github.com/influxdb/influxdb-java/blob/master/src/test/java/org/influxdb/InfluxDBTest.java "InfluxDBTest.java")
-
-## Version
+## Contribute
-The latest version for maven dependence:
-```xml
-
- org.influxdb
- influxdb-java
- 2.5
-
-```
For version change history have a look at [ChangeLog](https://github.com/influxdata/influxdb-java/blob/master/CHANGELOG.md).
-
### Build Requirements
* Java 1.8+
-* Maven 3.0+
-* Docker daemon running
+* Maven 3.5+
+* Docker (for Unit testing)
Then you can build influxdb-java with all tests with:
```bash
-$ mvn clean install
-```
+$> export INFLUXDB_IP=127.0.0.1
-If you don't have Docker running locally, you can skip tests with -DskipTests flag set to true:
+$> mvn clean install
-```bash
-$ mvn clean install -DskipTests=true
```
-If you have Docker running, but it is not at localhost (e.g. you are on a Mac and using `docker-machine`) you can set an optional environment variable `INFLUXDB_IP` to point to the correct IP address:
+There is a shell script running InfluxDB and Maven from inside a Docker container and you can execute it by running:
```bash
-$ export INFLUXDB_IP=192.168.99.100
-$ mvn test
+$> ./compile-and-test.sh
```
-For convenience we provide a small shell script which starts a influxdb server locally and executes `mvn clean install` with all tests inside docker containers.
+## Useful links
-```bash
-$ ./compile-and-test.sh
-```
+* [Manual](MANUAL.md) (main documentation);
+* [InfluxDB Object Mapper](INFLUXDB_MAPPER.md);
+* [Query Builder](QUERY_BUILDER.md);
+* [FAQ](FAQ.md);
+* [Changelog](CHANGELOG.md).
+
+## License
+```license
+The MIT License (MIT)
-### Publishing
+Copyright (c) 2014 Stefan Majer
-This is a
-[link](https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide)
-to the sonatype oss guide to publishing. I'll update this section once
-the [jira ticket](https://issues.sonatype.org/browse/OSSRH-9728) is
-closed and I'm able to upload artifacts to the sonatype repositories.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+```
diff --git a/checkstyle.xml b/checkstyle.xml
index d27f8b51d..921bab224 100644
--- a/checkstyle.xml
+++ b/checkstyle.xml
@@ -27,9 +27,13 @@
-
-
+
+
+
+
+
+
@@ -61,17 +65,10 @@
-
-
-
-
-
-
-
diff --git a/compile-and-test.sh b/compile-and-test.sh
index 660181a21..633ff51e2 100755
--- a/compile-and-test.sh
+++ b/compile-and-test.sh
@@ -4,29 +4,72 @@
#
set -e
-INFLUXDB_VERSIONS="1.2 1.1"
-
-for version in ${INFLUXDB_VERSIONS}
-do
- echo "Tesing againts influxdb ${version}"
- docker kill influxdb || true
- docker rm influxdb || true
- docker pull influxdb:${version}-alpine || true
- docker run \
- --detach \
- --name influxdb \
- --publish 8086:8086 \
- --publish 8089:8089/udp \
- --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \
- influxdb:${version}-alpine
-
- docker run -it --rm \
- --volume $PWD:/usr/src/mymaven \
- --volume $PWD/.m2:/root/.m2 \
- --workdir /usr/src/mymaven \
- --link=influxdb \
- --env INFLUXDB_IP=influxdb \
- maven:alpine mvn clean install
-
- docker kill influxdb || true
-done
+DEFAULT_INFLUXDB_VERSION="1.8"
+DEFAULT_MAVEN_JAVA_VERSION="3-openjdk-17-slim"
+
+INFLUXDB_VERSION="${INFLUXDB_VERSION:-$DEFAULT_INFLUXDB_VERSION}"
+MAVEN_JAVA_VERSION="${MAVEN_JAVA_VERSION:-$DEFAULT_MAVEN_JAVA_VERSION}"
+
+echo "Run tests with maven:${MAVEN_JAVA_VERSION} on influxdb-${INFLUXDB_VERSION}"
+docker kill influxdb || true
+docker rm influxdb || true
+docker pull influxdb:${INFLUXDB_VERSION}-alpine || true
+docker run \
+ --detach \
+ --name influxdb \
+ --publish 8086:8086 \
+ --publish 8089:8089/udp \
+ --volume ${PWD}/influxdb.conf:/etc/influxdb/influxdb.conf \
+ --env DOCKER_INFLUXDB_INIT_MODE=setup \
+ --env DOCKER_INFLUXDB_INIT_USERNAME=my-user \
+ --env DOCKER_INFLUXDB_INIT_PASSWORD=my-password \
+ --env DOCKER_INFLUXDB_INIT_ORG=my-org \
+ --env DOCKER_INFLUXDB_INIT_BUCKET=my-bucket \
+ --env DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=my-token \
+ influxdb:${INFLUXDB_VERSION}-alpine
+
+echo "Starting Nginx"
+docker kill nginx || true
+docker rm nginx || true
+
+docker run \
+ --detach \
+ --name nginx \
+ --publish 8080:8080 \
+ --publish 8080:8080/udp \
+ --volume ${PWD}/src/test/nginx/nginx.conf:/etc/nginx/nginx.conf:ro \
+ --link influxdb:influxdb \
+ nginx:stable-alpine nginx '-g' 'daemon off;'
+
+echo "Running tests"
+PROXY_API_URL=http://nginx:8080/influx-api/
+PROXY_UDP_PORT=8080
+if [[ "$INFLUXDB_VERSION" == "2."* ]]
+then
+ TEST_EXPRESSION="InfluxDB2Test"
+ # Wait to start InfluxDB
+ docker run --link influxdb:influxdb ubuntu:20.04 bash -c "apt-get update \
+ && apt-get install wget --yes \
+ && wget -S --spider --tries=20 --retry-connrefused --waitretry=5 http://influxdb:8086/ping"
+ # Create DBRP Mapping
+ BUCKET_ID=$(docker exec influxdb bash -c "influx bucket list -o my-org -n my-bucket | grep my-bucket | xargs | cut -d ' ' -f 1")
+ docker exec influxdb bash -c "influx v1 dbrp create -o my-org --db mydb --rp autogen --default --bucket-id ${BUCKET_ID}"
+ docker exec influxdb bash -c "influx v1 auth create -o my-org --username my-user --password my-password --read-bucket ${BUCKET_ID} --write-bucket ${BUCKET_ID}"
+else
+ TEST_EXPRESSION="*"
+fi
+
+docker run --rm \
+ --volume ${PWD}:/usr/src/mymaven \
+ --volume ${PWD}/.m2:/root/.m2 \
+ --workdir /usr/src/mymaven \
+ --link=influxdb \
+ --link=nginx \
+ --env INFLUXDB_VERSION=${INFLUXDB_VERSION} \
+ --env INFLUXDB_IP=influxdb \
+ --env PROXY_API_URL=${PROXY_API_URL} \
+ --env PROXY_UDP_PORT=${PROXY_UDP_PORT} \
+ maven:${MAVEN_JAVA_VERSION} mvn clean install -Dtest="${TEST_EXPRESSION}"
+
+docker kill influxdb || true
+docker kill nginx || true
diff --git a/deploy-snapshot.sh b/deploy-snapshot.sh
new file mode 100755
index 000000000..60a454ec2
--- /dev/null
+++ b/deploy-snapshot.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+
+# Parse project version from pom.xml
+PROJECT_VERSION=$(xmllint --xpath "//*[local-name()='project']/*[local-name()='version']/text()" pom.xml)
+export PROJECT_VERSION
+echo "Project version: $PROJECT_VERSION"
+
+# Skip if not *SNAPSHOT
+if [[ $PROJECT_VERSION != *SNAPSHOT ]]; then
+ echo "$PROJECT_VERSION is not SNAPSHOT - skip deploy.";
+ exit;
+fi
+
+
+DEFAULT_MAVEN_JAVA_VERSION="3-jdk-8-slim"
+MAVEN_JAVA_VERSION="${MAVEN_JAVA_VERSION:-$DEFAULT_MAVEN_JAVA_VERSION}"
+echo "Deploy snapshot with maven:${MAVEN_JAVA_VERSION}"
+
+docker run --rm \
+ --volume ${PWD}:/usr/src/mymaven \
+ --volume ${PWD}/.m2:/root/.m2 \
+ --workdir /usr/src/mymaven \
+ --env SONATYPE_USERNAME=${SONATYPE_USERNAME} \
+ --env SONATYPE_PASSWORD=${SONATYPE_PASSWORD} \
+ maven:${MAVEN_JAVA_VERSION} mvn -s .maven-settings.xml -DskipTests=true clean package deploy
diff --git a/format-sources.sh b/format-sources.sh
new file mode 100755
index 000000000..98d0bc92f
--- /dev/null
+++ b/format-sources.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+wget https://github.com/google/google-java-format/releases/download/google-java-format-1.4/google-java-format-1.4-all-deps.jar
+
+JAVA_FILES=$(find src/ -name "*.java")
+
+for JAVA_FILE in ${JAVA_FILES}
+do
+ echo "formatting ${JAVA_FILE}"
+ docker run -it --rm \
+ -v $PWD:/mnt \
+ openjdk java -jar /mnt/google-java-format-1.4-all-deps.jar -r /mnt/${JAVA_FILE}
+done
diff --git a/mvn.sh b/mvn.sh
index 6711aec46..cd99af3e6 100755
--- a/mvn.sh
+++ b/mvn.sh
@@ -8,4 +8,4 @@ docker run -it --rm \
-v $PWD:/usr/src/mymaven \
-v $PWD/.m2:/root/.m2 \
-w /usr/src/mymaven \
- maven:alpine mvn clean "$@"
+ maven:3-openjdk-17-slim mvn clean "$@"
diff --git a/pom.xml b/pom.xml
index 2c04b3516..21ec65cf4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,24 +1,14 @@
-
- org.sonatype.oss
- oss-parent
- 7
-
-
4.0.0org.influxdbinfluxdb-javajar
- 2.5-SNAPSHOT
+ 2.26-SNAPSHOTinfluxdb java bindingsJava API to access the InfluxDB REST APIhttp://www.influxdb.org
-
- 3.2.1
-
-
The MIT License (MIT)
@@ -34,6 +24,7 @@
scm:git:git@github.com:influxdata/influxdb-java.gitscm:git:git@github.com:influxdata/influxdb-java.gitgit@github.com:influxdata/influxdb-java.git
+ influxdb-java-2.25
@@ -48,7 +39,7 @@
org.codehaus.mojofindbugs-maven-plugin
- 3.0.4
+ 3.0.5true
@@ -57,35 +48,163 @@
+
+
+
+ ossrh
+ https://central.sonatype.com/repository/maven-snapshots
+
+
+ ossrh
+ https://ossrh-staging-api.central.sonatype.com/service/local/staging/deploy/maven2/
+
+
+
+
+
+
+ src/main/resources
+
+ docker-compose.yml
+
+
+
+
+ org.codehaus.mojo
+ versions-maven-plugin
+ 2.16.2
+ org.apache.maven.pluginsmaven-compiler-plugin
- 3.6.1
+ 3.12.11.81.8
+
+
+ -parameters
+ org.apache.maven.pluginsmaven-surefire-plugin
- 2.20
+ 3.2.5org.apache.maven.pluginsmaven-site-plugin
- 3.6
+ 3.12.1
+
+
+ org.apache.maven.plugins
+ maven-clean-plugin
+ 3.3.2
+
+
+ org.apache.maven.plugins
+ maven-deploy-plugin
+ 3.1.1
+
+
+ org.apache.maven.plugins
+ maven-install-plugin
+ 3.1.1
+
+
+ org.apache.maven.plugins
+ maven-jar-plugin
+ 3.3.0
+
+
+
+ true
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-resources-plugin
+ 3.3.1
+
+
+ org.apache.maven.plugins
+ maven-release-plugin
+ 3.0.1
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+ 3.4.1
+
+
+ enforce-maven
+
+ enforce
+
+
+
+
+ 3.3.9
+
+
+
+
+
+
+
+ org.sonatype.plugins
+ nexus-staging-maven-plugin
+ 1.6.13
+ true
+
+ ossrh
+ https://ossrh-staging-api.central.sonatype.com/
+ true
+ 15
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+ 3.2.1
+
+
+ attach-sources
+
+ jar-no-fork
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+ 3.6.3
+
+ 8
+
+
+
+ attach-javadocs
+
+ jar
+
+
+
+ org.jacocojacoco-maven-plugin
- 0.7.9
+ 0.8.11
@@ -104,7 +223,14 @@
org.apache.maven.pluginsmaven-checkstyle-plugin
- 2.17
+ 3.3.1
+
+
+ com.puppycrawl.tools
+ checkstyle
+ 9.3
+
+ truecheckstyle.xml
@@ -121,99 +247,213 @@
org.apache.maven.plugins
- maven-shade-plugin
- 3.0.0
-
-
- package
-
- shade
-
-
-
+ maven-release-plugin
- true
-
-
- com.google.guava:guava
-
-
-
-
- com.google.common
- org.influxdb.com.google.guava
-
-
-
-
- *:*
-
- META-INF/license/**
- META-INF/*
- META-INF/maven/**
- LICENSE
- NOTICE
- /*.txt
- build.properties
-
-
-
+
+ release
-
- junit
- junit
- 4.12
+ org.junit.jupiter
+ junit-jupiter-engine
+ 5.9.3test
- org.assertj
- assertj-core
- 3.7.0
+ org.junit.platform
+ junit-platform-runner
+ 1.9.3test
- org.mockito
- mockito-core
- 2.8.9
+ org.hamcrest
+ hamcrest-all
+ 1.3test
- org.slf4j
- slf4j-simple
- 1.7.25
+ org.assertj
+ assertj-core
+ 3.27.7test
- com.google.guava
- guava
- 21.0
+ org.mockito
+ mockito-core
+ 4.10.0
+ testcom.squareup.retrofit2retrofit
- 2.3.0
+ 2.9.0
+
+
+ com.squareup.okhttp3
+ okhttp
+
+ com.squareup.retrofit2converter-moshi
- 2.3.0
+ 2.9.0
+
+
+ com.squareup.okio
+ okio
+
+
+
+
+ org.msgpack
+ msgpack-core
+ 0.9.11
+ of the influxdb server address resolves to all influxdb server ips. -->
com.squareup.okhttp3okhttp
- 3.8.0
+ 4.12.0com.squareup.okhttp3logging-interceptor
- 3.8.0
+ 4.12.0
+
+
+ release
+
+ influxdb:alpine
+
+
+
+
+ maven-resources-plugin
+ 3.3.1
+
+
+ copy-resources
+
+ validate
+
+ copy-resources
+
+
+ ${project.build.directory}
+
+
+ src/main/resources
+ true
+
+ docker-compose.yml
+
+
+
+
+
+
+
+
+ com.dkanejs.maven.plugins
+ docker-compose-maven-plugin
+ 4.0.0
+
+
+ up
+ process-test-resources
+
+ up
+
+
+ ${project.build.directory}/docker-compose.yml
+ true
+
+
+
+ down
+ post-integration-test
+
+ down
+
+
+ ${project.build.directory}/docker-compose.yml
+ true
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-gpg-plugin
+ 3.1.0
+
+
+ sign-artifacts
+ verify
+
+ sign
+
+
+
+
+
+
+
+
+
+ java17
+
+ 17
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+ 3.3.0
+
+
+ add-test-source
+ generate-test-sources
+
+ add-test-source
+
+
+
+ src/test-jdk17/java
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ true
+
+
+ 17
+ 17
+
+
+ -parameters
+ --add-opens=java.base/java.lang=ALL-UNNAMED
+ --add-opens=java.base/java.util=ALL-UNNAMED
+
+
+
+
+
+
+
diff --git a/src/main/java/org/influxdb/BatchOptions.java b/src/main/java/org/influxdb/BatchOptions.java
new file mode 100644
index 000000000..af1302a7e
--- /dev/null
+++ b/src/main/java/org/influxdb/BatchOptions.java
@@ -0,0 +1,250 @@
+package org.influxdb;
+
+import org.influxdb.dto.Point;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+/**
+ * BatchOptions are used to configure batching of individual data point writes
+ * into InfluxDB. See {@link InfluxDB#enableBatch(BatchOptions)}
+ */
+public final class BatchOptions implements Cloneable {
+
+ // default values here are consistent with Telegraf
+ public static final int DEFAULT_BATCH_ACTIONS_LIMIT = 1000;
+ public static final int DEFAULT_BATCH_INTERVAL_DURATION = 1000;
+ public static final int DEFAULT_JITTER_INTERVAL_DURATION = 0;
+ public static final int DEFAULT_BUFFER_LIMIT = 10000;
+ public static final TimeUnit DEFAULT_PRECISION = TimeUnit.NANOSECONDS;
+ public static final boolean DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION = false;
+
+
+ /**
+ * Default batch options. This class is immutable, each configuration
+ * is built by taking the DEFAULTS and setting specific configuration
+ * properties.
+ */
+ public static final BatchOptions DEFAULTS = new BatchOptions();
+
+ private int actions = DEFAULT_BATCH_ACTIONS_LIMIT;
+ private int flushDuration = DEFAULT_BATCH_INTERVAL_DURATION;
+ private int jitterDuration = DEFAULT_JITTER_INTERVAL_DURATION;
+ private int bufferLimit = DEFAULT_BUFFER_LIMIT;
+ private TimeUnit precision = DEFAULT_PRECISION;
+ private boolean dropActionsOnQueueExhaustion = DEFAULT_DROP_ACTIONS_ON_QUEUE_EXHAUSTION;
+ private Consumer<Point> droppedActionHandler = (point) -> {
+ };
+
+ private ThreadFactory threadFactory = Executors.defaultThreadFactory();
+ BiConsumer<Iterable<Point>, Throwable> exceptionHandler = (points, throwable) -> {
+ };
+
+ private InfluxDB.ConsistencyLevel consistency = InfluxDB.ConsistencyLevel.ONE;
+
+ private BatchOptions() {
+ }
+
+ /**
+ * @param actions the number of actions to collect
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions actions(final int actions) {
+ BatchOptions clone = getClone();
+ clone.actions = actions;
+ return clone;
+ }
+
+ /**
+ * @param flushDuration the time to wait at most (milliseconds).
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions flushDuration(final int flushDuration) {
+ BatchOptions clone = getClone();
+ clone.flushDuration = flushDuration;
+ return clone;
+ }
+
+ /**
+ * Jitters the batch flush interval by a random amount. This is primarily to avoid
+ * large write spikes for users running a large number of client instances.
+ * ie, a jitter of 5s and flush duration 10s means flushes will happen every 10-15s.
+ *
+ * @param jitterDuration (milliseconds)
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions jitterDuration(final int jitterDuration) {
+ BatchOptions clone = getClone();
+ clone.jitterDuration = jitterDuration;
+ return clone;
+ }
+
+ /**
+ * The client maintains a buffer for failed writes so that the writes will be retried later on. This may
+ * help to overcome temporary network problems or InfluxDB load spikes.
+ * When the buffer is full and new points are written, oldest entries in the buffer are lost.
+ *
+ * To disable this feature set buffer limit to a value smaller than {@link BatchOptions#getActions}
+ *
+ * @param bufferLimit maximum number of points stored in the retry buffer
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions bufferLimit(final int bufferLimit) {
+ BatchOptions clone = getClone();
+ clone.bufferLimit = bufferLimit;
+ return clone;
+ }
+
+ /**
+ * @param threadFactory a ThreadFactory instance to be used
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions threadFactory(final ThreadFactory threadFactory) {
+ BatchOptions clone = getClone();
+ clone.threadFactory = threadFactory;
+ return clone;
+ }
+
+ /**
+ * @param exceptionHandler a consumer function to handle asynchronous errors
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions exceptionHandler(final BiConsumer<Iterable<Point>, Throwable> exceptionHandler) {
+ BatchOptions clone = getClone();
+ clone.exceptionHandler = exceptionHandler;
+ return clone;
+ }
+
+ /**
+ * @param consistency cluster consistency setting (how many nodes have to store data points
+ * to treat a write as a success)
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions consistency(final InfluxDB.ConsistencyLevel consistency) {
+ BatchOptions clone = getClone();
+ clone.consistency = consistency;
+ return clone;
+ }
+
+ /**
+ * Set the time precision to use for the whole batch. If unspecified, will default to {@link TimeUnit#NANOSECONDS}.
+ * @param precision sets the precision to use
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions precision(final TimeUnit precision) {
+ BatchOptions clone = getClone();
+ clone.precision = precision;
+ return clone;
+ }
+
+ /**
+ * Set to define the behaviour when the action queue exhausts. If unspecified, will default to false which means
+ * that the {@link InfluxDB#write(Point)} will be blocked till the space in the queue is created.
+ * true means that the newer actions being written to the queue will be dropped and
+ * {@link BatchOptions#droppedActionHandler} will be called.
+ * @param dropActionsOnQueueExhaustion sets the behavior
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions dropActionsOnQueueExhaustion(final boolean dropActionsOnQueueExhaustion) {
+ BatchOptions clone = getClone();
+ clone.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion;
+ return clone;
+ }
+
+ /**
+ * Handler to handle dropped actions due to queue actions. This is only valid when
+ * {@link BatchOptions#dropActionsOnQueueExhaustion} is set to true.
+ * @param droppedActionHandler to handle action drops on action queue exhaustion.
+ * @return the BatchOptions instance to be able to use it in a fluent manner.
+ */
+ public BatchOptions droppedActionHandler(final Consumer<Point> droppedActionHandler) {
+ BatchOptions clone = getClone();
+ clone.droppedActionHandler = droppedActionHandler;
+ return clone;
+ }
+
+
+ /**
+ * @return actions the number of actions to collect
+ */
+ public int getActions() {
+ return actions;
+ }
+
+ /**
+ * @return flushDuration the time to wait at most (milliseconds).
+ */
+ public int getFlushDuration() {
+ return flushDuration;
+ }
+
+ /**
+ * @return batch flush interval jitter value (milliseconds)
+ */
+ public int getJitterDuration() {
+ return jitterDuration;
+ }
+
+ /**
+ * @return Maximum number of points stored in the retry buffer, see {@link BatchOptions#bufferLimit(int)}
+ */
+ public int getBufferLimit() {
+ return bufferLimit;
+ }
+
+ /**
+ * @return a ThreadFactory instance to be used
+ */
+ public ThreadFactory getThreadFactory() {
+ return threadFactory;
+ }
+
+ /**
+ * @return a consumer function to handle asynchronous errors
+ */
+ public BiConsumer<Iterable<Point>, Throwable> getExceptionHandler() {
+ return exceptionHandler;
+ }
+
+ /**
+ * @return cluster consistency setting (how many nodes have to store data points
+ * to treat a write as a success)
+ */
+ public InfluxDB.ConsistencyLevel getConsistency() {
+ return consistency;
+ }
+
+ /**
+ * @return the time precision
+ */
+ public TimeUnit getPrecision() {
+ return precision;
+ }
+
+
+ /**
+ * @return a boolean determining whether to drop actions on action queue exhaustion.
+ */
+ public boolean isDropActionsOnQueueExhaustion() {
+ return dropActionsOnQueueExhaustion;
+ }
+
+ /**
+ * @return a consumer function to handle actions drops on action queue exhaustion.
+ */
+ public Consumer<Point> getDroppedActionHandler() {
+ return droppedActionHandler;
+ }
+
+ private BatchOptions getClone() {
+ try {
+ return (BatchOptions) this.clone();
+ } catch (CloneNotSupportedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+}
diff --git a/src/main/java/org/influxdb/BuilderException.java b/src/main/java/org/influxdb/BuilderException.java
new file mode 100644
index 000000000..34ad6ca0d
--- /dev/null
+++ b/src/main/java/org/influxdb/BuilderException.java
@@ -0,0 +1,18 @@
+package org.influxdb;
+
+/**
+ * Class for exceptions when using Point Builder.
+ *
+ * @author mirza99
+ */
+public class BuilderException extends RuntimeException {
+
+ /**
+ * Generated serial version UID.
+ */
+ private static final long serialVersionUID = 4178882805281378918L;
+
+ public BuilderException(final String message) {
+ super(message);
+ }
+}
diff --git a/src/main/java/org/influxdb/InfluxDB.java b/src/main/java/org/influxdb/InfluxDB.java
index cad445c05..56f842f35 100644
--- a/src/main/java/org/influxdb/InfluxDB.java
+++ b/src/main/java/org/influxdb/InfluxDB.java
@@ -5,6 +5,7 @@
import org.influxdb.dto.Pong;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
+import retrofit2.Call;
import java.util.List;
import java.util.concurrent.ThreadFactory;
@@ -24,7 +25,13 @@
* @author stefan.majer [at] gmail.com
*
*/
-public interface InfluxDB {
+public interface InfluxDB extends AutoCloseable {
+
+ /**
+ * The system property key to set the http logging level across the JVM.
+ * @see LogLevel for available values
+ */
+ public static final String LOG_LEVEL_PROPERTY = "org.influxdb.InfluxDB.logLevel";
/** Controls the level of logging of the REST layer. */
public enum LogLevel {
@@ -40,6 +47,24 @@ public enum LogLevel {
* Note: This requires that the entire request and response body be buffered in memory!
*/
FULL;
+ /**
+ * Parses the string argument as a LogLevel constant.
+ * @param value a {@code String} containing the {@code LogLevel constant}
+ * representation to be parsed
+ * @return the LogLevel constant representation of the param
+ * or {@code NONE} for null or any invalid String representation.
+ */
+ public static LogLevel parseLogLevel(final String value) {
+ LogLevel logLevel = NONE;
+ if (value != null) {
+ try {
+ logLevel = valueOf(value.toUpperCase());
+ } catch (IllegalArgumentException e) {
+ }
+ }
+
+ return logLevel;
+ }
}
/**
@@ -70,6 +95,37 @@ public String value() {
}
}
+ /**
+ * Format of HTTP Response body from InfluxDB server.
+ */
+ public enum ResponseFormat {
+ /** application/json format. */
+ JSON,
+ /** application/x-msgpack format. */
+ MSGPACK
+ }
+
+ /**
+ * A cancelable allows to discontinue a streaming query.
+ */
+ public interface Cancellable {
+
+ /**
+ * Cancel the streaming query call.
+ *
+ * @see Call#cancel()
+ */
+ void cancel();
+
+ /**
+ * Return {@code true} if the {@link Cancellable#cancel()} was called.
+ *
+ * @return {@code true} if the {@link Cancellable#cancel()} was called
+ * @see Call#isCanceled()
+ */
+ boolean isCanceled();
+ }
+
/**
* Set the loglevel which is used for REST related actions.
*
@@ -81,36 +137,102 @@ public String value() {
/**
* Enable Gzip compress for http request body.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
*/
public InfluxDB enableGzip();
/**
* Disable Gzip compress for http request body.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
*/
public InfluxDB disableGzip();
/**
* Returns whether Gzip compress for http request body is enabled.
+ * @return true if gzip is enabled.
*/
public boolean isGzipEnabled();
+ /**
+ * Enable batching of single Point writes to speed up writes significantly. This is the same as calling
+ * InfluxDB.enableBatch(BatchOptions.DEFAULTS)
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+ public InfluxDB enableBatch();
+
+ /**
+ * Enable batching of single Point writes to speed up writes significantly. If either number of points written or
+ * flushDuration time limit is reached, a batch write is issued.
+ * Note that batch processing needs to be explicitly stopped before the application is shutdown.
+ * To do so call disableBatch().
+ *
+ * @param batchOptions
+ * the options to set for batching the writes.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+ public InfluxDB enableBatch(final BatchOptions batchOptions);
+
/**
* Enable batching of single Point writes as {@link #enableBatch(int, int, TimeUnit, ThreadFactory)}}
* using {@linkplain java.util.concurrent.Executors#defaultThreadFactory() default thread factory}.
*
+ * @param actions
+ * the number of actions to collect
+ * @param flushDuration
+ * the time to wait at most.
+ * @param flushDurationTimeUnit
+ * the TimeUnit for the given flushDuration.
+ *
* @see #enableBatch(int, int, TimeUnit, ThreadFactory)
+ *
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
*/
public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit);
/**
* Enable batching of single Point writes as
- * {@link #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer<Iterable<Point>, Throwable>)}
+ * {@link #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer)}
* using with a exceptionHandler that does nothing.
*
- * @see #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer<Iterable<Point>, Throwable>)
+ * @param actions
+ * the number of actions to collect
+ * @param flushDuration
+ * the time to wait at most.
+ * @param flushDurationTimeUnit
+ * the TimeUnit for the given flushDuration.
+ * @param threadFactory
+ * a ThreadFactory instance to be used.
+ *
+ * @see #enableBatch(int, int, TimeUnit, ThreadFactory, BiConsumer)
+ *
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
*/
public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit,
final ThreadFactory threadFactory);
+ /**
+ * Enable batching of single Point writes with consistency set for an entire batch
+ * flushDurations is reached first, a batch write is issued.
+ * Note that batch processing needs to be explicitly stopped before the application is shutdown.
+ * To do so call disableBatch(). Default consistency is ONE.
+ *
+ * @param actions
+ * the number of actions to collect
+ * @param flushDuration
+ * the time to wait at most.
+ * @param flushDurationTimeUnit
+ * the TimeUnit for the given flushDuration.
+ * @param threadFactory
+ * a ThreadFactory instance to be used.
+ * @param exceptionHandler
+ * a consumer function to handle asynchronous errors
+ * @param consistency
+ * a consistency setting for batch writes.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+
+ InfluxDB enableBatch(int actions, int flushDuration, TimeUnit flushDurationTimeUnit,
+ ThreadFactory threadFactory, BiConsumer<Iterable<Point>, Throwable> exceptionHandler,
+ ConsistencyLevel consistency);
/**
* Enable batching of single Point writes to speed up writes significant. If either actions or
@@ -123,7 +245,9 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti
* @param flushDuration
* the time to wait at most.
* @param flushDurationTimeUnit
+ * the TimeUnit for the given flushDuration.
* @param threadFactory
+ * a ThreadFactory instance to be used.
* @param exceptionHandler
* a consumer function to handle asynchronous errors
* @return the InfluxDB instance to be able to use it in a fluent manner.
@@ -139,6 +263,7 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti
/**
* Returns whether Batching is enabled.
+ * @return true if batch is enabled.
*/
public boolean isBatchEnabled();
@@ -156,6 +281,30 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti
*/
public String version();
+ /**
+ * Write a single Point to the default database.
+ *
+ * @param point
+ * The point to write
+ */
+ public void write(final Point point);
+
+ /**
+ * Write a set of Points to the default database with the string records.
+ *
+ * @param records
+ * the points in the correct lineprotocol.
+ */
+ public void write(final String records);
+
+ /**
+ * Write a set of Points to the default database with the list of string records.
+ *
+ * @param records
+ * the List of points in the correct lineprotocol.
+ */
+ public void write(final List<String> records);
+
/**
* Write a single Point to the database.
*
@@ -179,39 +328,111 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti
public void write(final int udpPort, final Point point);
/**
- * Write a set of Points to the influxdb database with the new (>= 0.9.0rc32) lineprotocol.
+ * Write a set of Points to the influxdb database with the new (&gt;= 0.9.0rc32) lineprotocol.
*
- * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"}
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
*
* @param batchPoints
+ * the points to write in BatchPoints.
*/
public void write(final BatchPoints batchPoints);
+ /**
+ * Write a set of Points to the influxdb database with the new (&gt;= 0.9.0rc32) lineprotocol.
+ *
+ * If batching is enabled with appropriate {@code BatchOptions} settings
+ * ({@code BatchOptions.bufferLimit} greater than {@code BatchOptions.actions})
+ * This method will try to retry in case of some recoverable errors.
+ * Otherwise it just works as {@link #write(BatchPoints)}
+ *
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
+ * @see <a href="https://github.com/influxdata/influxdb-java/wiki/Handling-errors-of-InfluxDB-under-high-load">
+ * Retry worth errors</a>
+ *
+ * @param batchPoints
+ * the points to write in BatchPoints.
+ */
+ public void writeWithRetry(final BatchPoints batchPoints);
+
/**
* Write a set of Points to the influxdb database with the string records.
*
- * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"}
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
*
+ * @param database
+ * the name of the database to write
+ * @param retentionPolicy
+ * the retentionPolicy to use
+ * @param consistency
+ * the ConsistencyLevel to use
* @param records
+ * the points in the correct lineprotocol.
*/
public void write(final String database, final String retentionPolicy,
final ConsistencyLevel consistency, final String records);
+ /**
+ * Write a set of Points to the influxdb database with the string records.
+ *
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
+ *
+ * @param database
+ * the name of the database to write
+ * @param retentionPolicy
+ * the retentionPolicy to use
+ * @param consistency
+ * the ConsistencyLevel to use
+ * @param precision
+ * the time precision to use
+ * @param records
+ * the points in the correct lineprotocol.
+ */
+ public void write(final String database, final String retentionPolicy,
+ final ConsistencyLevel consistency, final TimeUnit precision, final String records);
+
/**
* Write a set of Points to the influxdb database with the list of string records.
*
- * {@linkplain "https://github.com/influxdb/influxdb/pull/2696"}
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
*
+ * @param database
+ * the name of the database to write
+ * @param retentionPolicy
+ * the retentionPolicy to use
+ * @param consistency
+ * the ConsistencyLevel to use
* @param records
+ * the List of points in the correct lineprotocol.
*/
public void write(final String database, final String retentionPolicy,
final ConsistencyLevel consistency, final List<String> records);
+ /**
+ * Write a set of Points to the influxdb database with the list of string records.
+ *
+ * @see <a href="https://github.com/influxdb/influxdb/pull/2696">2696</a>
+ *
+ * @param database
+ * the name of the database to write
+ * @param retentionPolicy
+ * the retentionPolicy to use
+ * @param consistency
+ * the ConsistencyLevel to use
+ * @param precision
+ * the time precision to use
+ * @param records
+ * the List of points in the correct lineprotocol.
+ */
+ public void write(final String database, final String retentionPolicy,
+ final ConsistencyLevel consistency, final TimeUnit precision, final List<String> records);
+
/**
* Write a set of Points to the influxdb database with the string records through UDP.
*
* @param udpPort
- * @param records the content will be encoded by UTF-8 before sent.
+ * the udpPort where influxdb is listening
+ * @param records
+ * the content will be encoded by UTF-8 before sent.
*/
public void write(final int udpPort, final String records);
@@ -219,7 +440,9 @@ public void write(final String database, final String retentionPolicy,
* Write a set of Points to the influxdb database with the list of string records through UDP.
*
* @param udpPort
- * @param records list of record, the content will be encoded by UTF-8 before sent.
+ * the udpPort where influxdb is listening
+ * @param records
+ * list of record, the content will be encoded by UTF-8 before sent.
*/
public void write(final int udpPort, final List<String> records);
@@ -232,6 +455,44 @@ public void write(final String database, final String retentionPolicy,
*/
public QueryResult query(final Query query);
+ /**
+ * Execute a query against a database.
+ *
+ * One of the consumers will be executed.
+ *
+ * @param query
+ * the query to execute.
+ * @param onSuccess
+ * the consumer to invoke when result is received
+ * @param onFailure
+ * the consumer to invoke when error is thrown
+ */
+ public void query(final Query query, final Consumer<QueryResult> onSuccess, final Consumer<Throwable> onFailure);
+
+ /**
+ * Execute a streaming query against a database.
+ *
+ * @param query
+ * the query to execute.
+ * @param chunkSize
+ * the number of QueryResults to process in one chunk.
+ * @param onNext
+ * the consumer to invoke for each received QueryResult
+ */
+ public void query(Query query, int chunkSize, Consumer<QueryResult> onNext);
+
+ /**
+ * Execute a streaming query against a database.
+ *
+ * @param query
+ * the query to execute.
+ * @param chunkSize
+ * the number of QueryResults to process in one chunk.
+ * @param onNext
+ * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query
+ */
+ public void query(Query query, int chunkSize, BiConsumer<Cancellable, QueryResult> onNext);
+
/**
* Execute a streaming query against a database.
*
@@ -239,10 +500,43 @@ public void write(final String database, final String retentionPolicy,
* the query to execute.
* @param chunkSize
* the number of QueryResults to process in one chunk.
- * @param consumer
+ * @param onNext
* the consumer to invoke for each received QueryResult
+ * @param onComplete
+ * the onComplete to invoke for successfully end of stream
+ */
+ public void query(Query query, int chunkSize, Consumer<QueryResult> onNext, Runnable onComplete);
+
+ /**
+ * Execute a streaming query against a database.
+ *
+ * @param query
+ * the query to execute.
+ * @param chunkSize
+ * the number of QueryResults to process in one chunk.
+ * @param onNext
+ * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query
+ * @param onComplete
+ * the onComplete to invoke for successfully end of stream
+ */
+ public void query(Query query, int chunkSize, BiConsumer<Cancellable, QueryResult> onNext, Runnable onComplete);
+
+ /**
+ * Execute a streaming query against a database.
+ *
+ * @param query
+ * the query to execute.
+ * @param chunkSize
+ * the number of QueryResults to process in one chunk.
+ * @param onNext
+ * the consumer to invoke for each received QueryResult; with capability to discontinue a streaming query
+ * @param onComplete
+ * the onComplete to invoke for successfully end of stream
+ * @param onFailure
+ * the consumer for error handling
*/
- public void query(Query query, int chunkSize, Consumer<QueryResult> consumer);
+ public void query(Query query, int chunkSize, BiConsumer<Cancellable, QueryResult> onNext, Runnable onComplete,
+ Consumer<Throwable> onFailure);
/**
* Execute a query against a database.
@@ -259,7 +553,10 @@ public void write(final String database, final String retentionPolicy,
*
* @param name
* the name of the new database.
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a parameterized CREATE DATABASE query.
*/
+ @Deprecated
public void createDatabase(final String name);
/**
@@ -267,14 +564,20 @@ public void write(final String database, final String retentionPolicy,
*
* @param name
* the name of the database to delete.
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a DROP DATABASE query.
*/
+ @Deprecated
public void deleteDatabase(final String name);
/**
* Describe all available databases.
*
* @return a List of all Database names.
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a SHOW DATABASES query.
*/
+ @Deprecated
+ public List<String> describeDatabases();
/**
@@ -284,7 +587,10 @@ public void write(final String database, final String retentionPolicy,
* the name of the database to search.
*
* @return true if the database exists or false if it doesn't exist
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a SHOW DATABASES query and inspect the result.
*/
+ @Deprecated
public boolean databaseExists(final String name);
/**
@@ -300,4 +606,83 @@ public void write(final String database, final String retentionPolicy,
*/
public void close();
+ /**
+ * Set the consistency level which is used for writing points.
+ *
+ * @param consistency
+ * the consistency level to set.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+ public InfluxDB setConsistency(final ConsistencyLevel consistency);
+
+ /**
+ * Set the database which is used for writing points.
+ *
+ * @param database
+ * the database to set.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+ public InfluxDB setDatabase(final String database);
+
+ /**
+ * Set the retention policy which is used for writing points.
+ *
+ * @param retentionPolicy
+ * the retention policy to set.
+ * @return the InfluxDB instance to be able to use it in a fluent manner.
+ */
+ public InfluxDB setRetentionPolicy(final String retentionPolicy);
+
+ /**
+ * Creates a retentionPolicy.
+ * @param rpName the name of the retentionPolicy(rp)
+ * @param database the name of the database
+ * @param duration the duration of the rp
+ * @param shardDuration the shardDuration
+ * @param replicationFactor the replicationFactor of the rp
+ * @param isDefault if the rp is the default rp for the database or not
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a parameterized CREATE RETENTION POLICY query.
+ */
+ @Deprecated
+ public void createRetentionPolicy(final String rpName, final String database, final String duration,
+ final String shardDuration, final int replicationFactor, final boolean isDefault);
+
+ /**
+ * Creates a retentionPolicy. (optional shardDuration)
+ * @param rpName the name of the retentionPolicy(rp)
+ * @param database the name of the database
+ * @param duration the duration of the rp
+ * @param replicationFactor the replicationFactor of the rp
+ * @param isDefault if the rp is the default rp for the database or not
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a parameterized CREATE RETENTION POLICY query.
+ */
+ @Deprecated
+ public void createRetentionPolicy(final String rpName, final String database, final String duration,
+ final int replicationFactor, final boolean isDefault);
+
+ /**
+ * Creates a retentionPolicy. (optional shardDuration and isDefault)
+ * @param rpName the name of the retentionPolicy(rp)
+ * @param database the name of the database
+ * @param duration the duration of the rp
+ * @param shardDuration the shardDuration
+ * @param replicationFactor the replicationFactor of the rp
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a parameterized CREATE RETENTION POLICY query.
+ */
+ @Deprecated
+ public void createRetentionPolicy(final String rpName, final String database, final String duration,
+ final String shardDuration, final int replicationFactor);
+
+ /**
+ * Drops a retentionPolicy in a database.
+ * @param rpName the name of the retentionPolicy
+ * @param database the name of the database
+ * @deprecated (since 2.9, removed in 3.0) Use org.influxdb.InfluxDB.query(Query)
+ * to execute a DROP RETENTION POLICY query.
+ */
+ @Deprecated
+ public void dropRetentionPolicy(final String rpName, final String database);
}
diff --git a/src/main/java/org/influxdb/InfluxDBException.java b/src/main/java/org/influxdb/InfluxDBException.java
new file mode 100644
index 000000000..9f826f8eb
--- /dev/null
+++ b/src/main/java/org/influxdb/InfluxDBException.java
@@ -0,0 +1,195 @@
+package org.influxdb;
+
+import java.io.InputStream;
+
+import org.msgpack.core.MessagePack;
+import org.msgpack.core.MessageUnpacker;
+import org.msgpack.value.ImmutableMapValue;
+import org.msgpack.value.impl.ImmutableStringValueImpl;
+
+import com.squareup.moshi.JsonAdapter;
+import com.squareup.moshi.Moshi;
+
+/**
+ * A wrapper for various exceptions caused while interacting with InfluxDB.
+ *
+ * @author Simon Legner
+ */
+public class InfluxDBException extends RuntimeException {
+
+ public InfluxDBException(final String message) {
+ super(message);
+ }
+
+ public InfluxDBException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+
+ public InfluxDBException(final Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * @return true if the operation may succeed if repeated, false otherwise.
+ */
+ public boolean isRetryWorth() {
+ return true;
+ }
+
+ /* See https://github.com/influxdata/influxdb/blob/master/tsdb/shard.go */
+ static final String FIELD_TYPE_CONFLICT_ERROR = "field type conflict";
+ /* See https://github.com/influxdata/influxdb/blob/master/coordinator/points_writer.go */
+ static final String POINTS_BEYOND_RETENTION_POLICY_ERROR = "points beyond retention policy";
+ /* See https://github.com/influxdata/influxdb/blob/master/models/points.go */
+ static final String UNABLE_TO_PARSE_ERROR = "unable to parse";
+ /* See https://github.com/influxdata/telegraf/blob/master/plugins/outputs/influxdb/influxdb.go */
+ static final String HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR = "hinted handoff queue not empty";
+ /* See https://github.com/influxdata/influxdb/blob/master/tsdb/engine/tsm1/cache.go */
+ static final String CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR = "cache-max-memory-size exceeded";
+ /* For all messages below see https://github.com/influxdata/influxdb/blob/master/services/httpd/handler.go */
+ static final String DATABASE_NOT_FOUND_ERROR = "database not found";
+ static final String USER_REQUIRED_ERROR = "user is required to write to database";
+ static final String USER_NOT_AUTHORIZED_ERROR = "user is not authorized to write to database";
+ static final String AUTHORIZATION_FAILED_ERROR = "authorization failed";
+ static final String USERNAME_REQUIRED_ERROR = "username required";
+
+ public static final class DatabaseNotFoundException extends InfluxDBException {
+ private DatabaseNotFoundException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class HintedHandOffQueueNotEmptyException extends InfluxDBException {
+ private HintedHandOffQueueNotEmptyException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class UnableToParseException extends InfluxDBException {
+ private UnableToParseException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class FieldTypeConflictException extends InfluxDBException {
+ private FieldTypeConflictException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class PointsBeyondRetentionPolicyException extends InfluxDBException {
+ private PointsBeyondRetentionPolicyException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class CacheMaxMemorySizeExceededException extends InfluxDBException {
+ private CacheMaxMemorySizeExceededException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return true;
+ }
+ }
+
+ public static final class RetryBufferOverrunException extends InfluxDBException {
+ public RetryBufferOverrunException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ public static final class AuthorizationFailedException extends InfluxDBException {
+ public AuthorizationFailedException(final String message) {
+ super(message);
+ }
+
+ public boolean isRetryWorth() {
+ return false;
+ }
+ }
+
+ private static InfluxDBException buildExceptionFromErrorMessage(final String errorMessage) {
+ if (errorMessage.contains(DATABASE_NOT_FOUND_ERROR)) {
+ return new DatabaseNotFoundException(errorMessage);
+ }
+ if (errorMessage.contains(POINTS_BEYOND_RETENTION_POLICY_ERROR)) {
+ return new PointsBeyondRetentionPolicyException(errorMessage);
+ }
+ if (errorMessage.contains(FIELD_TYPE_CONFLICT_ERROR)) {
+ return new FieldTypeConflictException(errorMessage);
+ }
+ if (errorMessage.contains(UNABLE_TO_PARSE_ERROR)) {
+ return new UnableToParseException(errorMessage);
+ }
+ if (errorMessage.contains(HINTED_HAND_OFF_QUEUE_NOT_EMPTY_ERROR)) {
+ return new HintedHandOffQueueNotEmptyException(errorMessage);
+ }
+ if (errorMessage.contains(CACHE_MAX_MEMORY_SIZE_EXCEEDED_ERROR)) {
+ return new CacheMaxMemorySizeExceededException(errorMessage);
+ }
+ if (errorMessage.contains(USER_REQUIRED_ERROR)
+ || errorMessage.contains(USER_NOT_AUTHORIZED_ERROR)
+ || errorMessage.contains(AUTHORIZATION_FAILED_ERROR)
+ || errorMessage.contains(USERNAME_REQUIRED_ERROR)) {
+ return new AuthorizationFailedException(errorMessage);
+ }
+ return new InfluxDBException(errorMessage);
+ }
+
+ private static class ErrorMessage {
+ public String error;
+ }
+
+ public static InfluxDBException buildExceptionForErrorState(final String errorBody) {
+ try {
+ Moshi moshi = new Moshi.Builder().build();
+ JsonAdapter<ErrorMessage> adapter = moshi.adapter(ErrorMessage.class).lenient();
+ ErrorMessage errorMessage = adapter.fromJson(errorBody);
+ return InfluxDBException.buildExceptionFromErrorMessage(errorMessage.error);
+ } catch (Exception e) {
+ return new InfluxDBException(errorBody);
+ }
+ }
+
+ /**
+ * Create corresponding InfluxDBException from the message pack error body.
+ * @param messagePackErrorBody
+ * the error body if any
+ * @return the Exception
+ */
+ public static InfluxDBException buildExceptionForErrorState(final InputStream messagePackErrorBody) {
+ try {
+ MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(messagePackErrorBody);
+ ImmutableMapValue mapVal = (ImmutableMapValue) unpacker.unpackValue();
+ return InfluxDBException.buildExceptionFromErrorMessage(
+ mapVal.map().get(new ImmutableStringValueImpl("error")).toString());
+ } catch (Exception e) {
+ return new InfluxDBException(e);
+ }
+ }
+}
diff --git a/src/main/java/org/influxdb/InfluxDBFactory.java b/src/main/java/org/influxdb/InfluxDBFactory.java
index 01c3281fa..aee28d73a 100644
--- a/src/main/java/org/influxdb/InfluxDBFactory.java
+++ b/src/main/java/org/influxdb/InfluxDBFactory.java
@@ -1,11 +1,12 @@
package org.influxdb;
+import org.influxdb.InfluxDB.ResponseFormat;
import org.influxdb.impl.InfluxDBImpl;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-
import okhttp3.OkHttpClient;
+import org.influxdb.impl.Preconditions;
+
+import java.util.Objects;
/**
@@ -25,7 +26,7 @@ public enum InfluxDBFactory {
* @return a InfluxDB adapter suitable to access a InfluxDB.
*/
public static InfluxDB connect(final String url) {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty.");
+ Preconditions.checkNonEmptyString(url, "url");
return new InfluxDBImpl(url, null, null, new OkHttpClient.Builder());
}
@@ -42,8 +43,8 @@ public static InfluxDB connect(final String url) {
* @return a InfluxDB adapter suitable to access a InfluxDB.
*/
public static InfluxDB connect(final String url, final String username, final String password) {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty.");
- Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "The username may not be null or empty.");
+ Preconditions.checkNonEmptyString(url, "url");
+ Preconditions.checkNonEmptyString(username, "username");
return new InfluxDBImpl(url, username, password, new OkHttpClient.Builder());
}
@@ -57,8 +58,8 @@ public static InfluxDB connect(final String url, final String username, final St
* @return a InfluxDB adapter suitable to access a InfluxDB.
*/
public static InfluxDB connect(final String url, final OkHttpClient.Builder client) {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty.");
- Preconditions.checkNotNull(client, "The client may not be null.");
+ Preconditions.checkNonEmptyString(url, "url");
+ Objects.requireNonNull(client, "client");
return new InfluxDBImpl(url, null, null, client);
}
@@ -78,9 +79,30 @@ public static InfluxDB connect(final String url, final OkHttpClient.Builder clie
*/
public static InfluxDB connect(final String url, final String username, final String password,
final OkHttpClient.Builder client) {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "The URL may not be null or empty.");
- Preconditions.checkArgument(!Strings.isNullOrEmpty(username), "The username may not be null or empty.");
- Preconditions.checkNotNull(client, "The client may not be null.");
- return new InfluxDBImpl(url, username, password, client);
+ return connect(url, username, password, client, ResponseFormat.JSON);
+ }
+
+ /**
+ * Create a connection to a InfluxDB.
+ *
+ * @param url
+ * the url to connect to.
+ * @param username
+ * the username which is used to authorize against the influxDB instance.
+ * @param password
+ * the password for the username which is used to authorize against the influxDB
+ * instance.
+ * @param client
+ * the HTTP client to use
+ * @param responseFormat
+ * The {@code ResponseFormat} to use for response from InfluxDB server
+ * @return a InfluxDB adapter suitable to access a InfluxDB.
+ */
+ public static InfluxDB connect(final String url, final String username, final String password,
+ final OkHttpClient.Builder client, final ResponseFormat responseFormat) {
+ Preconditions.checkNonEmptyString(url, "url");
+ Preconditions.checkNonEmptyString(username, "username");
+ Objects.requireNonNull(client, "client");
+ return new InfluxDBImpl(url, username, password, client, responseFormat);
}
}
diff --git a/src/main/java/org/influxdb/InfluxDBIOException.java b/src/main/java/org/influxdb/InfluxDBIOException.java
new file mode 100644
index 000000000..0a6858c76
--- /dev/null
+++ b/src/main/java/org/influxdb/InfluxDBIOException.java
@@ -0,0 +1,15 @@
+package org.influxdb;
+
+import java.io.IOException;
+
+/**
+ * A wrapper for {@link IOException} caused while interacting with InfluxDB.
+ *
+ * @author Simon Legner
+ */
+public class InfluxDBIOException extends InfluxDBException {
+
+ public InfluxDBIOException(final IOException cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/org/influxdb/InfluxDBMapperException.java b/src/main/java/org/influxdb/InfluxDBMapperException.java
new file mode 100644
index 000000000..a79dd9c7f
--- /dev/null
+++ b/src/main/java/org/influxdb/InfluxDBMapperException.java
@@ -0,0 +1,41 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 azeti Networks AG (<info@azeti.net>)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or
+ * substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package org.influxdb;
+
+/**
+ * @author fmachado
+ */
+public class InfluxDBMapperException extends RuntimeException {
+
+ private static final long serialVersionUID = -7328402653918756407L;
+
+ public InfluxDBMapperException(final String message, final Throwable cause) {
+ super(message, cause);
+ }
+
+ public InfluxDBMapperException(final String message) {
+ super(message);
+ }
+
+ public InfluxDBMapperException(final Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/org/influxdb/annotation/Column.java b/src/main/java/org/influxdb/annotation/Column.java
new file mode 100644
index 000000000..6edb256f8
--- /dev/null
+++ b/src/main/java/org/influxdb/annotation/Column.java
@@ -0,0 +1,41 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 azeti Networks AG ()
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or
+ * substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package org.influxdb.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * @author fmachado
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.FIELD)
+public @interface Column {
+
+ /**
+ * If unset, the annotated field's name will be used as the column name.
+ */
+ String name() default "";
+
+ boolean tag() default false;
+}
diff --git a/src/main/java/org/influxdb/annotation/Exclude.java b/src/main/java/org/influxdb/annotation/Exclude.java
new file mode 100644
index 000000000..23e076797
--- /dev/null
+++ b/src/main/java/org/influxdb/annotation/Exclude.java
@@ -0,0 +1,41 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 azeti Networks AG ()
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or
+ * substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package org.influxdb.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * When a POJO annotated with {@code @Measurement(allFields = true)} is loaded or saved,
+ * this annotation can be used to exclude some of its fields.
+ *
+ * Note: this is not considered when loading record measurements.
+ *
+ * @see Measurement#allFields()
+ *
+ * @author Eran Leshem
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.FIELD)
+public @interface Exclude {
+}
diff --git a/src/main/java/org/influxdb/annotation/Measurement.java b/src/main/java/org/influxdb/annotation/Measurement.java
new file mode 100644
index 000000000..6ea8142e0
--- /dev/null
+++ b/src/main/java/org/influxdb/annotation/Measurement.java
@@ -0,0 +1,54 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2017 azeti Networks AG ()
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish, distribute,
+ * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or
+ * substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+package org.influxdb.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @author fmachado
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface Measurement {
+
+ String name();
+
+ String database() default "[unassigned]";
+
+ String retentionPolicy() default "autogen";
+
+ TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+
+ /**
+ * If {@code true}, then all non-static fields of this measurement will be loaded or saved,
+ * regardless of any {@code @Column} annotations.
+ *
+ * Note: When loading record measurements, this is always implied to be true,
+ * since the record's canonical constructor is used to populate the record.
+ *
+ * @see Exclude
+ */
+ boolean allFields() default false;
+}
diff --git a/src/main/java/org/influxdb/annotation/TimeColumn.java b/src/main/java/org/influxdb/annotation/TimeColumn.java
new file mode 100644
index 000000000..94ed1d898
--- /dev/null
+++ b/src/main/java/org/influxdb/annotation/TimeColumn.java
@@ -0,0 +1,13 @@
+package org.influxdb.annotation;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.util.concurrent.TimeUnit;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.FIELD)
+public @interface TimeColumn {
+ TimeUnit timeUnit() default TimeUnit.MILLISECONDS;
+}
diff --git a/src/main/java/org/influxdb/dto/BatchPoints.java b/src/main/java/org/influxdb/dto/BatchPoints.java
index 0f39540d7..e32774628 100644
--- a/src/main/java/org/influxdb/dto/BatchPoints.java
+++ b/src/main/java/org/influxdb/dto/BatchPoints.java
@@ -1,18 +1,16 @@
package org.influxdb.dto;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
import org.influxdb.InfluxDB.ConsistencyLevel;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Ordering;
-
/**
* {Purpose of This Type}.
*
@@ -27,11 +25,21 @@ public class BatchPoints {
private Map tags;
private List points;
private ConsistencyLevel consistency;
+ private TimeUnit precision;
BatchPoints() {
// Only visible in the Builder
}
+ /**
+ * Create a new BatchPoints builder to create a new BatchPoints in a fluent manner.
+ *
+ * @return the Builder to be able to add further Builder calls.
+ */
+ public static Builder builder() {
+ return new Builder(null);
+ }
+
/**
* Create a new BatchPoints build to create a new BatchPoints in a fluent manner.
*
@@ -49,9 +57,10 @@ public static Builder database(final String database) {
public static final class Builder {
private final String database;
private String retentionPolicy;
- private final Map tags = Maps.newTreeMap(Ordering.natural());
- private final List points = Lists.newArrayList();
+ private final Map<String, String> tags = new TreeMap<>();
+ private final List<Point> points = new ArrayList<>();
private ConsistencyLevel consistency;
+ private TimeUnit precision;
/**
* @param database
@@ -63,7 +72,7 @@ public static final class Builder {
/**
* The retentionPolicy to use.
*
- * @param policy
+ * @param policy the retentionPolicy to use
* @return the Builder instance
*/
public Builder retentionPolicy(final String policy) {
@@ -88,7 +97,7 @@ public Builder tag(final String tagName, final String value) {
/**
* Add a Point to this set of points.
*
- * @param pointToAdd
+ * @param pointToAdd the Point to add
* @return the Builder instance
*/
public Builder point(final Point pointToAdd) {
@@ -99,7 +108,7 @@ public Builder point(final Point pointToAdd) {
/**
* Add a set of Points to this set of points.
*
- * @param pointsToAdd
+ * @param pointsToAdd the Points to add
* @return the Builder instance
*/
public Builder points(final Point... pointsToAdd) {
@@ -107,10 +116,21 @@ public Builder points(final Point... pointsToAdd) {
return this;
}
+ /**
+ * Add a set of Points to this set of points.
+ *
+ * @param pointsToAdd the Points to add
+ * @return the Builder instance
+ */
+ public Builder points(final Collection<Point> pointsToAdd) {
+ this.points.addAll(pointsToAdd);
+ return this;
+ }
+
/**
* Set the ConsistencyLevel to use. If not given it defaults to {@link ConsistencyLevel#ONE}
*
- * @param consistencyLevel
+ * @param consistencyLevel the ConsistencyLevel
* @return the Builder instance
*/
public Builder consistency(final ConsistencyLevel consistencyLevel) {
@@ -118,14 +138,22 @@ public Builder consistency(final ConsistencyLevel consistencyLevel) {
return this;
}
+ /**
+ * Set the time precision to use for the whole batch. If unspecified, will default to {@link TimeUnit#NANOSECONDS}
+ * @param precision the precision of the points
+ * @return the Builder instance
+ */
+ public Builder precision(final TimeUnit precision) {
+ this.precision = precision;
+ return this;
+ }
+
/**
* Create a new BatchPoints instance.
*
* @return the created BatchPoints.
*/
public BatchPoints build() {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(this.database),
- "Database must not be null or empty.");
BatchPoints batchPoints = new BatchPoints();
batchPoints.setDatabase(this.database);
for (Point point : this.points) {
@@ -138,6 +166,10 @@ public BatchPoints build() {
this.consistency = ConsistencyLevel.ONE;
}
batchPoints.setConsistency(this.consistency);
+ if (null == this.precision) {
+ this.precision = TimeUnit.NANOSECONDS;
+ }
+ batchPoints.setPrecision(this.precision);
return batchPoints;
}
}
@@ -187,10 +219,24 @@ void setPoints(final List points) {
this.points = points;
}
+ /**
+ * @return the time precision unit
+ */
+ public TimeUnit getPrecision() {
+ return precision;
+ }
+
+ /**
+ * @param precision the time precision to set for the batch points
+ */
+ void setPrecision(final TimeUnit precision) {
+ this.precision = precision;
+ }
+
/**
* Add a single Point to these batches.
*
- * @param point
+ * @param point the Point to add
* @return this Instance to be able to daisy chain calls.
*/
public BatchPoints point(final Point point) {
@@ -242,12 +288,13 @@ public boolean equals(final Object o) {
&& Objects.equals(retentionPolicy, that.retentionPolicy)
&& Objects.equals(tags, that.tags)
&& Objects.equals(points, that.points)
- && consistency == that.consistency;
+ && consistency == that.consistency
+ && precision == that.precision;
}
@Override
public int hashCode() {
- return Objects.hash(database, retentionPolicy, tags, points, consistency);
+ return Objects.hash(database, retentionPolicy, tags, points, consistency, precision);
}
/**
@@ -256,17 +303,19 @@ public int hashCode() {
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchPoints [database=");
- builder.append(this.database);
- builder.append(", retentionPolicy=");
- builder.append(this.retentionPolicy);
- builder.append(", consistency=");
- builder.append(this.consistency);
- builder.append(", tags=");
- builder.append(this.tags);
- builder.append(", points=");
- builder.append(this.points);
- builder.append("]");
+ builder.append("BatchPoints [database=")
+ .append(this.database)
+ .append(", retentionPolicy=")
+ .append(this.retentionPolicy)
+ .append(", consistency=")
+ .append(this.consistency)
+ .append(", tags=")
+ .append(this.tags)
+ .append(", precision=")
+ .append(this.precision)
+ .append(", points=")
+ .append(this.points)
+ .append("]");
return builder.toString();
}
@@ -278,9 +327,37 @@ public String toString() {
*/
public String lineProtocol() {
StringBuilder sb = new StringBuilder();
+
for (Point point : this.points) {
- sb.append(point.lineProtocol()).append("\n");
+ sb.append(point.lineProtocol(this.precision)).append("\n");
}
return sb.toString();
}
+
+ /**
+ * Test whether is possible to merge two BatchPoints objects.
+ *
+ * @param that batch point to merge in
+ * @return true if the batch points can be sent in a single HTTP request write
+ */
+ public boolean isMergeAbleWith(final BatchPoints that) {
+ return Objects.equals(database, that.database)
+ && Objects.equals(retentionPolicy, that.retentionPolicy)
+ && Objects.equals(tags, that.tags)
+ && consistency == that.consistency;
+ }
+
+ /**
+ * Merge two BatchPoints objects.
+ *
+ * @param that batch point to merge in
+ * @return true if the batch points have been merged into this BatchPoints instance. Return false otherwise.
+ */
+ public boolean mergeIn(final BatchPoints that) {
+ boolean mergeAble = isMergeAbleWith(that);
+ if (mergeAble) {
+ this.points.addAll(that.points);
+ }
+ return mergeAble;
+ }
}
diff --git a/src/main/java/org/influxdb/dto/BoundParameterQuery.java b/src/main/java/org/influxdb/dto/BoundParameterQuery.java
new file mode 100644
index 000000000..1f197289e
--- /dev/null
+++ b/src/main/java/org/influxdb/dto/BoundParameterQuery.java
@@ -0,0 +1,36 @@
+package org.influxdb.dto;
+
+public final class BoundParameterQuery extends Query {
+
+ private BoundParameterQuery(final String command, final String database) {
+ super(command, database);
+ }
+
+ public static class QueryBuilder {
+ private BoundParameterQuery query;
+ private String influxQL;
+
+ public static QueryBuilder newQuery(final String influxQL) {
+ QueryBuilder instance = new QueryBuilder();
+ instance.influxQL = influxQL;
+ return instance;
+ }
+
+ public QueryBuilder forDatabase(final String database) {
+ query = new BoundParameterQuery(influxQL, database);
+ return this;
+ }
+
+ public QueryBuilder bind(final String placeholder, final Object value) {
+ if (query == null) {
+ query = new BoundParameterQuery(influxQL, null);
+ }
+ query.bindParameter(placeholder, value);
+ return this;
+ }
+
+ public BoundParameterQuery create() {
+ return query;
+ }
+ }
+}
diff --git a/src/main/java/org/influxdb/dto/Point.java b/src/main/java/org/influxdb/dto/Point.java
old mode 100644
new mode 100755
index d1dd78a7c..1663913dc
--- a/src/main/java/org/influxdb/dto/Point.java
+++ b/src/main/java/org/influxdb/dto/Point.java
@@ -1,20 +1,32 @@
package org.influxdb.dto;
+import org.influxdb.BuilderException;
+import org.influxdb.InfluxDBMapperException;
+import org.influxdb.annotation.Column;
+import org.influxdb.annotation.Exclude;
+import org.influxdb.annotation.Measurement;
+import org.influxdb.annotation.TimeColumn;
+import org.influxdb.impl.Preconditions;
+import org.influxdb.impl.TypeMapper;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
+import java.math.RoundingMode;
import java.text.NumberFormat;
+import java.time.Instant;
import java.util.Locale;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
+import java.util.Optional;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.escape.Escaper;
-import com.google.common.escape.Escapers;
-
/**
* Representation of a InfluxDB database Point.
*
@@ -24,20 +36,23 @@
public class Point {
private String measurement;
private Map tags;
- private Long time;
+ private Number time;
private TimeUnit precision = TimeUnit.NANOSECONDS;
private Map fields;
-
- private static final Escaper FIELD_ESCAPER = Escapers.builder()
- .addEscape('\\', "\\\\")
- .addEscape('"', "\\\"")
- .build();
- private static final Escaper KEY_ESCAPER = Escapers.builder()
- .addEscape(' ', "\\ ")
- .addEscape(',', "\\,")
- .addEscape('=', "\\=")
- .build();
private static final int MAX_FRACTION_DIGITS = 340;
+ private static final ThreadLocal<NumberFormat> NUMBER_FORMATTER =
+ ThreadLocal.withInitial(() -> {
+ NumberFormat numberFormat = NumberFormat.getInstance(Locale.ENGLISH);
+ numberFormat.setMaximumFractionDigits(MAX_FRACTION_DIGITS);
+ numberFormat.setGroupingUsed(false);
+ numberFormat.setMinimumFractionDigits(1);
+ return numberFormat;
+ });
+
+ private static final int DEFAULT_STRING_BUILDER_SIZE = 1024;
+ private static final int MAX_STRING_BUILDER_SIZE = 64 * 1024;
+ private static final ThreadLocal<StringBuilder> CACHED_STRINGBUILDERS =
+ ThreadLocal.withInitial(() -> new StringBuilder(DEFAULT_STRING_BUILDER_SIZE));
Point() {
}
@@ -54,6 +69,28 @@ public static Builder measurement(final String measurement) {
return new Builder(measurement);
}
+ /**
+ * Create a new Point Builder to create a new Point in a fluent manner from a POJO.
+ *
+ * @param clazz Class of the POJO
+ * @return the Builder instance
+ */
+
+ public static Builder measurementByPOJO(final Class<?> clazz) {
+ Objects.requireNonNull(clazz, "clazz");
+ throwExceptionIfMissingAnnotation(clazz, Measurement.class);
+ String measurementName = findMeasurementName(clazz);
+ return new Builder(measurementName);
+ }
+
+ private static void throwExceptionIfMissingAnnotation(final Class<?> clazz,
+ final Class<? extends Annotation> expectedClass) {
+ if (!clazz.isAnnotationPresent(expectedClass)) {
+ throw new IllegalArgumentException("Class " + clazz.getName() + " is not annotated with @"
+ + expectedClass.getSimpleName());
+ }
+ }
+
/**
* Builder for a new Point.
*
@@ -61,10 +98,11 @@ public static Builder measurement(final String measurement) {
*
*/
public static final class Builder {
+ private static final BigInteger NANOSECONDS_PER_SECOND = BigInteger.valueOf(1000000000L);
private final String measurement;
+ private final Map<String, String> tags = new TreeMap<>();
- private Long time;
- private TimeUnit precision = TimeUnit.NANOSECONDS;
+ private Number time;
+ private TimeUnit precision;
+ private final Map<String, Object> fields = new TreeMap<>();
/**
@@ -84,8 +122,8 @@ public static final class Builder {
* @return the Builder instance.
*/
public Builder tag(final String tagName, final String value) {
- Preconditions.checkArgument(tagName != null);
- Preconditions.checkArgument(value != null);
+ Objects.requireNonNull(tagName, "tagName");
+ Objects.requireNonNull(value, "value");
if (!tagName.isEmpty() && !value.isEmpty()) {
tags.put(tagName, value);
}
@@ -121,20 +159,15 @@ public Builder field(final String field, Object value) {
if (value instanceof Number) {
if (value instanceof Byte) {
value = ((Byte) value).doubleValue();
- }
- if (value instanceof Short) {
+ } else if (value instanceof Short) {
value = ((Short) value).doubleValue();
- }
- if (value instanceof Integer) {
+ } else if (value instanceof Integer) {
value = ((Integer) value).doubleValue();
- }
- if (value instanceof Long) {
+ } else if (value instanceof Long) {
value = ((Long) value).doubleValue();
- }
- if (value instanceof BigInteger) {
+ } else if (value instanceof BigInteger) {
value = ((BigInteger) value).doubleValue();
}
-
}
fields.put(field, value);
return this;
@@ -155,15 +188,28 @@ public Builder addField(final String field, final double value) {
return this;
}
+ public Builder addField(final String field, final int value) {
+ fields.put(field, value);
+ return this;
+ }
+
+ public Builder addField(final String field, final float value) {
+ fields.put(field, value);
+ return this;
+ }
+
+ public Builder addField(final String field, final short value) {
+ fields.put(field, value);
+ return this;
+ }
+
public Builder addField(final String field, final Number value) {
fields.put(field, value);
return this;
}
public Builder addField(final String field, final String value) {
- if (value == null) {
- throw new IllegalArgumentException("Field value cannot be null");
- }
+ Objects.requireNonNull(value, "value");
fields.put(field, value);
return this;
@@ -184,42 +230,198 @@ public Builder fields(final Map fieldsToAdd) {
/**
* Add a time to this point.
*
- * @param precisionToSet
- * @param timeToSet
+ * @param timeToSet the time for this point
+ * @param precisionToSet the TimeUnit
* @return the Builder instance.
*/
- public Builder time(final long timeToSet, final TimeUnit precisionToSet) {
- Preconditions.checkNotNull(precisionToSet, "Precision must be not null!");
+ public Builder time(final Number timeToSet, final TimeUnit precisionToSet) {
+ Objects.requireNonNull(timeToSet, "timeToSet");
+ Objects.requireNonNull(precisionToSet, "precisionToSet");
this.time = timeToSet;
this.precision = precisionToSet;
return this;
}
+ /**
+ * Add a time to this point as long.
+ * only kept for binary compatibility with previous releases.
+ *
+ * @param timeToSet the time for this point as long
+ * @param precisionToSet the TimeUnit
+ * @return the Builder instance.
+ */
+ public Builder time(final long timeToSet, final TimeUnit precisionToSet) {
+ return time((Number) timeToSet, precisionToSet);
+ }
+
+ /**
+ * Add a time to this point as Long.
+ * only kept for binary compatibility with previous releases.
+ *
+ * @param timeToSet the time for this point as Long
+ * @param precisionToSet the TimeUnit
+ * @return the Builder instance.
+ */
+ public Builder time(final Long timeToSet, final TimeUnit precisionToSet) {
+ return time((Number) timeToSet, precisionToSet);
+ }
+
+ /**
+ * Does this builder contain any fields?
+ *
+ * @return true, if the builder contains any fields, false otherwise.
+ */
+ public boolean hasFields() {
+ return !fields.isEmpty();
+ }
+
+ /**
+ * Adds field map from object by reflection using {@link org.influxdb.annotation.Column}
+ * annotation.
+ *
+ * @param pojo POJO Object with annotation {@link org.influxdb.annotation.Column} on fields
+ * @return the Builder instance
+ */
+ public Builder addFieldsFromPOJO(final Object pojo) {
+
+ Class<?> clazz = pojo.getClass();
+ Measurement measurement = clazz.getAnnotation(Measurement.class);
+ boolean allFields = measurement != null && measurement.allFields();
+
+ while (clazz != null) {
+
+ TypeMapper typeMapper = TypeMapper.empty();
+ while (clazz != null) {
+ for (Field field : clazz.getDeclaredFields()) {
+
+ Column column = field.getAnnotation(Column.class);
+
+ if (column == null && !(allFields
+ && !field.isAnnotationPresent(Exclude.class) && !Modifier.isStatic(field.getModifiers()))) {
+ continue;
+ }
+
+ field.setAccessible(true);
+
+ String fieldName;
+ if (column != null && !column.name().isEmpty()) {
+ fieldName = column.name();
+ } else {
+ fieldName = field.getName();
+ }
+
+ addFieldByAttribute(pojo, field, column != null && column.tag(), fieldName, typeMapper);
+ }
+
+ Class<?> superclass = clazz.getSuperclass();
+ Type genericSuperclass = clazz.getGenericSuperclass();
+ if (genericSuperclass instanceof ParameterizedType) {
+ typeMapper = TypeMapper.of((ParameterizedType) genericSuperclass, superclass);
+ } else {
+ typeMapper = TypeMapper.empty();
+ }
+
+ clazz = superclass;
+ }
+ }
+
+ if (this.fields.isEmpty()) {
+ throw new BuilderException("Class " + pojo.getClass().getName()
+ + " has no @" + Column.class.getSimpleName() + " annotation");
+ }
+
+ return this;
+ }
+
+ private void addFieldByAttribute(final Object pojo, final Field field, final boolean tag,
+ final String fieldName, final TypeMapper typeMapper) {
+ try {
+ Object fieldValue = field.get(pojo);
+
+ TimeColumn tc = field.getAnnotation(TimeColumn.class);
+ Class<?> fieldType = (Class<?>) typeMapper.resolve(field.getGenericType());
+ if (tc != null) {
+ if (Instant.class.isAssignableFrom(fieldType)) {
+ Optional.ofNullable((Instant) fieldValue).ifPresent(instant -> {
+ TimeUnit timeUnit = tc.timeUnit();
+ if (timeUnit == TimeUnit.NANOSECONDS || timeUnit == TimeUnit.MICROSECONDS) {
+ this.time = BigInteger.valueOf(instant.getEpochSecond())
+ .multiply(NANOSECONDS_PER_SECOND)
+ .add(BigInteger.valueOf(instant.getNano()))
+ .divide(BigInteger.valueOf(TimeUnit.NANOSECONDS.convert(1, timeUnit)));
+ } else {
+ this.time = timeUnit.convert(instant.toEpochMilli(), TimeUnit.MILLISECONDS);
+ }
+ this.precision = timeUnit;
+ });
+ return;
+ }
+
+ throw new InfluxDBMapperException(
+ "Unsupported type " + fieldType + " for time: should be of Instant type");
+ }
+
+ if (tag) {
+ if (fieldValue != null) {
+ this.tags.put(fieldName, (String) fieldValue);
+ }
+ } else {
+ if (fieldValue != null) {
+ setField(fieldType, fieldName, fieldValue);
+ }
+ }
+
+ } catch (IllegalArgumentException | IllegalAccessException e) {
+ // Can not happen since we use metadata got from the object
+ throw new BuilderException(
+ "Field " + fieldName + " could not found on class " + pojo.getClass().getSimpleName());
+ }
+ }
+
/**
* Create a new Point.
*
* @return the newly created Point.
*/
public Point build() {
- Preconditions
- .checkArgument(!Strings.isNullOrEmpty(this.measurement),
- "Point name must not be null or empty.");
- Preconditions
- .checkArgument(this.fields.size() > 0,
- "Point must have at least one field specified.");
+ Preconditions.checkNonEmptyString(this.measurement, "measurement");
+ Preconditions.checkPositiveNumber(this.fields.size(), "fields size");
Point point = new Point();
point.setFields(this.fields);
point.setMeasurement(this.measurement);
if (this.time != null) {
point.setTime(this.time);
point.setPrecision(this.precision);
- } else {
- point.setTime(System.currentTimeMillis());
- point.setPrecision(TimeUnit.MILLISECONDS);
}
point.setTags(this.tags);
return point;
}
+
+ private void setField(
+ final Class> fieldType,
+ final String columnName,
+ final Object value) {
+ if (boolean.class.isAssignableFrom(fieldType) || Boolean.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (boolean) value);
+ } else if (long.class.isAssignableFrom(fieldType) || Long.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (long) value);
+ } else if (double.class.isAssignableFrom(fieldType) || Double.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (double) value);
+ } else if (float.class.isAssignableFrom(fieldType) || Float.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (float) value);
+ } else if (int.class.isAssignableFrom(fieldType) || Integer.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (int) value);
+ } else if (short.class.isAssignableFrom(fieldType) || Short.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (short) value);
+ } else if (String.class.isAssignableFrom(fieldType)) {
+ addField(columnName, (String) value);
+ } else if (Enum.class.isAssignableFrom(fieldType)) {
+ addField(columnName, ((Enum<?>) value).name());
+ } else {
+ throw new InfluxDBMapperException(
+ "Unsupported type " + fieldType + " for column " + columnName);
+ }
+ }
}
/**
@@ -234,7 +436,7 @@ void setMeasurement(final String measurement) {
* @param time
* the time to set
*/
- void setTime(final Long time) {
+ void setTime(final Number time) {
this.time = time;
}
@@ -261,6 +463,13 @@ void setPrecision(final TimeUnit precision) {
this.precision = precision;
}
+ /**
+ * @return the fields
+ */
+ Map<String, Object> getFields() {
+ return this.fields;
+ }
+
/**
* @param fields
* the fields to set
@@ -298,12 +507,16 @@ public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Point [name=");
builder.append(this.measurement);
- builder.append(", time=");
- builder.append(this.time);
+ if (this.time != null) {
+ builder.append(", time=");
+ builder.append(this.time);
+ }
builder.append(", tags=");
builder.append(this.tags);
- builder.append(", precision=");
- builder.append(this.precision);
+ if (this.precision != null) {
+ builder.append(", precision=");
+ builder.append(this.precision);
+ }
builder.append(", fields=");
builder.append(this.fields);
builder.append("]");
@@ -311,78 +524,169 @@ public String toString() {
}
/**
- * calculate the lineprotocol entry for a single Point.
- *
- * Documentation is WIP : https://github.com/influxdb/influxdb/pull/2997
+ * Calculate the lineprotocol entry for a single Point.
+ *
+ * NaN and infinity values are silently dropped as they are unsupported:
+ * https://github.com/influxdata/influxdb/issues/4089
*
- * https://github.com/influxdb/influxdb/blob/master/tsdb/README.md
+ * @see <a href="https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_reference/">
+ * InfluxDB line protocol reference</a>
*
- * @return the String without newLine.
+ * @return the String without newLine, empty when there are no fields to write
*/
public String lineProtocol() {
- final StringBuilder sb = new StringBuilder();
- sb.append(KEY_ESCAPER.escape(this.measurement));
- sb.append(concatenatedTags());
- sb.append(concatenateFields());
- sb.append(formatedTime());
+ return lineProtocol(null);
+ }
+
+ /**
+ * Calculate the lineprotocol entry for a single point, using a specific {@link TimeUnit} for the timestamp.
+ *
+ * NaN and infinity values are silently dropped as they are unsupported:
+ * https://github.com/influxdata/influxdb/issues/4089
+ *
+ * @see <a href="https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_reference/">
+ * InfluxDB line protocol reference</a>
+ *
+ * @param precision the time precision unit for this point
+ * @return the String without newLine, empty when there are no fields to write
+ */
+ public String lineProtocol(final TimeUnit precision) {
+
+ // setLength(0) is used for reusing cached StringBuilder instance per thread
+ // it reduces GC activity and performs better then new StringBuilder()
+ StringBuilder sb = CACHED_STRINGBUILDERS.get();
+ if (sb.capacity() > MAX_STRING_BUILDER_SIZE) {
+ sb = new StringBuilder(DEFAULT_STRING_BUILDER_SIZE);
+ CACHED_STRINGBUILDERS.set(sb);
+ } else {
+ sb.setLength(0);
+ }
+
+ escapeKey(sb, measurement);
+ concatenatedTags(sb);
+ int writtenFields = concatenatedFields(sb);
+ if (writtenFields == 0) {
+ return "";
+ }
+ formatedTime(sb, precision);
+
return sb.toString();
}
- private StringBuilder concatenatedTags() {
- final StringBuilder sb = new StringBuilder();
+ private void concatenatedTags(final StringBuilder sb) {
 for (Entry<String, String> tag : this.tags.entrySet()) {
- sb.append(",")
- .append(KEY_ESCAPER.escape(tag.getKey()))
- .append("=")
- .append(KEY_ESCAPER.escape(tag.getValue()));
+ sb.append(',');
+ escapeKey(sb, tag.getKey());
+ sb.append('=');
+ escapeKey(sb, tag.getValue());
}
- sb.append(" ");
- return sb;
+ sb.append(' ');
}
- private StringBuilder concatenateFields() {
- final StringBuilder sb = new StringBuilder();
- final int fieldCount = this.fields.size();
- int loops = 0;
-
- NumberFormat numberFormat = NumberFormat.getInstance(Locale.ENGLISH);
- numberFormat.setMaximumFractionDigits(MAX_FRACTION_DIGITS);
- numberFormat.setGroupingUsed(false);
- numberFormat.setMinimumFractionDigits(1);
-
+ private int concatenatedFields(final StringBuilder sb) {
+ int fieldCount = 0;
 for (Entry<String, Object> field : this.fields.entrySet()) {
- loops++;
Object value = field.getValue();
- if (value == null) {
+ if (value == null || isNotFinite(value)) {
continue;
}
-
- sb.append(KEY_ESCAPER.escape(field.getKey())).append("=");
- if (value instanceof String) {
- String stringValue = (String) value;
- sb.append("\"").append(FIELD_ESCAPER.escape(stringValue)).append("\"");
- } else if (value instanceof Number) {
+ escapeKey(sb, field.getKey());
+ sb.append('=');
+ if (value instanceof Number) {
if (value instanceof Double || value instanceof Float || value instanceof BigDecimal) {
- sb.append(numberFormat.format(value));
+ sb.append(NUMBER_FORMATTER.get().format(value));
} else {
- sb.append(value).append("i");
+ sb.append(value).append('i');
}
+ } else if (value instanceof String) {
+ String stringValue = (String) value;
+ sb.append('"');
+ escapeField(sb, stringValue);
+ sb.append('"');
} else {
sb.append(value);
}
- if (loops < fieldCount) {
- sb.append(",");
+ sb.append(',');
+
+ fieldCount++;
+ }
+
+ // efficiently chop off the trailing comma
+ int lengthMinusOne = sb.length() - 1;
+ if (sb.charAt(lengthMinusOne) == ',') {
+ sb.setLength(lengthMinusOne);
+ }
+
+ return fieldCount;
+ }
+
+ static void escapeKey(final StringBuilder sb, final String key) {
+ for (int i = 0; i < key.length(); i++) {
+ switch (key.charAt(i)) {
+ case ' ':
+ case ',':
+ case '=':
+ sb.append('\\');
+ default:
+ sb.append(key.charAt(i));
+ }
+ }
+ }
+
+ static void escapeField(final StringBuilder sb, final String field) {
+ for (int i = 0; i < field.length(); i++) {
+ switch (field.charAt(i)) {
+ case '\\':
+ case '\"':
+ sb.append('\\');
+ default:
+ sb.append(field.charAt(i));
}
}
+ }
- return sb;
+ private static boolean isNotFinite(final Object value) {
+ return value instanceof Double && !Double.isFinite((Double) value)
+ || value instanceof Float && !Float.isFinite((Float) value);
}
- private StringBuilder formatedTime() {
- final StringBuilder sb = new StringBuilder();
- sb.append(" ").append(TimeUnit.NANOSECONDS.convert(this.time, this.precision));
- return sb;
+ private void formatedTime(final StringBuilder sb, final TimeUnit precision) {
+ if (this.time == null) {
+ return;
+ }
+ TimeUnit converterPrecision = precision;
+
+ if (converterPrecision == null) {
+ converterPrecision = TimeUnit.NANOSECONDS;
+ }
+ if (this.time instanceof BigInteger) {
+ BigInteger time = (BigInteger) this.time;
+ long conversionFactor = converterPrecision.convert(1, this.precision);
+ if (conversionFactor >= 1) {
+ time = time.multiply(BigInteger.valueOf(conversionFactor));
+ } else {
+ conversionFactor = this.precision.convert(1, converterPrecision);
+ time = time.divide(BigInteger.valueOf(conversionFactor));
+ }
+ sb.append(" ").append(time);
+ } else if (this.time instanceof BigDecimal) {
+ BigDecimal time = (BigDecimal) this.time;
+ long conversionFactor = converterPrecision.convert(1, this.precision);
+ if (conversionFactor >= 1) {
+ time = time.multiply(BigDecimal.valueOf(conversionFactor));
+ } else {
+ conversionFactor = this.precision.convert(1, converterPrecision);
+ time = time.divide(BigDecimal.valueOf(conversionFactor), RoundingMode.HALF_UP);
+ }
+ sb.append(" ").append(time.toBigInteger());
+ } else {
+ sb.append(" ").append(converterPrecision.convert(this.time.longValue(), this.precision));
+ }
}
+
+ private static String findMeasurementName(final Class<?> clazz) {
+ return clazz.getAnnotation(Measurement.class).name();
+ }
}
diff --git a/src/main/java/org/influxdb/dto/Pong.java b/src/main/java/org/influxdb/dto/Pong.java
index 22ab79e33..4aa041e41 100644
--- a/src/main/java/org/influxdb/dto/Pong.java
+++ b/src/main/java/org/influxdb/dto/Pong.java
@@ -1,7 +1,5 @@
package org.influxdb.dto;
-import com.google.common.base.MoreObjects;
-
/**
* Representation of the response for a influxdb ping.
*
@@ -11,6 +9,7 @@
public class Pong {
private String version;
private long responseTime;
+ private static final String UNKNOWN_VERSION = "unknown";
/**
* @return the status
@@ -27,6 +26,15 @@ public void setVersion(final String version) {
this.version = version;
}
+ /**
+ * Good or bad connection status.
+ *
+ * @return true if the version of influxdb is not unknown.
+ */
+ public boolean isGood() {
+ return !UNKNOWN_VERSION.equalsIgnoreCase(version);
+ }
+
/**
* @return the responseTime
*/
@@ -47,11 +55,7 @@ public void setResponseTime(final long responseTime) {
*/
@Override
public String toString() {
- return MoreObjects
- .toStringHelper(this.getClass())
- .add("version", this.version)
- .add("responseTime", this.responseTime)
- .toString();
+ return "Pong{version=" + version + ", responseTime=" + responseTime + "}";
}
}
diff --git a/src/main/java/org/influxdb/dto/Query.java b/src/main/java/org/influxdb/dto/Query.java
index 6305c5942..ebed08e7e 100644
--- a/src/main/java/org/influxdb/dto/Query.java
+++ b/src/main/java/org/influxdb/dto/Query.java
@@ -1,8 +1,18 @@
package org.influxdb.dto;
+import com.squareup.moshi.JsonWriter;
+import okio.Buffer;
+import org.influxdb.InfluxDBIOException;
+import org.influxdb.querybuilder.Appendable;
+
+import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
+import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
/**
* Represents a Query against Influxdb.
@@ -15,18 +25,26 @@ public class Query {
private final String command;
private final String database;
private final boolean requiresPost;
+ protected final Map<String, Object> params = new HashMap<>();
/**
- * @param command
- * @param database
+ * @param command the query command
+ */
+ public Query(final String command) {
+ this(command, null);
+ }
+
+ /**
+ * @param command the query command
+ * @param database the database to query
*/
public Query(final String command, final String database) {
this(command, database, false);
}
/**
- * @param command
- * @param database
+ * @param command the query command
+ * @param database the database to query
* @param requiresPost true if the command requires a POST instead of GET to influxdb
*/
public Query(final String command, final String database, final boolean requiresPost) {
@@ -40,59 +58,64 @@ public Query(final String command, final String database, final boolean requires
* @return the command
*/
public String getCommand() {
- return this.command;
+ return command;
}
/**
* @return url encoded command
*/
public String getCommandWithUrlEncoded() {
- return encode(this.command);
+ return encode(command);
}
/**
* @return the database
*/
public String getDatabase() {
- return this.database;
+ return database;
}
public boolean requiresPost() {
return requiresPost;
}
- @SuppressWarnings("checkstyle:avoidinlineconditionals")
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((command == null) ? 0 : command.hashCode());
- result = prime * result
- + ((database == null) ? 0 : database.hashCode());
- return result;
+ public Query bindParameter(final String placeholder, final Object value) {
+ params.put(placeholder, value);
+ return this;
+ }
+
+ public boolean hasBoundParameters() {
+ return !params.isEmpty();
+ }
+
+ public String getParameterJsonWithUrlEncoded() {
+ try {
+ String jsonParameterObject = createJsonObject(params);
+ String urlEncodedJsonParameterObject = encode(jsonParameterObject);
+ return urlEncodedJsonParameterObject;
+ } catch (IOException e) {
+ throw new InfluxDBIOException(e);
+ }
}
- @SuppressWarnings("checkstyle:needbraces")
@Override
- public boolean equals(final Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- Query other = (Query) obj;
- if (command == null) {
- if (other.command != null)
- return false;
- } else if (!command.equals(other.command))
- return false;
- if (database == null) {
- if (other.database != null)
- return false;
- } else if (!database.equals(other.database))
+ public boolean equals(final Object o) {
+ if (o == null || getClass() != o.getClass()) {
return false;
- return true;
+ }
+
+ Query query = (Query) o;
+ return Objects.equals(command, query.command) && Objects.equals(database, query.database) && params.equals(
+ query.params);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = Objects.hashCode(command);
+ result = prime * result + Objects.hashCode(database);
+ result = prime * result + params.hashCode();
+ return result;
}
/**
@@ -105,7 +128,33 @@ public static String encode(final String command) {
try {
return URLEncoder.encode(command, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e);
+ throw new IllegalStateException("Every JRE must support UTF-8", e);
+ }
+ }
+
+ private String createJsonObject(final Map<String, Object> parameterMap) throws IOException {
+ Buffer b = new Buffer();
+ JsonWriter writer = JsonWriter.of(b);
+ writer.beginObject();
+ for (Map.Entry<String, Object> pair : parameterMap.entrySet()) {
+ String name = pair.getKey();
+ Object value = pair.getValue();
+ if (value instanceof Number) {
+ Number number = (Number) value;
+ writer.name(name).value(number);
+ } else if (value instanceof String) {
+ writer.name(name).value((String) value);
+ } else if (value instanceof Boolean) {
+ writer.name(name).value((Boolean) value);
+ } else if (value instanceof Appendable) {
+ StringBuilder stringBuilder = new StringBuilder();
+ ((Appendable) value).appendTo(stringBuilder);
+ writer.name(name).value(stringBuilder.toString());
+ } else {
+ writer.name(name).value(String.valueOf(value));
+ }
}
+ writer.endObject();
+ return b.readString(Charset.forName("utf-8"));
}
}
diff --git a/src/main/java/org/influxdb/example/Android.java b/src/main/java/org/influxdb/example/Android.java
new file mode 100644
index 000000000..f26495cd2
--- /dev/null
+++ b/src/main/java/org/influxdb/example/Android.java
@@ -0,0 +1,98 @@
+package org.influxdb.example;
+
+import org.influxdb.InfluxDB;
+import org.influxdb.InfluxDBFactory;
+import org.influxdb.dto.QueryResult;
+import org.influxdb.dto.Query;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author StrakarCe
+ * @since 07/05/2021
+ * @version 1
+ */
+public class Android {
+ // put the address IP of your database
+ String address = "http://192.168.1.75:8000/";
+ String dbName = "myDatabase";
+ String table = "SERIES";
+ QueryResult actual;
+ Boolean flag = false;
+ InfluxDB con;
+
+ public Android() {
+ super();
+ }
+ public void queryExecute(final Query query) {
+ Thread thread = new Thread(new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ //InfluxDB connector = InfluxDBFactory.connect(address);
+ // if you want to open every time
+ System.out.println("Send the query to the database ...");
+ // FOR A REAL APP CREATE A LOGGER ;
+ List results = new LinkedList<>();
+ actual = con.query(query);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ flag = true; // For simplicity, I use a simple flag to know when the thread have finished
+ }
+ });
+
+ thread.start();
+ }
+
+ /**
+ * It's to open the connexion with the database.
+ * In my case I decide to open once, do many query and close.
+ */
+ public void connexion() {
+ con = InfluxDBFactory.connect(address);
+ }
+ /**
+ * It's to close after my list of query.
+ */
+ public void close() {
+ con.close();
+ }
+ /*
+ * simple example of how you can create a query
+ */
+ private void queryLauncher(final String query) {
+ queryExecute(new Query(query, dbName));
+ while (!flag) { // ugly method to wait the thread
+ System.out.println("Wait the thread");
+ }
+ flag = false;
+ }
+ public String getEtat() {
+ queryLauncher("select last(value) from PTEC");
+ return actual.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString();
+ }
+ public String getHC() {
+ queryLauncher("SELECT last(value) FROM HCHC");
+ return actual.getResults().get(0).getSeries().get(0).getValues().get(0).get(1).toString();
+ }
+ // ------------------------- Example when you want to use it ------------
+ /*
+ Android test = new Android();
+ refresh.setOnClickListener(new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ test.connexion();
+ etat2.setText(test.getEtat());
+ hc2.setText(test.getHC());
+ hp2.setText(test.getHP());
+ prix2.setText(test.getDepense());
+ percMens2.setText(test.getPercentageMensuel());
+ percTotal2.setText(test.getPercentageTotal());
+ test.close();
+ }
+ });
+ */
+}
diff --git a/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java b/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java
new file mode 100644
index 000000000..ffa75af61
--- /dev/null
+++ b/src/main/java/org/influxdb/impl/BasicAuthInterceptor.java
@@ -0,0 +1,24 @@
+package org.influxdb.impl;
+
+import java.io.IOException;
+
+import okhttp3.Credentials;
+import okhttp3.Interceptor;
+import okhttp3.Request;
+import okhttp3.Response;
+
+public class BasicAuthInterceptor implements Interceptor {
+
+ private String credentials;
+
+ public BasicAuthInterceptor(final String user, final String password) {
+ credentials = Credentials.basic(user, password);
+ }
+
+ @Override
+ public Response intercept(final Chain chain) throws IOException {
+ Request request = chain.request();
+ Request authenticatedRequest = request.newBuilder().header("Authorization", credentials).build();
+ return chain.proceed(authenticatedRequest);
+ }
+}
diff --git a/src/main/java/org/influxdb/impl/BatchProcessor.java b/src/main/java/org/influxdb/impl/BatchProcessor.java
index cea38f0b4..28c45b693 100644
--- a/src/main/java/org/influxdb/impl/BatchProcessor.java
+++ b/src/main/java/org/influxdb/impl/BatchProcessor.java
@@ -1,9 +1,17 @@
package org.influxdb.impl;
+import org.influxdb.InfluxDB;
+import org.influxdb.InfluxDB.ConsistencyLevel;
+import org.influxdb.dto.BatchPoints;
+import org.influxdb.dto.Point;
+
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Objects;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
@@ -11,16 +19,11 @@
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
-import org.influxdb.InfluxDB;
-import org.influxdb.dto.BatchPoints;
-import org.influxdb.dto.Point;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
/**
* A BatchProcessor can be attached to a InfluxDB Instance to collect single point writes and
* aggregates them to BatchPoints to get a better write performance.
@@ -28,31 +31,48 @@
* @author stefan.majer [at] gmail.com
*
*/
-public class BatchProcessor {
+public final class BatchProcessor {
private static final Logger LOG = Logger.getLogger(BatchProcessor.class.getName());
 protected final BlockingQueue<AbstractBatchEntry> queue;
private final ScheduledExecutorService scheduler;
 private final BiConsumer<Iterable<Point>, Throwable> exceptionHandler;
- final InfluxDBImpl influxDB;
+ final InfluxDB influxDB;
final int actions;
private final TimeUnit flushIntervalUnit;
private final int flushInterval;
+ private final ConsistencyLevel consistencyLevel;
+ private final int jitterInterval;
+ private final TimeUnit precision;
+ private final BatchWriter batchWriter;
+ private boolean dropActionsOnQueueExhaustion;
+ Consumer<Point> droppedActionHandler;
+ Supplier<Double> randomSupplier;
/**
* The Builder to create a BatchProcessor instance.
*/
public static final class Builder {
- private final InfluxDBImpl influxDB;
+ private final InfluxDB influxDB;
private ThreadFactory threadFactory = Executors.defaultThreadFactory();
private int actions;
private TimeUnit flushIntervalUnit;
private int flushInterval;
+ private int jitterInterval;
+ // this is a default value if the InfluxDb.enableBatch(BatchOptions) IS NOT used
+ // the reason is backward compatibility
+ private int bufferLimit = 0;
+ private TimeUnit precision;
+
+ private BiConsumer<Iterable<Point>, Throwable> exceptionHandler = (entries, throwable) -> { };
+ private ConsistencyLevel consistencyLevel;
+ private boolean dropActionsOnQueueExhaustion;
+ private Consumer<Point> droppedActionsHandler;
/**
* @param threadFactory
* is optional.
+ * @return this Builder to use it fluent
*/
public Builder threadFactory(final ThreadFactory threadFactory) {
this.threadFactory = threadFactory;
@@ -64,7 +84,7 @@ public Builder threadFactory(final ThreadFactory threadFactory) {
* is mandatory.
*/
public Builder(final InfluxDB influxDB) {
- this.influxDB = (InfluxDBImpl) influxDB;
+ this.influxDB = influxDB;
}
/**
@@ -95,6 +115,37 @@ public Builder interval(final int interval, final TimeUnit unit) {
return this;
}
+ /**
+ * The interval at which at least should issued a write.
+ *
+ * @param flushInterval
+ * the flush interval
+ * @param jitterInterval
+ * the flush jitter interval
+ * @param unit
+ * the TimeUnit of the interval
+ *
+ * @return this Builder to use it fluent
+ */
+ public Builder interval(final int flushInterval, final int jitterInterval, final TimeUnit unit) {
+ this.flushInterval = flushInterval;
+ this.jitterInterval = jitterInterval;
+ this.flushIntervalUnit = unit;
+ return this;
+ }
+
+ /**
+ * A buffer for failed writes so that the writes will be retried later on. When the buffer is full and
+ * new points are written, oldest entries in the buffer are lost.
+ *
+ * @param bufferLimit maximum number of points stored in the buffer
+ * @return this Builder to use it fluent
+ */
+ public Builder bufferLimit(final int bufferLimit) {
+ this.bufferLimit = bufferLimit;
+ return this;
+ }
+
/**
* A callback to be used when an error occurs during a batchwrite.
*
@@ -108,20 +159,86 @@ public Builder exceptionHandler(final BiConsumer, Throwable> han
return this;
}
+ /**
+ * To define the behaviour when the action queue exhausts. If unspecified, will default to false which means that
+ * the {@link InfluxDB#write(Point)} will be blocked till the space in the queue is created.
+ * true means that the newer actions being written to the queue will dropped and
+ * {@link BatchProcessor#droppedActionHandler} will be called.
+ *
+ * @param dropActionsOnQueueExhaustion
+ * the dropActionsOnQueueExhaustion
+ *
+ * @return this Builder to use it fluent
+ */
+ public Builder dropActionsOnQueueExhaustion(final boolean dropActionsOnQueueExhaustion) {
+ this.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion;
+ return this;
+ }
+
+ /**
+ * A callback to be used when an actions are dropped on action queue exhaustion.
+ *
+ * @param handler
+ * the handler
+ *
+ * @return this Builder to use it fluent
+ */
+ public Builder droppedActionHandler(final Consumer<Point> handler) {
+ this.droppedActionsHandler = handler;
+ return this;
+ }
+
+
+
+ /**
+ * Consistency level for batch write.
+ *
+ * @param consistencyLevel
+ * the consistencyLevel
+ *
+ * @return this Builder to use it fluent
+ */
+ public Builder consistencyLevel(final ConsistencyLevel consistencyLevel) {
+ this.consistencyLevel = consistencyLevel;
+ return this;
+ }
+
+ /**
+ * Set the time precision to use for the batch.
+ *
+ * @param precision
+ * the precision
+ *
+ * @return this Builder to use it fluent
+ */
+ public Builder precision(final TimeUnit precision) {
+ this.precision = precision;
+ return this;
+ }
+
/**
* Create the BatchProcessor.
*
* @return the BatchProcessor instance.
*/
public BatchProcessor build() {
- Preconditions.checkNotNull(this.influxDB, "influxDB may not be null");
- Preconditions.checkArgument(this.actions > 0, "actions should > 0");
- Preconditions.checkArgument(this.flushInterval > 0, "flushInterval should > 0");
- Preconditions.checkNotNull(this.flushIntervalUnit, "flushIntervalUnit may not be null");
- Preconditions.checkNotNull(this.threadFactory, "threadFactory may not be null");
- Preconditions.checkNotNull(this.exceptionHandler, "exceptionHandler may not be null");
- return new BatchProcessor(this.influxDB, this.threadFactory, this.actions, this.flushIntervalUnit,
- this.flushInterval, exceptionHandler);
+ Objects.requireNonNull(this.influxDB, "influxDB");
+ Preconditions.checkPositiveNumber(this.actions, "actions");
+ Preconditions.checkPositiveNumber(this.flushInterval, "flushInterval");
+ Preconditions.checkNotNegativeNumber(jitterInterval, "jitterInterval");
+ Preconditions.checkNotNegativeNumber(bufferLimit, "bufferLimit");
+ Objects.requireNonNull(this.flushIntervalUnit, "flushIntervalUnit");
+ Objects.requireNonNull(this.threadFactory, "threadFactory");
+ Objects.requireNonNull(this.exceptionHandler, "exceptionHandler");
+ BatchWriter batchWriter;
+ if (this.bufferLimit > this.actions) {
+ batchWriter = new RetryCapableBatchWriter(this.influxDB, this.exceptionHandler, this.bufferLimit, this.actions);
+ } else {
+ batchWriter = new OneShotBatchWriter(this.influxDB);
+ }
+ return new BatchProcessor(this.influxDB, batchWriter, this.threadFactory, this.actions, this.flushIntervalUnit,
+ this.flushInterval, this.jitterInterval, exceptionHandler, this.consistencyLevel,
+ this.precision, this.dropActionsOnQueueExhaustion, this.droppedActionsHandler);
}
}
@@ -180,41 +297,58 @@ public static Builder builder(final InfluxDB influxDB) {
return new Builder(influxDB);
}
- BatchProcessor(final InfluxDBImpl influxDB, final ThreadFactory threadFactory, final int actions,
- final TimeUnit flushIntervalUnit, final int flushInterval,
- final BiConsumer, Throwable> exceptionHandler) {
+ BatchProcessor(final InfluxDB influxDB, final BatchWriter batchWriter, final ThreadFactory threadFactory,
+ final int actions, final TimeUnit flushIntervalUnit, final int flushInterval, final int jitterInterval,
+ final BiConsumer<Iterable<Point>, Throwable> exceptionHandler,
+ final ConsistencyLevel consistencyLevel, final TimeUnit precision,
+ final boolean dropActionsOnQueueExhaustion, final Consumer<Point> droppedActionHandler) {
super();
this.influxDB = influxDB;
+ this.batchWriter = batchWriter;
this.actions = actions;
this.flushIntervalUnit = flushIntervalUnit;
this.flushInterval = flushInterval;
+ this.jitterInterval = jitterInterval;
this.scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory);
this.exceptionHandler = exceptionHandler;
+ this.consistencyLevel = consistencyLevel;
+ this.precision = precision;
+ this.dropActionsOnQueueExhaustion = dropActionsOnQueueExhaustion;
+ this.droppedActionHandler = droppedActionHandler;
if (actions > 1 && actions < Integer.MAX_VALUE) {
this.queue = new LinkedBlockingQueue<>(actions);
} else {
this.queue = new LinkedBlockingQueue<>();
}
- // Flush at specified Rate
- this.scheduler.scheduleAtFixedRate(new Runnable() {
+ this.randomSupplier = Math::random;
+
+ Runnable flushRunnable = new Runnable() {
@Override
public void run() {
+ // write doesn't throw any exceptions
write();
+ int jitterInterval = (int) (randomSupplier.get() * BatchProcessor.this.jitterInterval);
+ BatchProcessor.this.scheduler.schedule(this,
+ BatchProcessor.this.flushInterval + jitterInterval, BatchProcessor.this.flushIntervalUnit);
}
- }, this.flushInterval, this.flushInterval, this.flushIntervalUnit);
-
+ };
+ // Flush at specified Rate
+ this.scheduler.schedule(flushRunnable,
+ this.flushInterval + (int) (randomSupplier.get() * BatchProcessor.this.jitterInterval),
+ this.flushIntervalUnit);
}
void write() {
List currentBatch = null;
try {
if (this.queue.isEmpty()) {
+ BatchProcessor.this.batchWriter.write(Collections.emptyList());
return;
}
//for batch on HTTP.
- Map batchKeyToBatchPoints = Maps.newHashMap();
+ Map<String, BatchPoints> batchKeyToBatchPoints = new HashMap<>();
//for batch on UDP.
- Map> udpPortToBatchPoints = Maps.newHashMap();
+ Map<Integer, List<String>> udpPortToBatchPoints = new HashMap<>();
 List<AbstractBatchEntry> batchEntries = new ArrayList<>(this.queue.size());
this.queue.drainTo(batchEntries);
currentBatch = new ArrayList<>(batchEntries.size());
@@ -229,7 +363,8 @@ void write() {
String batchKey = dbName + "_" + rp;
if (!batchKeyToBatchPoints.containsKey(batchKey)) {
BatchPoints batchPoints = BatchPoints.database(dbName)
- .retentionPolicy(rp).build();
+ .retentionPolicy(rp).consistency(getConsistencyLevel())
+ .precision(getPrecision()).build();
batchKeyToBatchPoints.put(batchKey, batchPoints);
}
batchKeyToBatchPoints.get(batchKey).point(point);
@@ -244,9 +379,8 @@ void write() {
}
}
- for (BatchPoints batchPoints : batchKeyToBatchPoints.values()) {
- BatchProcessor.this.influxDB.write(batchPoints);
- }
+ BatchProcessor.this.batchWriter.write(batchKeyToBatchPoints.values());
+
 for (Entry<Integer, List<String>> entry : udpPortToBatchPoints.entrySet()) {
for (String lineprotocolStr : entry.getValue()) {
BatchProcessor.this.influxDB.write(entry.getKey(), lineprotocolStr);
@@ -267,7 +401,14 @@ void write() {
*/
void put(final AbstractBatchEntry batchEntry) {
try {
- this.queue.put(batchEntry);
+ if (this.dropActionsOnQueueExhaustion) {
+ if (!this.queue.offer(batchEntry)) {
+ this.droppedActionHandler.accept(batchEntry.getPoint());
+ return;
+ }
+ } else {
+ this.queue.put(batchEntry);
+ }
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
@@ -289,6 +430,7 @@ public void run() {
void flushAndShutdown() {
this.write();
this.scheduler.shutdown();
+ this.batchWriter.close();
}
/**
@@ -297,4 +439,24 @@ void flushAndShutdown() {
void flush() {
this.write();
}
+
+ public ConsistencyLevel getConsistencyLevel() {
+ return consistencyLevel;
+ }
+
+ public TimeUnit getPrecision() {
+ return precision;
+ }
+
+ BatchWriter getBatchWriter() {
+ return batchWriter;
+ }
+
+ public boolean isDropActionsOnQueueExhaustion() {
+ return dropActionsOnQueueExhaustion;
+ }
+
+ public Consumer<Point> getDroppedActionHandler() {
+ return droppedActionHandler;
+ }
}
diff --git a/src/main/java/org/influxdb/impl/BatchWriter.java b/src/main/java/org/influxdb/impl/BatchWriter.java
new file mode 100644
index 000000000..2a71ebddd
--- /dev/null
+++ b/src/main/java/org/influxdb/impl/BatchWriter.java
@@ -0,0 +1,22 @@
+package org.influxdb.impl;
+
+import org.influxdb.dto.BatchPoints;
+
+import java.util.Collection;
+
+/**
+ * Write individual batches to InfluxDB.
+ */
+interface BatchWriter {
+ /**
+ * Write the given batch into InfluxDB.
+ * @param batchPointsCollection to write
+ */
+ void write(Collection<BatchPoints> batchPointsCollection);
+
+ /**
+ * FLush all cached writes into InfluxDB. The application is about to exit.
+ */
+ void close();
+}
+
diff --git a/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java b/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java
index 8969780d9..adaa3d528 100644
--- a/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java
+++ b/src/main/java/org/influxdb/impl/GzipRequestInterceptor.java
@@ -2,6 +2,7 @@
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Pattern;
import okhttp3.Interceptor;
import okhttp3.MediaType;
@@ -19,6 +20,8 @@
*/
final class GzipRequestInterceptor implements Interceptor {
+ private static final Pattern WRITE_PATTERN = Pattern.compile(".*/write", Pattern.CASE_INSENSITIVE);
+
private AtomicBoolean enabled = new AtomicBoolean(false);
GzipRequestInterceptor() {
@@ -48,6 +51,10 @@ public Response intercept(final Interceptor.Chain chain) throws IOException {
return chain.proceed(originalRequest);
}
+ if (!WRITE_PATTERN.matcher(originalRequest.url().encodedPath()).matches()) {
+ return chain.proceed(originalRequest);
+ }
+
Request compressedRequest = originalRequest.newBuilder().header("Content-Encoding", "gzip")
.method(originalRequest.method(), gzip(body)).build();
return chain.proceed(compressedRequest);
diff --git a/src/main/java/org/influxdb/impl/InfluxDBImpl.java b/src/main/java/org/influxdb/impl/InfluxDBImpl.java
index 884787cab..23427a23d 100644
--- a/src/main/java/org/influxdb/impl/InfluxDBImpl.java
+++ b/src/main/java/org/influxdb/impl/InfluxDBImpl.java
@@ -1,54 +1,61 @@
package org.influxdb.impl;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
-
-import org.influxdb.InfluxDB;
-import org.influxdb.dto.BatchPoints;
-import org.influxdb.dto.Point;
-import org.influxdb.dto.Pong;
-import org.influxdb.dto.Query;
-import org.influxdb.dto.QueryResult;
-import org.influxdb.impl.BatchProcessor.HttpBatchEntry;
-import org.influxdb.impl.BatchProcessor.UdpBatchEntry;
-
import okhttp3.Headers;
-import okhttp3.HttpUrl;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
+import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import okhttp3.logging.HttpLoggingInterceptor;
import okhttp3.logging.HttpLoggingInterceptor.Level;
import okio.BufferedSource;
+import org.influxdb.BatchOptions;
+import org.influxdb.InfluxDB;
+import org.influxdb.InfluxDBException;
+import org.influxdb.InfluxDBIOException;
+import org.influxdb.dto.BatchPoints;
+import org.influxdb.dto.Point;
+import org.influxdb.dto.Pong;
+import org.influxdb.dto.Query;
+import org.influxdb.dto.QueryResult;
+import org.influxdb.impl.BatchProcessor.HttpBatchEntry;
+import org.influxdb.impl.BatchProcessor.UdpBatchEntry;
+import org.influxdb.msgpack.MessagePackConverterFactory;
+import org.influxdb.msgpack.MessagePackTraverser;
import retrofit2.Call;
import retrofit2.Callback;
+import retrofit2.Converter.Factory;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.converter.moshi.MoshiConverterFactory;
import java.io.EOFException;
import java.io.IOException;
+import java.io.InputStream;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.SocketException;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.LongAdder;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
/**
* Implementation of a InluxDB API.
@@ -56,51 +63,181 @@
* @author stefan.majer [at] gmail.com
*/
public class InfluxDBImpl implements InfluxDB {
+
+ private static final String APPLICATION_MSGPACK = "application/x-msgpack";
+
static final okhttp3.MediaType MEDIA_TYPE_STRING = MediaType.parse("text/plain");
private static final String SHOW_DATABASE_COMMAND_ENCODED = Query.encode("SHOW DATABASES");
- private final InetAddress hostAddress;
- private final String username;
- private final String password;
+ /**
+ * This static constant holds the http logging log level expected in DEBUG mode
+ * It is set by System property {@code org.influxdb.InfluxDB.logLevel}.
+ *
+ * @see org.influxdb.InfluxDB#LOG_LEVEL_PROPERTY
+ */
+ private static final LogLevel LOG_LEVEL = LogLevel.parseLogLevel(System.getProperty(LOG_LEVEL_PROPERTY));
+
+ private final String hostName;
+ private String version;
private final Retrofit retrofit;
+ private final OkHttpClient client;
private final InfluxDBService influxDBService;
private BatchProcessor batchProcessor;
private final AtomicBoolean batchEnabled = new AtomicBoolean(false);
- private final AtomicLong writeCount = new AtomicLong();
- private final AtomicLong unBatchedCount = new AtomicLong();
- private final AtomicLong batchedCount = new AtomicLong();
+ private final LongAdder writeCount = new LongAdder();
+ private final LongAdder unBatchedCount = new LongAdder();
+ private final LongAdder batchedCount = new LongAdder();
private volatile DatagramSocket datagramSocket;
private final HttpLoggingInterceptor loggingInterceptor;
private final GzipRequestInterceptor gzipRequestInterceptor;
private LogLevel logLevel = LogLevel.NONE;
- private JsonAdapter adapter;
+ private String database;
+ private String retentionPolicy = "autogen";
+ private ConsistencyLevel consistency = ConsistencyLevel.ONE;
+ private final boolean messagePack;
+ private Boolean messagePackSupport;
+ private final ChunkProccesor chunkProccesor;
+
+ /**
+ * Constructs a new {@code InfluxDBImpl}.
+ *
+ * @param url
+ * The InfluxDB server API URL
+ * @param username
+ * The InfluxDB user name
+ * @param password
+ * The InfluxDB user password
+ * @param okHttpBuilder
+ * The OkHttp Client Builder
+ * @param responseFormat
+ * The {@code ResponseFormat} to use for response from InfluxDB
+ * server
+ */
+ public InfluxDBImpl(final String url, final String username, final String password,
+ final OkHttpClient.Builder okHttpBuilder, final ResponseFormat responseFormat) {
+ this(url, username, password, okHttpBuilder, new Retrofit.Builder(), responseFormat);
+ }
+
+ /**
+ * Constructs a new {@code InfluxDBImpl}.
+ *
+ * @param url
+ * The InfluxDB server API URL
+ * @param username
+ * The InfluxDB user name
+ * @param password
+ * The InfluxDB user password
+ * @param okHttpBuilder
+ * The OkHttp Client Builder
+ * @param retrofitBuilder
+ * The Retrofit Builder
+ * @param responseFormat
+ * The {@code ResponseFormat} to use for response from InfluxDB
+ * server
+ */
+ public InfluxDBImpl(final String url, final String username, final String password,
+ final OkHttpClient.Builder okHttpBuilder, final Retrofit.Builder retrofitBuilder,
+ final ResponseFormat responseFormat) {
+ this.messagePack = ResponseFormat.MSGPACK.equals(responseFormat);
+ this.hostName = parseHost(url);
+
+ this.loggingInterceptor = new HttpLoggingInterceptor();
+ setLogLevel(LOG_LEVEL);
+
+ this.gzipRequestInterceptor = new GzipRequestInterceptor();
+ OkHttpClient.Builder clonedOkHttpBuilder = okHttpBuilder.build().newBuilder()
+ .addInterceptor(loggingInterceptor)
+ .addInterceptor(gzipRequestInterceptor);
+ if (username != null && password != null) {
+ clonedOkHttpBuilder.addInterceptor(new BasicAuthInterceptor(username, password));
+ }
+ Factory converterFactory = null;
+ switch (responseFormat) {
+ case MSGPACK:
+ clonedOkHttpBuilder.addInterceptor(chain -> {
+ Request request = chain.request().newBuilder().addHeader("Accept", APPLICATION_MSGPACK).build();
+ return chain.proceed(request);
+ });
+
+ converterFactory = MessagePackConverterFactory.create();
+ chunkProccesor = new MessagePackChunkProccesor();
+ break;
+ case JSON:
+ default:
+ converterFactory = MoshiConverterFactory.create();
+
+ Moshi moshi = new Moshi.Builder().build();
+ JsonAdapter adapter = moshi.adapter(QueryResult.class);
+ chunkProccesor = new JSONChunkProccesor(adapter);
+ break;
+ }
+
+ this.client = clonedOkHttpBuilder.build();
+ Retrofit.Builder clonedRetrofitBuilder = retrofitBuilder.baseUrl(url).build().newBuilder();
+ this.retrofit = clonedRetrofitBuilder.client(this.client)
+ .addConverterFactory(converterFactory).build();
+ this.influxDBService = this.retrofit.create(InfluxDBService.class);
+
+ }
public InfluxDBImpl(final String url, final String username, final String password,
final OkHttpClient.Builder client) {
+ this(url, username, password, client, ResponseFormat.JSON);
+
+ }
+
+ InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client,
+ final InfluxDBService influxDBService, final JsonAdapter adapter) {
super();
- Moshi moshi = new Moshi.Builder().build();
- this.hostAddress = parseHostAddress(url);
- this.username = username;
- this.password = password;
+ this.messagePack = false;
+ this.hostName = parseHost(url);
+
this.loggingInterceptor = new HttpLoggingInterceptor();
- this.loggingInterceptor.setLevel(Level.NONE);
+ setLogLevel(LOG_LEVEL);
+
this.gzipRequestInterceptor = new GzipRequestInterceptor();
- this.retrofit = new Retrofit.Builder()
- .baseUrl(url)
- .client(client.addInterceptor(loggingInterceptor).addInterceptor(gzipRequestInterceptor).build())
- .addConverterFactory(MoshiConverterFactory.create())
- .build();
- this.influxDBService = this.retrofit.create(InfluxDBService.class);
- this.adapter = moshi.adapter(QueryResult.class);
+ OkHttpClient.Builder clonedBuilder = client.build().newBuilder()
+ .addInterceptor(loggingInterceptor)
+ .addInterceptor(gzipRequestInterceptor)
+ .addInterceptor(new BasicAuthInterceptor(username, password));
+ this.client = clonedBuilder.build();
+ this.retrofit = new Retrofit.Builder().baseUrl(url)
+ .client(this.client)
+ .addConverterFactory(MoshiConverterFactory.create()).build();
+ this.influxDBService = influxDBService;
+
+ chunkProccesor = new JSONChunkProccesor(adapter);
}
- private InetAddress parseHostAddress(final String url) {
- try {
- return InetAddress.getByName(HttpUrl.parse(url).host());
- } catch (UnknownHostException e) {
- throw new RuntimeException(e);
- }
+ public InfluxDBImpl(final String url, final String username, final String password, final OkHttpClient.Builder client,
+ final String database, final String retentionPolicy, final ConsistencyLevel consistency) {
+ this(url, username, password, client);
+
+ setConsistency(consistency);
+ setDatabase(database);
+ setRetentionPolicy(retentionPolicy);
+ }
+
+ private String parseHost(final String url) {
+ String hostName;
+ try {
+ URI uri = new URI(url);
+ hostName = uri.getHost();
+ } catch (URISyntaxException e1) {
+ throw new IllegalArgumentException("Unable to parse url: " + url, e1);
+ }
+
+ if (hostName == null) {
+ throw new IllegalArgumentException("Unable to parse url: " + url);
+ }
+
+ try {
+ InetAddress.getByName(hostName);
+ } catch (UnknownHostException e) {
+ throw new InfluxDBIOException(e);
+ }
+ return hostName;
}
@Override
@@ -151,6 +288,34 @@ public boolean isGzipEnabled() {
return this.gzipRequestInterceptor.isEnabled();
}
+ @Override
+ public InfluxDB enableBatch() {
+ enableBatch(BatchOptions.DEFAULTS);
+ return this;
+ }
+
+ @Override
+ public InfluxDB enableBatch(final BatchOptions batchOptions) {
+
+ if (this.batchEnabled.get()) {
+ throw new IllegalStateException("BatchProcessing is already enabled.");
+ }
+ this.batchProcessor = BatchProcessor
+ .builder(this)
+ .actions(batchOptions.getActions())
+ .exceptionHandler(batchOptions.getExceptionHandler())
+ .interval(batchOptions.getFlushDuration(), batchOptions.getJitterDuration(), TimeUnit.MILLISECONDS)
+ .threadFactory(batchOptions.getThreadFactory())
+ .bufferLimit(batchOptions.getBufferLimit())
+ .consistencyLevel(batchOptions.getConsistency())
+ .precision(batchOptions.getPrecision())
+ .dropActionsOnQueueExhaustion(batchOptions.isDropActionsOnQueueExhaustion())
+ .droppedActionHandler(batchOptions.getDroppedActionHandler())
+ .build();
+ this.batchEnabled.set(true);
+ return this;
+ }
+
@Override
public InfluxDB enableBatch(final int actions, final int flushDuration,
final TimeUnit flushDurationTimeUnit) {
@@ -165,10 +330,28 @@ public InfluxDB enableBatch(final int actions, final int flushDuration,
return this;
}
+ @Override
+ public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit,
+ final ThreadFactory threadFactory,
+ final BiConsumer, Throwable> exceptionHandler,
+ final ConsistencyLevel consistency) {
+ enableBatch(actions, flushDuration, flushDurationTimeUnit, threadFactory, exceptionHandler)
+ .setConsistency(consistency);
+ return this;
+ }
+
@Override
public InfluxDB enableBatch(final int actions, final int flushDuration, final TimeUnit flushDurationTimeUnit,
final ThreadFactory threadFactory,
final BiConsumer, Throwable> exceptionHandler) {
+ enableBatch(actions, flushDuration, 0, flushDurationTimeUnit, threadFactory, exceptionHandler, false, null);
+ return this;
+ }
+
+ private InfluxDB enableBatch(final int actions, final int flushDuration, final int jitterDuration,
+ final TimeUnit durationTimeUnit, final ThreadFactory threadFactory,
+ final BiConsumer, Throwable> exceptionHandler,
+ final boolean dropActionsOnQueueExhaustion, final Consumer droppedActionHandler) {
if (this.batchEnabled.get()) {
throw new IllegalStateException("BatchProcessing is already enabled.");
}
@@ -176,8 +359,11 @@ public InfluxDB enableBatch(final int actions, final int flushDuration, final Ti
.builder(this)
.actions(actions)
.exceptionHandler(exceptionHandler)
- .interval(flushDuration, flushDurationTimeUnit)
+ .interval(flushDuration, jitterDuration, durationTimeUnit)
.threadFactory(threadFactory)
+ .consistencyLevel(consistency)
+ .dropActionsOnQueueExhaustion(dropActionsOnQueueExhaustion)
+ .droppedActionHandler(droppedActionHandler)
.build();
this.batchEnabled.set(true);
return this;
@@ -188,12 +374,6 @@ public void disableBatch() {
this.batchEnabled.set(false);
if (this.batchProcessor != null) {
this.batchProcessor.flushAndShutdown();
- if (this.logLevel != LogLevel.NONE) {
- System.out.println(
- "total writes:" + this.writeCount.get()
- + " unbatched:" + this.unBatchedCount.get()
- + " batchPoints:" + this.batchedCount);
- }
}
}
@@ -204,7 +384,7 @@ public boolean isBatchEnabled() {
@Override
public Pong ping() {
- Stopwatch watch = Stopwatch.createStarted();
+ final long started = System.currentTimeMillis();
Call call = this.influxDBService.ping();
try {
Response response = call.execute();
@@ -218,16 +398,34 @@ public Pong ping() {
}
Pong pong = new Pong();
pong.setVersion(version);
- pong.setResponseTime(watch.elapsed(TimeUnit.MILLISECONDS));
+ pong.setResponseTime(System.currentTimeMillis() - started);
return pong;
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new InfluxDBIOException(e);
}
}
@Override
public String version() {
- return ping().getVersion();
+ if (version == null) {
+ this.version = ping().getVersion();
+ }
+ return this.version;
+ }
+
+ @Override
+ public void write(final Point point) {
+ write(database, retentionPolicy, point);
+ }
+
+ @Override
+ public void write(final String records) {
+ write(database, retentionPolicy, consistency, records);
+ }
+
+ @Override
+ public void write(final List records) {
+ write(database, retentionPolicy, consistency, records);
}
@Override
@@ -240,9 +438,9 @@ public void write(final String database, final String retentionPolicy, final Poi
.retentionPolicy(retentionPolicy).build();
batchPoints.point(point);
this.write(batchPoints);
- this.unBatchedCount.incrementAndGet();
+ this.unBatchedCount.increment();
}
- this.writeCount.incrementAndGet();
+ this.writeCount.increment();
}
/**
@@ -255,45 +453,67 @@ public void write(final int udpPort, final Point point) {
this.batchProcessor.put(batchEntry);
} else {
this.write(udpPort, point.lineProtocol());
- this.unBatchedCount.incrementAndGet();
+ this.unBatchedCount.increment();
}
- this.writeCount.incrementAndGet();
+ this.writeCount.increment();
}
@Override
public void write(final BatchPoints batchPoints) {
- this.batchedCount.addAndGet(batchPoints.getPoints().size());
+ this.batchedCount.add(batchPoints.getPoints().size());
RequestBody lineProtocol = RequestBody.create(MEDIA_TYPE_STRING, batchPoints.lineProtocol());
+ String db = batchPoints.getDatabase();
+ if (db == null) {
+ db = this.database;
+ }
execute(this.influxDBService.writePoints(
- this.username,
- this.password,
- batchPoints.getDatabase(),
+ db,
batchPoints.getRetentionPolicy(),
- TimeUtil.toTimePrecision(TimeUnit.NANOSECONDS),
+ TimeUtil.toTimePrecision(batchPoints.getPrecision()),
batchPoints.getConsistency().value(),
lineProtocol));
}
+ @Override
+ public void writeWithRetry(final BatchPoints batchPoints) {
+ if (isBatchEnabled()) {
+ batchProcessor.getBatchWriter().write(Collections.singleton(batchPoints));
+ } else {
+ write(batchPoints);
+ }
+ }
+
@Override
public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency,
- final String records) {
+ final TimeUnit precision, final String records) {
execute(this.influxDBService.writePoints(
- this.username,
- this.password,
database,
retentionPolicy,
- TimeUtil.toTimePrecision(TimeUnit.NANOSECONDS),
+ TimeUtil.toTimePrecision(precision),
consistency.value(),
RequestBody.create(MEDIA_TYPE_STRING, records)));
}
+ @Override
+ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency,
+ final String records) {
+ write(database, retentionPolicy, consistency, TimeUnit.NANOSECONDS, records);
+ }
+
@Override
public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency,
final List records) {
- final String joinedRecords = Joiner.on("\n").join(records);
- write(database, retentionPolicy, consistency, joinedRecords);
+ write(database, retentionPolicy, consistency, TimeUnit.NANOSECONDS, records);
+ }
+
+
+ @Override
+ public void write(final String database, final String retentionPolicy, final ConsistencyLevel consistency,
+ final TimeUnit precision, final List records) {
+ write(database, retentionPolicy, consistency, precision, String.join("\n", records));
}
+
/**
* {@inheritDoc}
*/
@@ -302,9 +522,9 @@ public void write(final int udpPort, final String records) {
initialDatagramSocket();
byte[] bytes = records.getBytes(StandardCharsets.UTF_8);
try {
- datagramSocket.send(new DatagramPacket(bytes, bytes.length, hostAddress, udpPort));
+ datagramSocket.send(new DatagramPacket(bytes, bytes.length, new InetSocketAddress(hostName, udpPort)));
} catch (IOException e) {
- throw new RuntimeException(e);
+ throw new InfluxDBIOException(e);
}
}
@@ -315,7 +535,7 @@ private void initialDatagramSocket() {
try {
datagramSocket = new DatagramSocket();
} catch (SocketException e) {
- throw new RuntimeException(e);
+ throw new InfluxDBIOException(e);
}
}
}
@@ -327,8 +547,7 @@ private void initialDatagramSocket() {
*/
@Override
public void write(final int udpPort, final List records) {
- final String joinedRecords = Joiner.on("\n").join(records);
- write(udpPort, joinedRecords);
+ write(udpPort, String.join("\n", records));
}
/**
@@ -336,60 +555,162 @@ public void write(final int udpPort, final List records) {
*/
@Override
public QueryResult query(final Query query) {
- Call call;
- if (query.requiresPost()) {
- call = this.influxDBService.postQuery(this.username,
- this.password, query.getDatabase(), query.getCommandWithUrlEncoded());
- } else {
- call = this.influxDBService.query(this.username,
- this.password, query.getDatabase(), query.getCommandWithUrlEncoded());
- }
- return execute(call);
+ return executeQuery(callQuery(query));
}
/**
* {@inheritDoc}
*/
@Override
- public void query(final Query query, final int chunkSize, final Consumer consumer) {
+ public void query(final Query query, final Consumer onSuccess, final Consumer onFailure) {
+ final Call call = callQuery(query);
+ call.enqueue(new Callback() {
+ @Override
+ public void onResponse(final Call call, final Response response) {
+ if (response.isSuccessful()) {
+ onSuccess.accept(response.body());
+ } else {
+ Throwable t = null;
+ String errorBody = null;
+
+ try {
+ if (response.errorBody() != null) {
+ errorBody = response.errorBody().string();
+ }
+ } catch (IOException e) {
+ t = e;
+ }
- if (version().startsWith("0.") || version().startsWith("1.0")) {
- throw new RuntimeException("chunking not supported");
+ if (t != null) {
+ onFailure.accept(new InfluxDBException(response.message(), t));
+ } else if (errorBody != null) {
+ onFailure.accept(new InfluxDBException(response.message() + " - " + errorBody));
+ } else {
+ onFailure.accept(new InfluxDBException(response.message()));
+ }
}
+ }
- Call call = this.influxDBService.query(this.username, this.password,
- query.getDatabase(), query.getCommandWithUrlEncoded(), chunkSize);
+ @Override
+ public void onFailure(final Call call, final Throwable throwable) {
+ onFailure.accept(throwable);
+ }
+ });
+ }
- call.enqueue(new Callback() {
- @Override
- public void onResponse(final Call call, final Response response) {
- try {
- if (response.isSuccessful()) {
- BufferedSource source = response.body().source();
- while (true) {
- QueryResult result = InfluxDBImpl.this.adapter.fromJson(source);
- if (result != null) {
- consumer.accept(result);
- }
- }
- }
- try (ResponseBody errorBody = response.errorBody()) {
- throw new RuntimeException(errorBody.string());
- }
- } catch (EOFException e) {
- QueryResult queryResult = new QueryResult();
- queryResult.setError("DONE");
- consumer.accept(queryResult);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void query(final Query query, final int chunkSize, final Consumer onNext) {
+ query(query, chunkSize, onNext, () -> { });
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void query(final Query query, final int chunkSize, final BiConsumer onNext) {
+ query(query, chunkSize, onNext, () -> { });
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void query(final Query query, final int chunkSize, final Consumer onNext,
+ final Runnable onComplete) {
+ query(query, chunkSize, (cancellable, queryResult) -> onNext.accept(queryResult), onComplete);
+ }
+
+ @Override
+ public void query(final Query query, final int chunkSize, final BiConsumer onNext,
+ final Runnable onComplete) {
+ query(query, chunkSize, onNext, onComplete, null);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public void query(final Query query, final int chunkSize, final BiConsumer onNext,
+ final Runnable onComplete, final Consumer onFailure) {
+ Call call;
+ if (query.hasBoundParameters()) {
+ if (query.requiresPost()) {
+ call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize,
+ query.getParameterJsonWithUrlEncoded());
+ } else {
+ call = this.influxDBService.query(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize,
+ query.getParameterJsonWithUrlEncoded());
+ }
+ } else {
+ if (query.requiresPost()) {
+ call = this.influxDBService.postQuery(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize);
+ } else {
+ call = this.influxDBService.query(getDatabase(query), query.getCommandWithUrlEncoded(), chunkSize);
+ }
+ }
+
+ call.enqueue(new Callback() {
+ @Override
+ public void onResponse(final Call call, final Response response) {
- @Override
- public void onFailure(final Call call, final Throwable t) {
- throw new RuntimeException(t);
+ Cancellable cancellable = new Cancellable() {
+ @Override
+ public void cancel() {
+ call.cancel();
+ }
+
+ @Override
+ public boolean isCanceled() {
+ return call.isCanceled();
+ }
+ };
+
+ try {
+ if (response.isSuccessful()) {
+ ResponseBody chunkedBody = response.body();
+ chunkProccesor.process(chunkedBody, cancellable, onNext, onComplete);
+ } else {
+ // REVIEW: must be handled consistently with IOException.
+ ResponseBody errorBody = response.errorBody();
+ if (errorBody != null) {
+ InfluxDBException influxDBException = new InfluxDBException(errorBody.string());
+ if (onFailure == null) {
+ throw influxDBException;
+ } else {
+ onFailure.accept(influxDBException);
+ }
}
- });
+ }
+ } catch (IOException e) {
+ QueryResult queryResult = new QueryResult();
+ queryResult.setError(e.toString());
+ onNext.accept(cancellable, queryResult);
+ //passing null onFailure consumer is here for backward compatibility
+ //where the empty queryResult containing error is propagating into onNext consumer
+ if (onFailure != null) {
+ onFailure.accept(e);
+ }
+ } catch (Exception e) {
+ call.cancel();
+ if (onFailure != null) {
+ onFailure.accept(e);
+ }
+ }
+
+ }
+
+ @Override
+ public void onFailure(final Call call, final Throwable t) {
+ if (onFailure == null) {
+ throw new InfluxDBException(t);
+ } else {
+ onFailure.accept(t);
+ }
+ }
+ });
}
/**
@@ -397,8 +718,25 @@ public void onFailure(final Call call, final Throwable t) {
*/
@Override
public QueryResult query(final Query query, final TimeUnit timeUnit) {
- return execute(this.influxDBService.query(this.username, this.password, query.getDatabase(),
- TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded()));
+ Call call;
+ if (query.hasBoundParameters()) {
+ if (query.requiresPost()) {
+ call = this.influxDBService.postQuery(getDatabase(query), TimeUtil.toTimePrecision(timeUnit),
+ query.getCommandWithUrlEncoded(), query.getParameterJsonWithUrlEncoded());
+ } else {
+ call = this.influxDBService.query(getDatabase(query), TimeUtil.toTimePrecision(timeUnit),
+ query.getCommandWithUrlEncoded(), query.getParameterJsonWithUrlEncoded());
+ }
+ } else {
+ if (query.requiresPost()) {
+ call = this.influxDBService.postQuery(getDatabase(query),
+ TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded());
+ } else {
+ call = this.influxDBService.query(getDatabase(query),
+ TimeUtil.toTimePrecision(timeUnit), query.getCommandWithUrlEncoded(), null);
+ }
+ }
+ return executeQuery(call);
}
/**
@@ -406,12 +744,9 @@ public QueryResult query(final Query query, final TimeUnit timeUnit) {
*/
@Override
public void createDatabase(final String name) {
- Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Database name may not be null or empty");
+ Preconditions.checkNonEmptyString(name, "name");
String createDatabaseQueryString = String.format("CREATE DATABASE \"%s\"", name);
- if (this.version().startsWith("0.")) {
- createDatabaseQueryString = String.format("CREATE DATABASE IF NOT EXISTS \"%s\"", name);
- }
- execute(this.influxDBService.postQuery(this.username, this.password, Query.encode(createDatabaseQueryString)));
+ executeQuery(this.influxDBService.postQuery(Query.encode(createDatabaseQueryString)));
}
/**
@@ -419,8 +754,7 @@ public void createDatabase(final String name) {
*/
@Override
public void deleteDatabase(final String name) {
- execute(this.influxDBService.postQuery(this.username, this.password,
- Query.encode("DROP DATABASE \"" + name + "\"")));
+ executeQuery(this.influxDBService.postQuery(Query.encode("DROP DATABASE \"" + name + "\"")));
}
/**
@@ -428,12 +762,11 @@ public void deleteDatabase(final String name) {
*/
@Override
public List describeDatabases() {
- QueryResult result = execute(this.influxDBService.query(this.username,
- this.password, SHOW_DATABASE_COMMAND_ENCODED));
+ QueryResult result = executeQuery(this.influxDBService.postQuery(SHOW_DATABASE_COMMAND_ENCODED));
// {"results":[{"series":[{"name":"databases","columns":["name"],"values":[["mydb"]]}]}]}
// Series [name=databases, columns=[name], values=[[mydb], [unittest_1433605300968]]]
List> databaseNames = result.getResults().get(0).getSeries().get(0).getValues();
- List databases = Lists.newArrayList();
+ List databases = new ArrayList<>();
if (databaseNames != null) {
for (List