33 changes: 26 additions & 7 deletions .travis.yml
@@ -316,6 +316,30 @@ jobs:
script: *run_integration_test
after_failure: *integration_test_diags

- &integration_kafka_index_slow
name: "(Compile=openjdk8, Run=openjdk8) kafka index integration test slow"
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=kafka-index-slow' JVM_RUNTIME='-Djvm.runtime=8'
script: *run_integration_test
after_failure: *integration_test_diags

- &integration_kafka_transactional_index
name: "(Compile=openjdk8, Run=openjdk8) transactional kafka index integration test"
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=kafka-transactional-index' JVM_RUNTIME='-Djvm.runtime=8'
script: *run_integration_test
after_failure: *integration_test_diags

- &integration_kafka_transactional_index_slow
name: "(Compile=openjdk8, Run=openjdk8) transactional kafka index integration test slow"
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-Dgroups=kafka-transactional-index-slow' JVM_RUNTIME='-Djvm.runtime=8'
script: *run_integration_test
after_failure: *integration_test_diags

- &integration_query
name: "(Compile=openjdk8, Run=openjdk8) query integration test"
jdk: openjdk8
@@ -344,7 +368,7 @@ jobs:
name: "(Compile=openjdk8, Run=openjdk8) other integration test"
jdk: openjdk8
services: *integration_test_services
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion' JVM_RUNTIME='-Djvm.runtime=8'
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow' JVM_RUNTIME='-Djvm.runtime=8'
script: *run_integration_test
after_failure: *integration_test_diags
# END - Integration tests for Compile with Java 8 and Run with Java 8
@@ -360,11 +384,6 @@
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=perfect-rollup-parallel-batch-index' JVM_RUNTIME='-Djvm.runtime=11'

- <<: *integration_kafka_index
name: "(Compile=openjdk8, Run=openjdk11) kafka index integration test"
jdk: openjdk8
env: TESTNG_GROUPS='-Dgroups=kafka-index' JVM_RUNTIME='-Djvm.runtime=11'

- <<: *integration_query
name: "(Compile=openjdk8, Run=openjdk11) query integration test"
jdk: openjdk8
@@ -383,7 +402,7 @@ jobs:
- <<: *integration_tests
name: "(Compile=openjdk8, Run=openjdk11) other integration test"
jdk: openjdk8
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion' JVM_RUNTIME='-Djvm.runtime=11'
env: TESTNG_GROUPS='-DexcludedGroups=batch-index,perfect-rollup-parallel-batch-index,kafka-index,query,realtime-index,security,s3-deep-storage,gcs-deep-storage,azure-deep-storage,hdfs-deep-storage,s3-ingestion,kinesis-index,kafka-transactional-index,kafka-index-slow,kafka-transactional-index-slow' JVM_RUNTIME='-Djvm.runtime=11'
# END - Integration tests for Compile with Java 8 and Run with Java 11

- name: "security vulnerabilities"
2 changes: 1 addition & 1 deletion docs/development/extensions-core/kinesis-ingestion.md
@@ -137,7 +137,7 @@ The tuningConfig is optional and default parameters will be used if no tuningConfig is specified.
| `indexSpecForIntermediatePersists` | | Defines segment storage format options to be used at indexing time for intermediate persisted temporary segments. This can be used to disable dimension/metric compression on intermediate segments to reduce memory required for final merging. However, disabling compression on intermediate segments might increase page cache use while they are used before getting merged into final segment published, see [IndexSpec](#indexspec) for possible values. | no (default = same as indexSpec) |
| `reportParseExceptions` | Boolean | If true, exceptions encountered during parsing will be thrown and will halt ingestion; if false, unparseable rows and fields will be skipped. | no (default == false) |
| `handoffConditionTimeout` | Long | Milliseconds to wait for segment handoff. It must be >= 0, where 0 means to wait forever. | no (default == 0) |
| `resetOffsetAutomatically` | Boolean | Controls behavior when Druid needs to read Kinesis messages that are no longer available.<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../operations/api-reference.html#supervisors). This mode is useful for production, since it will make you aware of issues with ingestion.<br/><br/>If true, Druid will automatically reset to the earlier or latest sequence number available in Kinesis, based on the value of the `useEarliestOffset` property (earliest if true, latest if false). Please note that this can lead to data being _DROPPED_ (if `useEarliestOffset` is false) or _DUPLICATED_ (if `useEarliestOffset` is true) without your knowledge. Messages will be logged indicating that a reset has occurred, but ingestion will continue. This mode is useful for non-production situations, since it will make Druid attempt to recover from problems automatically, even if they lead to quiet dropping or duplicating of data. | no (default == false) |
| `resetOffsetAutomatically` | Boolean | Controls behavior when Druid needs to read Kinesis messages that are no longer available.<br/><br/>If false, the exception will bubble up, which will cause your tasks to fail and ingestion to halt. If this occurs, manual intervention is required to correct the situation; potentially using the [Reset Supervisor API](../../operations/api-reference.html#supervisors). This mode is useful for production, since it will make you aware of issues with ingestion.<br/><br/>If true, Druid will automatically reset to the earlier or latest sequence number available in Kinesis, based on the value of the `useEarliestSequenceNumber` property (earliest if true, latest if false). Please note that this can lead to data being _DROPPED_ (if `useEarliestSequenceNumber` is false) or _DUPLICATED_ (if `useEarliestSequenceNumber` is true) without your knowledge. Messages will be logged indicating that a reset has occurred, but ingestion will continue. This mode is useful for non-production situations, since it will make Druid attempt to recover from problems automatically, even if they lead to quiet dropping or duplicating of data. | no (default == false) |
| `skipSequenceNumberAvailabilityCheck` | Boolean | Whether to enable checking if the current sequence number is still available in a particular Kinesis shard. If set to false, the indexing task will attempt to reset the current sequence number (or not), depending on the value of `resetOffsetAutomatically`. | no (default == false) |
| `workerThreads` | Integer | The number of threads that will be used by the supervisor for asynchronous operations. | no (default == min(10, taskCount)) |
| `chatThreads` | Integer | The number of threads that will be used for communicating with indexing tasks. | no (default == min(10, taskCount * replicas)) |
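
For orientation, a `tuningConfig` fragment exercising some of the options above might look like the following sketch (field values are illustrative, not recommendations):

```json
"tuningConfig": {
  "type": "kinesis",
  "resetOffsetAutomatically": true,
  "skipSequenceNumberAvailabilityCheck": false,
  "workerThreads": 10
}
```
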
56 changes: 49 additions & 7 deletions integration-tests/README.md
@@ -68,6 +68,32 @@ can either be 8 or 11.
Druid's configuration (using Docker) can be overridden by providing -Doverride.config.path=<PATH_TO_FILE>.
The file must contain one property per line; the key must start with `druid_` and use snake case. See the example below.
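
For example, a minimal override file might look like this (property names follow the `druid_` snake-case convention; the values are illustrative):

```
druid_metadata_storage_type=mysql
druid_server_http_numThreads=100
```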

## Tips & tricks for debugging and developing integration tests

### Useful mvn command flags

- -Dskip.start.docker=true to skip starting the Docker containers. This can save ~3 minutes by skipping the build and
startup of the Docker containers (Druid, Kafka, Hadoop, MySQL, ZooKeeper, etc.). If you use this flag, make sure the
containers are actually running, and that they are in the same state the setup script (run_cluster.sh) would have
brought them up in.
- -Dskip.stop.docker=true to skip stopping and tearing down the Docker containers. This can be useful for further
debugging after the integration tests have finished running. Both flags can be combined, as shown in the example below.
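
For example, a debugging loop that reuses the running containers might look like the following sketch (it assumes the
`integration-tests` Maven profile used by the commands elsewhere in this README):

```bash
# First run: build and start the cluster, then leave the containers running.
mvn verify -P integration-tests -Dskip.stop.docker=true

# Later runs: reuse the already-running containers and keep them up afterwards.
mvn verify -P integration-tests -Dskip.start.docker=true -Dskip.stop.docker=true
```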

### Debugging Druid while running tests

For your convenience, the Druid processes running inside Docker have debugging enabled, and the following ports are
available for attaching a remote debugger (such as via IntelliJ IDEA's Remote Configuration); a command-line example
follows the list:

- Overlord process at port 5009
- MiddleManager process at port 5008
- Historical process at port 5007
- Coordinator process at port 5006
- Broker process at port 5005
- Router process at port 5004
- Router with custom check tls process at port 5003
- Router with no client auth tls process at port 5002
- Router with permissive tls process at port 5001
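
For example, to attach a command-line debugger to the MiddleManager from the host (a sketch, assuming the ports above
are reachable on localhost):

```bash
jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=5008
```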

Running Tests Using A Quickstart Cluster
-------------------

@@ -145,20 +171,26 @@ The integration test that indexes from Cloud or uses Cloud as deep storage is not part
of the integration test run discussed above. Running these tests requires the user to provide
their own Cloud.

Currently, the integration test supports Google Cloud Storage, Amazon S3, and Microsoft Azure.
These can be run by providing "gcs-deep-storage", "s3-deep-storage", or "azure-deep-storage"
to -Dgroups for Google Cloud Storage, Amazon S3, and Microsoft Azure respectively. Note that only
Currently, the integration test supports Amazon Kinesis, Google Cloud Storage, Amazon S3, and Microsoft Azure.
These can be run by providing "kinesis-index", "gcs-deep-storage", "s3-deep-storage", or "azure-deep-storage"
to -Dgroups for Amazon Kinesis, Google Cloud Storage, Amazon S3, and Microsoft Azure respectively. Note that only
one group should be run per mvn command.

In addition to specifying the -Dgroups to mvn command, the following will need to be provided:
For all of the Cloud Integration tests, the following will also need to be provided:
1) Provide -Doverride.config.path=<PATH_TO_FILE> with your Cloud credentials/configs set. See
integration-tests/docker/environment-configs/override-examples/ directory for env vars to provide for each Cloud.

For Amazon Kinesis, the following will also need to be provided:
1) Provide -Ddruid.test.config.streamEndpoint=<STREAM_ENDPOINT> with the endpoint of your stream set.
For example, kinesis.us-east-1.amazonaws.com
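
Putting these together, a Kinesis test run might look like the following sketch (the override file path is a
placeholder for your own credentials file, and the `integration-tests` Maven profile is assumed):

```bash
mvn verify -P integration-tests -Dgroups=kinesis-index \
  -Doverride.config.path=/path/to/kinesis-override-config \
  -Ddruid.test.config.streamEndpoint=kinesis.us-east-1.amazonaws.com
```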

For Google Cloud Storage, Amazon S3, and Microsoft Azure, the following will also need to be provided:
1) Set the bucket and path for your test data. This can be done by setting -Ddruid.test.config.cloudBucket and
-Ddruid.test.config.cloudPath in the mvn command or setting "cloud_bucket" and "cloud_path" in the config file.
2) Copy wikipedia_index_data1.json, wikipedia_index_data2.json, and wikipedia_index_data3.json
located in integration-tests/src/test/resources/data/batch_index to your Cloud storage at the location set in step 1.
3) Provide -Doverride.config.path=<PATH_TO_FILE> with your Cloud credentials/configs set. See
integration-tests/docker/environment-configs/override-examples/ directory for env vars to provide for each Cloud storage.

For running Google Cloud Storage, in addition to the above, you will also have to:
For Google Cloud Storage, in addition to the above, you will also have to:
1) Provide -Dresource.file.dir.path=<PATH_TO_FOLDER> with folder that contains GOOGLE_APPLICATION_CREDENTIALS file

For example, to run integration test for Google Cloud Storage:
@@ -275,3 +307,13 @@ This will tell the test framework that the test class needs to be constructed using Guice.
2) FromFileTestQueryHelper - reads queries with expected results from a file, executes them, and verifies the results using ResultVerifier

Refer to ITIndexerTest as an example of how to use dependency injection.

### Running test methods in parallel
By default, test methods in a test class run sequentially, one at a time. Test methods for a given test class can be
set to run in parallel (multiple test methods of the class running at the same time) by excluding the class/package
from the "AllSerializedTests" test tag section and including it in the "AllParallelizedTests" test tag section in
integration-tests/src/test/resources/testng.xml, as shown in the sketch below.
Before adding tests to the "AllParallelizedTests" test tag, make sure they can safely run in parallel with other tests
from the same class: a test must not modify, restart, or stop the Druid cluster or other dependency containers; must
not use excessive memory and starve other concurrent tasks; and must not modify or use any task, supervisor, or
datasource it did not create.
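
For illustration, the relevant testng.xml structure might look like the following sketch (the test tag names come from
this README; the class name is a hypothetical placeholder):

```xml
<!-- Classes listed under AllParallelizedTests run their test methods in parallel. -->
<test name="AllParallelizedTests" parallel="methods" thread-count="2">
  <classes>
    <!-- Hypothetical example; use your own fully qualified test class. -->
    <class name="org.apache.druid.tests.parallelized.ITExampleParallelTest"/>
  </classes>
</test>
```
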
2 changes: 1 addition & 1 deletion integration-tests/docker/environment-configs/broker
@@ -21,7 +21,7 @@ DRUID_SERVICE=broker
DRUID_LOG_PATH=/shared/logs/broker.log

# JAVA OPTS
SERVICE_DRUID_JAVA_OPTS=-server -Xmx512m -Xms512m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:+UseG1GC
SERVICE_DRUID_JAVA_OPTS=-server -Xmx512m -Xms512m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:+UseG1GC -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005

# Druid configs
druid_processing_buffer_sizeBytes=25000000
2 changes: 1 addition & 1 deletion integration-tests/docker/environment-configs/coordinator
@@ -21,7 +21,7 @@ DRUID_SERVICE=coordinator
DRUID_LOG_PATH=/shared/logs/coordinator.log

# JAVA OPTS
SERVICE_DRUID_JAVA_OPTS=-server -Xmx128m -Xms128m -XX:+UseG1GC
SERVICE_DRUID_JAVA_OPTS=-server -Xmx128m -Xms128m -XX:+UseG1GC -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5006

# Druid configs
druid_metadata_storage_type=mysql
2 changes: 1 addition & 1 deletion integration-tests/docker/environment-configs/historical
@@ -21,7 +21,7 @@ DRUID_SERVICE=historical
DRUID_LOG_PATH=/shared/logs/historical.log

# JAVA OPTS
SERVICE_DRUID_JAVA_OPTS=-server -Xmx512m -Xms512m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:+UseG1GC
SERVICE_DRUID_JAVA_OPTS=-server -Xmx512m -Xms512m -XX:NewSize=256m -XX:MaxNewSize=256m -XX:+UseG1GC -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5007

# Druid configs
druid_processing_buffer_sizeBytes=25000000
3 changes: 2 additions & 1 deletion integration-tests/docker/environment-configs/middlemanager
@@ -21,7 +21,7 @@ DRUID_SERVICE=middleManager
DRUID_LOG_PATH=/shared/logs/middlemanager.log

# JAVA OPTS
SERVICE_DRUID_JAVA_OPTS=-server -Xmx64m -Xms64m -XX:+UseG1GC
SERVICE_DRUID_JAVA_OPTS=-server -Xmx64m -Xms64m -XX:+UseG1GC -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5008

# Druid configs
druid_server_http_numThreads=100
@@ -37,3 +37,4 @@ druid_indexer_task_chathandler_type=announce
druid_auth_basic_common_cacheDirectory=/tmp/authCache/middleManager
druid_startup_logging_logProperties=true
druid_server_https_crlPath=/tls/revocations.crl
druid_worker_capacity=20