Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .bazelrc
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
# The following .bazelrc content is forked from the main Envoy repository. This is necessary since
# this needs to be available before we can access the Envoy repository contents via Bazel.

build:clang-asan --test_timeout=900
build:clang-tsan --test_timeout=900

# Envoy specific Bazel build/test options.

# Bazel doesn't need more than 200MB of memory for local build based on memory profiling:
Expand Down
4 changes: 2 additions & 2 deletions ci/envoy_build_sha.sh
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build .circleci/config.yml | sed -e 's#.*envoyproxy/envoy-build:\(.*\)#\1#' | uniq)
[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".circleci/config.yml hashes are inconsistent!" && exit 1)
ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu $(dirname $0)/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq)
[[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1)
1 change: 1 addition & 0 deletions test/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -266,6 +266,7 @@ envoy_cc_test(
deps = [
"//source/client:nighthawk_client_lib",
"//source/client:nighthawk_service_lib",
"//source/client:output_transform_main_lib",
"//source/common:nighthawk_common_lib",
"//source/server:http_test_server_filter_lib",
"//test/test_common:environment_lib",
Expand Down
8 changes: 8 additions & 0 deletions test/integration/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ py_library(
"configurations/nighthawk_https_origin.yaml",
"configurations/sni_origin.yaml",
"//:nighthawk_client",
"//:nighthawk_output_transform",
"//:nighthawk_service",
"//:nighthawk_test_server",
"@envoy//test/config/integration/certs",
Expand Down Expand Up @@ -83,6 +84,12 @@ py_library(
deps = [":integration_test_base"],
)

py_library(
name = "test_output_transform_lib",
srcs = ["test_output_transform.py"],
deps = [":integration_test_base"],
)

py_binary(
name = "integration_test",
srcs = ["integration_test.py"],
Expand All @@ -99,6 +106,7 @@ py_binary(
":test_grpc_service_lib",
":test_integration_basics_lib",
":test_integration_zipkin_lib",
":test_output_transform_lib",
":test_remote_execution_lib",
],
)
38 changes: 30 additions & 8 deletions test/integration/test_grpc_service.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
import pytest

from integration_test_fixtures import (http_test_server_fixture)
from utility import *
from test.integration.integration_test_fixtures import http_test_server_fixture
from test.integration import utility


def test_grpc_service_happy_flow(http_test_server_fixture):
Expand All @@ -14,8 +14,8 @@ def test_grpc_service_happy_flow(http_test_server_fixture):
http_test_server_fixture.getTestServerRootUri()
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
assertGreaterEqual(counters["benchmark.http_2xx"], 5)
assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)
utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5)
utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 1)


def test_grpc_service_down(http_test_server_fixture):
Expand All @@ -26,10 +26,10 @@ def test_grpc_service_down(http_test_server_fixture):
],
expect_failure=True)
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
assertEqual(counters["requestsource.upstream_rq_pending_failure_eject"], 1)
utility.assertEqual(counters["requestsource.upstream_rq_pending_failure_eject"], 1)


@pytest.mark.skipif(isSanitizerRun(), reason="Slow in sanitizer runs")
@pytest.mark.skipif(utility.isSanitizerRun(), reason="Slow in sanitizer runs")
def test_grpc_service_stress(http_test_server_fixture):
http_test_server_fixture.startNighthawkGrpcService("dummy-request-source")
parsed_json, _ = http_test_server_fixture.runNighthawkClient([
Expand All @@ -40,5 +40,27 @@ def test_grpc_service_stress(http_test_server_fixture):
http_test_server_fixture.getTestServerRootUri()
])
counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json)
assertGreaterEqual(counters["benchmark.http_2xx"], 5000)
assertEqual(counters["requestsource.internal.upstream_rq_200"], 4)
utility.assertGreaterEqual(counters["benchmark.http_2xx"], 5000)
utility.assertEqual(counters["requestsource.internal.upstream_rq_200"], 4)


def _run_service_with_args(args):
  """Execute the nighthawk_service binary with the supplied argument string.

  Args:
    args: A string with the command line arguments for the service.

  Returns:
    A (exit_code, output) tuple as produced by utility.run_binary_with_args.
  """
  binary_name = "nighthawk_service"
  return utility.run_binary_with_args(binary_name, args)


def test_grpc_service_help():
  """Verify that passing --help exits cleanly and prints usage text."""
  exit_code, output = _run_service_with_args("--help")
  utility.assertEqual(exit_code, 0)
  utility.assertIn("USAGE", output)


def test_grpc_service_bad_arguments():
  """Verify that an unknown flag makes the service exit 1 with a parse error."""
  exit_code, output = _run_service_with_args("--foo")
  utility.assertEqual(exit_code, 1)
  utility.assertIn("PARSE ERROR: Argument: --foo", output)


def test_grpc_service_nonexisting_listener_address():
  """Verify that startup fails when the listen address cannot be bound."""
  exit_code, output = _run_service_with_args("--listen 1.1.1.1:1")
  utility.assertEqual(exit_code, 1)
  utility.assertIn("Failure: Could not start the grpc service", output)
16 changes: 16 additions & 0 deletions test/integration/test_integration_basics.py
Original file line number Diff line number Diff line change
Expand Up @@ -652,3 +652,19 @@ def test_http_request_release_timing(http_test_server_fixture, qps_parameterizat
int(global_histograms["benchmark_http_client.queue_to_connect"]["count"]), total_requests)

assertCounterEqual(counters, "benchmark.http_2xx", (total_requests))


def _run_client_with_args(args):
  """Execute the nighthawk_client binary with the supplied argument string.

  Args:
    args: A string with the command line arguments for the client.

  Returns:
    A (exit_code, output) tuple as produced by run_binary_with_args.
  """
  binary_name = "nighthawk_client"
  return run_binary_with_args(binary_name, args)


def test_client_help():
  """Verify that passing --help exits cleanly and prints usage text."""
  exit_code, output = _run_client_with_args("--help")
  assertEqual(exit_code, 0)
  assertIn("USAGE", output)


def test_client_bad_arg():
  """Verify that an unknown flag makes the client exit 1 with a parse error."""
  exit_code, output = _run_client_with_args("127.0.0.1 --foo")
  assertEqual(exit_code, 1)
  assertIn("PARSE ERROR: Argument: --foo", output)
44 changes: 44 additions & 0 deletions test/integration/test_output_transform.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#!/usr/bin/env python3
import pytest

from test.integration import utility
import os
import subprocess


def _run_output_transform_with_args(args):
  """Execute the nighthawk_output_transform binary with the supplied argument string.

  Args:
    args: A string with the command line arguments for the transform tool.

  Returns:
    A (exit_code, output) tuple as produced by utility.run_binary_with_args.
  """
  binary_name = "nighthawk_output_transform"
  return utility.run_binary_with_args(binary_name, args)


def test_output_transform_help():
  """Verify that passing --help exits cleanly and prints usage text."""
  exit_code, output = _run_output_transform_with_args("--help")
  utility.assertEqual(exit_code, 0)
  utility.assertIn("USAGE", output)


def test_output_transform_bad_arguments():
  """Verify that an unknown flag makes the tool exit 1 with a parse error."""
  exit_code, output = _run_output_transform_with_args("--foo")
  utility.assertEqual(exit_code, 1)
  utility.assertIn("PARSE ERROR: Argument: --foo", output)


def test_output_transform_101():
  """Runs an arbitrary load test, which outputs to json.

  This json output is then piped through nighthawk_output_transform and the
  resulting human-readable output is verified.
  """
  test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"])
  # Stage 1: run a minimal load test that emits structured json on stdout.
  client_process = subprocess.run([
      os.path.join(test_rundir, "nighthawk_client"), "--duration", "1", "--rps", "1", "127.0.0.1",
      "--output-format", "json"
  ],
                                  stdout=subprocess.PIPE)
  json_output = client_process.stdout
  # Guard against the transform stage failing with a confusing parse error on
  # empty input: make sure the client actually produced output first.
  utility.assertGreaterEqual(len(json_output), 1)
  # Stage 2: transform the json to human-readable form.
  transform_process = subprocess.run(
      [os.path.join(test_rundir, "nighthawk_output_transform"), "--output-format", "human"],
      stdout=subprocess.PIPE,
      input=json_output)
  utility.assertEqual(transform_process.returncode, 0)
  utility.assertIn("Nighthawk - A layer 7 protocol benchmarking tool",
                   transform_process.stdout.decode("utf-8"))
17 changes: 17 additions & 0 deletions test/integration/utility.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import subprocess


def assertEqual(a, b):
Expand Down Expand Up @@ -57,3 +58,19 @@ def assertCounterBetweenInclusive(counters, name, min_value, max_value):

def isSanitizerRun():
  """Determine if the current execution is a sanitizer run.

  Returns:
    bool: True iff the NH_INTEGRATION_TEST_SANITIZER_RUN environment variable
    is set to "1".
  """
  # Use a string default so both branches of the comparison are str/str, and
  # return the boolean expression directly instead of `True if ... else False`.
  return os.environ.get("NH_INTEGRATION_TEST_SANITIZER_RUN", "0") == "1"


def run_binary_with_args(binary, args):
  """Run a Nighthawk binary from the test run directory with arguments.

  Args:
    binary: A string, the name of the to-be-called binary, e.g. "nighthawk_client".
    args: A string, the command line arguments to the binary, e.g. "--foo --bar".

  Returns:
    A tuple in the form (exit_code, output), where exit_code is the code the Nighthawk
    service terminated with and the output is its standard output.
  """
  test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"])
  binary_path = os.path.join(test_rundir, binary)
  command = "%s %s" % (binary_path, args)
  return subprocess.getstatusoutput(command)
9 changes: 8 additions & 1 deletion test/rate_limiter_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -166,8 +166,11 @@ TEST_F(RateLimiterTest, DistributionSamplingRateLimiterImplTest) {
EXPECT_CALL(unsafe_mock_rate_limiter, timeSource)
.Times(AtLeast(1))
.WillRepeatedly(ReturnRef(time_system));
auto sampler = std::make_unique<UniformRandomDistributionSamplerImpl>(1);
EXPECT_EQ(sampler->min(), 0);
EXPECT_EQ(sampler->max(), 1);
RateLimiterPtr rate_limiter = std::make_unique<DistributionSamplingRateLimiterImpl>(
std::make_unique<UniformRandomDistributionSamplerImpl>(1), std::move(mock_rate_limiter));
std::move(sampler), std::move(mock_rate_limiter));

EXPECT_CALL(unsafe_mock_rate_limiter, tryAcquireOne).Times(tries).WillRepeatedly(Return(true));
EXPECT_CALL(unsafe_mock_rate_limiter, releaseOne).Times(tries);
Expand Down Expand Up @@ -383,6 +386,10 @@ class GraduallyOpeningRateLimiterFilterTest : public Test {
}
// Verify we acquired everything.
EXPECT_FALSE(rate_limiter->tryAcquireOne());
// Verify releaseOne works.
rate_limiter->releaseOne();
EXPECT_TRUE(rate_limiter->tryAcquireOne());
EXPECT_FALSE(rate_limiter->tryAcquireOne());
return acquisition_timings;
}
};
Expand Down
24 changes: 13 additions & 11 deletions test/run_nighthawk_bazel_coverage.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,23 @@

# derived from test/run_envoy_bazel_coverage.sh over the Envoy repo.

set -e
set -x
set -eo pipefail
set +x
set -u

[[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}"
[[ -z "${VALIDATE_COVERAGE}" ]] && VALIDATE_COVERAGE=true
SRCDIR="${SRCDIR:=${PWD}}"
VALIDATE_COVERAGE="${VALIDATE_COVERAGE:=true}"
ENVOY_COVERAGE_DIR="${ENVOY_COVERAGE_DIR:=}"

echo "Starting run_nighthawk_bazel_coverage.sh..."
echo " PWD=$(pwd)"
echo " SRCDIR=${SRCDIR}"
echo " VALIDATE_COVERAGE=${VALIDATE_COVERAGE}"

COVERAGE_DIR="${SRCDIR}"/generated/coverage
rm -rf "${COVERAGE_DIR}"
mkdir -p "${COVERAGE_DIR}"

# This is the target that will be run to generate coverage data. It can be overridden by consumer
# projects that want to run coverage on a different/combined target.
# Command-line arguments take precedence over ${COVERAGE_TARGET}.
Expand All @@ -25,24 +31,20 @@ else
fi

BAZEL_BUILD_OPTIONS+=" --config=test-coverage --test_tag_filters=-nocoverage --test_env=ENVOY_IP_TEST_VERSIONS=v4only"
bazel coverage ${BAZEL_BUILD_OPTIONS} --test_output=all ${COVERAGE_TARGETS}

COVERAGE_DIR="${SRCDIR}"/generated/coverage
mkdir -p "${COVERAGE_DIR}"

bazel coverage ${BAZEL_BUILD_OPTIONS} --cache_test_results=no --test_output=all ${COVERAGE_TARGETS}
COVERAGE_DATA="${COVERAGE_DIR}/coverage.dat"

cp bazel-out/_coverage/_coverage_report.dat "${COVERAGE_DATA}"

COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | tee /dev/stderr | grep lines... | cut -d ' ' -f 4)
COVERAGE_VALUE=$(genhtml --prefix ${PWD} --output "${COVERAGE_DIR}" "${COVERAGE_DATA}" | grep lines... | cut -d ' ' -f 4)
COVERAGE_VALUE=${COVERAGE_VALUE%?}

[[ -z "${ENVOY_COVERAGE_DIR}" ]] || rsync -av "${COVERAGE_DIR}"/ "${ENVOY_COVERAGE_DIR}"

if [ "$VALIDATE_COVERAGE" == "true" ]
then
# TODO(#370): restore the coverage threshold.
COVERAGE_THRESHOLD=98.1
COVERAGE_THRESHOLD=98.5
COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc)
if test ${COVERAGE_FAILED} -eq 1; then
echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}
Expand Down
7 changes: 6 additions & 1 deletion test/statistic_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,12 @@ TEST(StatisticTest, NullStatistic) {
NullStatistic stat;
EXPECT_EQ(0, stat.count());
stat.addValue(1);
EXPECT_EQ(0, stat.count());
EXPECT_EQ(0, stat.mean());
EXPECT_EQ(0, stat.pvariance());
EXPECT_EQ(0, stat.pstdev());
EXPECT_NE(nullptr, stat.combine(stat));
EXPECT_EQ(0, stat.significantDigits());
EXPECT_NE(nullptr, stat.createNewInstanceOfSameType());
}

} // namespace Nighthawk