diff --git a/LICENSE b/LICENSE index b2e6b61e974..bf7a24b6ca0 100644 --- a/LICENSE +++ b/LICENSE @@ -425,3 +425,129 @@ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +~~~ + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a dual license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2016 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. 
Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + diff --git a/Makefile.am b/Makefile.am index f1a87478cf5..ac613006c61 100644 --- a/Makefile.am +++ b/Makefile.am @@ -57,8 +57,7 @@ doxygen: @cd doc && $(MAKE) $(AM_MAKEFLAGS) $@ changelog: - ./tools/changelog.pl $(VERSION) > CHANGELOG-$(VERSION) - -git add CHANGELOG-$(VERSION) && git commit -m "Adding CHANGELOG-$(VERSION)" + ./tools/changelog.pl apache trafficserver $(VERSION) > CHANGELOG-$(VERSION) asf-dist: asf-distdir tardir=$(distdir) && $(am__tar) --mtime=./configure.ac | bzip2 -9 -c >$(distdir).tar.bz2 @@ -102,6 +101,9 @@ install-data-hook: rat: java -jar $(top_srcdir)/ci/apache-rat-0.11-SNAPSHOT.jar -E $(top_srcdir)/ci/rat-regex.txt -d $(top_srcdir) +autopep8: + @autopep8 -i -r $(top_srcdir) + # # These are rules to make clang-format easy and fast to run. Run it with e.g. 
# make -j clang-format @@ -163,3 +165,4 @@ help: @echo 'rel-candidate recreate a signed relelease candidate source package and a signed git tag' @echo 'release recreate a signed release source package and a signed git tag' @echo 'tidy run clang-tidy in fix-it mode' + @echo 'autopep8 run autopep8 over python files' diff --git a/NOTICE b/NOTICE index 5feedac980d..7209189ae51 100644 --- a/NOTICE +++ b/NOTICE @@ -103,3 +103,9 @@ Copyright 2014 Google Inc. All Rights Reserved. plugins/experimental/memcache/protocol_binary.h developed by Sun Microsystems, Inc. Copyright (c) <2008>, Sun Microsystems, Inc. All rights reserved. + +~~ + +Reusable Gold Testing System +https://bitbucket.org/dragon512/reusable-gold-testing-system +Copyright (c) 2015-2016 Jason Kenny All Rights Reserved. diff --git a/build/crypto.m4 b/build/crypto.m4 index dc6fbd8967c..53817981772 100644 --- a/build/crypto.m4 +++ b/build/crypto.m4 @@ -22,7 +22,7 @@ dnl dnl TS_CHECK_CRYPTO: look for crypto libraries and headers dnl AC_DEFUN([TS_CHECK_CRYPTO], [ - AC_SEARCH_LIBS([crypt], [crypt], [AC_SUBST([LIBCRYPT],["-lcrypt"])]) + AC_CHECK_LIB([crypt], [crypt], [AC_SUBST([LIBCRYPT],["-lcrypt"])]) AX_CHECK_OPENSSL([ enable_crypto=yes diff --git a/build/lzma.m4 b/build/lzma.m4 index b951a6e1cd7..d16eba5500d 100644 --- a/build/lzma.m4 +++ b/build/lzma.m4 @@ -84,7 +84,7 @@ if test "$enable_lzma" != "no"; then TS_ADDTO(LDFLAGS, [-L${lzma_ldflags}]) TS_ADDTO_RPATH(${lzma_ldflags}) fi - AC_SEARCH_LIBS([lzma_code], [lzma], [lzma_have_libs=1]) + AC_CHECK_LIB([lzma], [lzma_code], [lzma_have_libs=1]) if test "$lzma_have_libs" != "0"; then AC_CHECK_HEADERS(lzma.h, [lzma_have_headers=1]) fi diff --git a/build/pcre.m4 b/build/pcre.m4 index 978e95bb1b2..bdc5961e008 100644 --- a/build/pcre.m4 +++ b/build/pcre.m4 @@ -95,7 +95,7 @@ if test "$enable_pcre" != "no"; then TS_ADDTO(LDFLAGS, [-L${pcre_ldflags}]) TS_ADDTO_RPATH(${pcre_ldflags}) fi - AC_SEARCH_LIBS([pcre_exec], [pcre], [pcre_have_libs=1]) + AC_CHECK_LIB([pcre], 
[pcre_exec], [pcre_have_libs=1]) if test "$pcre_have_libs" != "0"; then AC_CHECK_HEADERS(pcre.h, [pcre_have_headers=1]) AC_CHECK_HEADERS(pcre/pcre.h, [pcre_have_headers=1]) diff --git a/build/zlib.m4 b/build/zlib.m4 index d54afb1b472..aa60cb86966 100644 --- a/build/zlib.m4 +++ b/build/zlib.m4 @@ -84,7 +84,7 @@ if test "$enable_zlib" != "no"; then TS_ADDTO(LDFLAGS, [-L${zlib_ldflags}]) TS_ADDTO_RPATH(${zlib_ldflags}) fi - AC_SEARCH_LIBS([compressBound], [z], [zlib_have_libs=1]) + AC_CHECK_LIB([z], [compressBound], [zlib_have_libs=1]) if test "$zlib_have_libs" != "0"; then AC_CHECK_HEADERS(zlib.h, [zlib_have_headers=1]) fi diff --git a/ci/jenkins/bin/gh-mirror.sh b/ci/jenkins/bin/gh-mirror.sh index 50a0540272c..9bd47426bba 100755 --- a/ci/jenkins/bin/gh-mirror.sh +++ b/ci/jenkins/bin/gh-mirror.sh @@ -67,9 +67,9 @@ function checkBuild() { } # Save away previous ref-specs, you must save all branches -REF_4_2=$(getRef "4.2.x") REF_6_2=$(getRef "6.2.x") REF_7_0=$(getRef "7.0.x") +REF_7_1=$(getRef "7.1.x") REF_master=$(getRef "master") # Do the updates @@ -77,7 +77,7 @@ ${GIT} remote update --prune > /dev/null 2>&1 ${GIT} update-server-info # Check the branches, this makes assumptions that the Jenkins build are named after the branches -checkBuild "$REF_4_2" "4.2.x" checkBuild "$REF_6_2" "6.2.x" checkBuild "$REF_7_0" "7.0.x" +checkBuild "$REF_7_1" "7.1.x" checkBuild "$REF_master" "master" diff --git a/ci/rat-regex.txt b/ci/rat-regex.txt index 4755266888c..0bbb0d9313f 100644 --- a/ci/rat-regex.txt +++ b/ci/rat-regex.txt @@ -22,6 +22,7 @@ .*\.default$ .*\.default\.in$ .*\.config$ +.*\.gold$ ^\.gitignore$ ^\.gitmodules$ ^\.indent.pro$ diff --git a/ci/tsqa/tests/test_body_factory.py b/ci/tsqa/tests/test_body_factory.py index 2842a6f258e..5aebe1585c7 100644 --- a/ci/tsqa/tests/test_body_factory.py +++ b/ci/tsqa/tests/test_body_factory.py @@ -40,7 +40,8 @@ def setUpEnv(cls, env): cls.configs['remap.config'].add_line( 'map / http://www.linkedin.com/ @action=deny' ) - 
cls.body_factory_dir = os.path.join(cls.environment.layout.prefix, cls.configs['records.config']['CONFIG']['proxy.config.body_factory.template_sets_dir']) + cls.body_factory_dir = os.path.join(cls.environment.layout.prefix, + cls.configs['records.config']['CONFIG']['proxy.config.body_factory.template_sets_dir']) cls.domain_directory = ['www.linkedin.com', '127.0.0.1', 'www.foobar.net'] for directory_item in cls.domain_directory: current_dir = os.path.join(cls.body_factory_dir, directory_item) @@ -50,24 +51,24 @@ def setUpEnv(cls, env): pass fname = os.path.join(current_dir, "access#denied") with open(fname, "w") as f: - f.write(directory_item) + f.write(directory_item) fname = os.path.join(current_dir, ".body_factory_info") with open(fname, "w") as f: - pass + pass def test_domain_specific_body_factory(self): - times = 1000 - no_dir_domain = 'www.nodir.com' - self.domain_directory.append(no_dir_domain) - self.assertEqual(4, len(self.domain_directory)) - url = 'http://127.1.0.1:{0}'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - for i in xrange(times): - domain = random.choice(self.domain_directory) - headers = {'Host': domain} - r = requests.get(url, headers=headers) - domain_in_response = no_dir_domain - for domain_item in self.domain_directory: - if domain_item in r.text: - domain_in_response = domain_item - break - self.assertEqual(domain, domain_in_response) + times = 1000 + no_dir_domain = 'www.nodir.com' + self.domain_directory.append(no_dir_domain) + self.assertEqual(4, len(self.domain_directory)) + url = 'http://127.1.0.1:{0}'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + for i in xrange(times): + domain = random.choice(self.domain_directory) + headers = {'Host': domain} + r = requests.get(url, headers=headers) + domain_in_response = no_dir_domain + for domain_item in self.domain_directory: + if domain_item in r.text: + domain_in_response = domain_item + break + 
self.assertEqual(domain, domain_in_response) diff --git a/ci/tsqa/tests/test_connect_attempts.py b/ci/tsqa/tests/test_connect_attempts.py index 5bb41bfdd5f..d5387516d18 100644 --- a/ci/tsqa/tests/test_connect_attempts.py +++ b/ci/tsqa/tests/test_connect_attempts.py @@ -214,7 +214,8 @@ def test_listen_origin(self): def test_die_on_connect_origin(self): '''Verify that we get 504s from origins that die_on_connect''' - url = 'http://127.0.0.1:{0}/die_on_connect/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + url = 'http://127.0.0.1:{0}/die_on_connect/s'.format( + self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) ret = requests.get(url, timeout=2) self.assertEqual(ret.status_code, 504) @@ -225,19 +226,22 @@ def test_partial_response_origin(self): We want to bail out-- since the origin already got the request, we can't gaurantee that the request is re-entrant ''' - url = 'http://127.0.0.1:{0}/partial_response/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + url = 'http://127.0.0.1:{0}/partial_response/s'.format( + self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) ret = requests.get(url, timeout=2) self.assertEqual(ret.status_code, 500) def test_reset_after_accept_origin(self): '''Verify that we get 502s from origins that reset_after_accept, once any bytes are sent to origin we assume we cannot re-dispatch''' - url = 'http://127.0.0.1:{0}/reset_after_accept/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + url = 'http://127.0.0.1:{0}/reset_after_accept/s'.format( + self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) ret = requests.get(url, timeout=2) self.assertEqual(ret.status_code, 502) def test_slow_response(self): '''Verify that we get 5xx from origins that take longer than acceptable, since we will not retry them''' - url = 
'http://127.0.0.1:{0}/slow_response/s'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + url = 'http://127.0.0.1:{0}/slow_response/s'.format( + self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) ret = requests.get(url, timeout=2) # make sure it worked self.assertEqual(ret.status_code, 504) diff --git a/ci/tsqa/tests/test_custom_log.py b/ci/tsqa/tests/test_custom_log.py index 7f32b524c67..12365b05d3a 100644 --- a/ci/tsqa/tests/test_custom_log.py +++ b/ci/tsqa/tests/test_custom_log.py @@ -46,21 +46,21 @@ def setUpEnv(cls, env): cls.configs['logging.config'].add_line('log.ascii(Format = "% %", Filename = "test_log_field"') def ip_to_hex(self, ipstr): - num_list = ipstr.split('.') - int_value = (int(num_list[0]) << 24) + (int(num_list[1]) << 16) + (int(num_list[2]) << 8) + (int(num_list[3])) - return hex(int_value).upper()[2:] + num_list = ipstr.split('.') + int_value = (int(num_list[0]) << 24) + (int(num_list[1]) << 16) + (int(num_list[2]) << 8) + (int(num_list[3])) + return hex(int_value).upper()[2:] def test_log_field(self): - random.seed() - times = 10 - for i in xrange(times): - request_ip = "127.%d.%d.%d" % (random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)) - url = 'http://%s:%s' % (request_ip, self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) - requests.get(url) - # get the last line of the log file - time.sleep(10) - with open(self.log_file_path) as f: - for line in f: - pass - expected_line = "%s %s\n" % (request_ip, self.ip_to_hex(request_ip)) - self.assertEqual(line, expected_line) + random.seed() + times = 10 + for i in xrange(times): + request_ip = "127.%d.%d.%d" % (random.randint(1, 255), random.randint(1, 255), random.randint(1, 255)) + url = 'http://%s:%s' % (request_ip, self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']) + requests.get(url) + # get the last line of the log file + time.sleep(10) + with 
open(self.log_file_path) as f: + for line in f: + pass + expected_line = "%s %s\n" % (request_ip, self.ip_to_hex(request_ip)) + self.assertEqual(line, expected_line) diff --git a/ci/tsqa/tests/test_example.py b/ci/tsqa/tests/test_example.py index 3ba0cf04cdc..8460000425a 100644 --- a/ci/tsqa/tests/test_example.py +++ b/ci/tsqa/tests/test_example.py @@ -74,7 +74,8 @@ def test_something(self): you only need to excercise the code that you intend to test ''' # for example, you could send a request to ATS and check the response - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) + ret = requests.get( + 'http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) self.assertEqual(ret.status_code, 404) self.assertIn('ATS', ret.headers['server']) @@ -82,13 +83,15 @@ def test_something(self): class TestConfigureFlags(helpers.EnvironmentCase): feature_requirements = {'TS_HAS_WCCP': 0} + def test_wccp(self): self.assertTrue(True) class TestBootstrap(helpers.EnvironmentCase): def test_default_404(self): - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) + ret = requests.get( + 'http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) self.assertEqual(ret.status_code, 404) self.assertIn('ATS', ret.headers['server']) @@ -129,7 +132,8 @@ def hello(request): def test_basic_intercept(self): for _ in xrange(0, 10): - ret = requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) + ret = requests.get( + 'http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) self.assertEqual(ret.status_code, 200) @@ -151,7 +155,8 @@ def setUpEnv(cls, env): def test_logs_exist(self): # send some requests for x in xrange(0, 10): - ret = 
requests.get('http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) + ret = requests.get( + 'http://127.0.0.1:{0}/'.format(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) self.assertEqual(ret.status_code, 404) self.assertIn('ATS', ret.headers['server']) diff --git a/ci/tsqa/tests/test_header_rewrite.py b/ci/tsqa/tests/test_header_rewrite.py index ed8d3c3a681..6a220ff219b 100644 --- a/ci/tsqa/tests/test_header_rewrite.py +++ b/ci/tsqa/tests/test_header_rewrite.py @@ -29,6 +29,7 @@ log = logging.getLogger(__name__) + class EchoServerHandler(SocketServer.BaseRequestHandler): """ A subclass of RequestHandler which will return all data received back @@ -52,11 +53,12 @@ def handle(self): 'Content-Type: text/html; charset=UTF-8\r\n' 'Connection: keep-alive\r\n' '\r\n{data_string}'.format( - data_length = len(cookie), - data_string = cookie + data_length=len(cookie), + data_string=cookie )) self.request.sendall(resp) + class TestHeaderRewrite(helpers.EnvironmentCase): ''' Tests for header rewrite @@ -71,7 +73,7 @@ def setUpEnv(cls, env): cls.socket_server.ready.wait() cls.configs['remap.config'].add_line( - 'map / http://127.0.0.1:%d' %(cls.socket_server.port) + 'map / http://127.0.0.1:%d' % (cls.socket_server.port) ) # setup the plugin @@ -79,18 +81,18 @@ def setUpEnv(cls, env): cls.test_config_path = helpers.tests_file_path(cls.config_file) cls.configs['plugin.config'].add_line('%s/header_rewrite.so %s' % ( - cls.environment.layout.plugindir, - cls.test_config_path + cls.environment.layout.plugindir, + cls.test_config_path )) def test_cookie_rewrite(self): cookie_test_add_dict = { - '' : 'testkey=testaddvalue', - 'testkey=somevalue' : 'testkey=somevalue', - 'otherkey=testvalue' : 'otherkey=testvalue;testkey=testaddvalue', - 'testkey = "other=value"; a = a' : 'testkey = "other=value"; a = a', - 'testkeyx===' : 'testkeyx===;testkey=testaddvalue' + '': 'testkey=testaddvalue', + 
'testkey=somevalue': 'testkey=somevalue', + 'otherkey=testvalue': 'otherkey=testvalue;testkey=testaddvalue', + 'testkey = "other=value"; a = a': 'testkey = "other=value"; a = a', + 'testkeyx===': 'testkeyx===;testkey=testaddvalue' } for key in cookie_test_add_dict: opener = urllib2.build_opener() @@ -100,12 +102,12 @@ def test_cookie_rewrite(self): self.assertEqual(resp, cookie_test_add_dict[key]) cookie_test_rm_dict = { - '' : '', - ' testkey=somevalue' : '', - 'otherkey=testvalue' : 'otherkey=testvalue', - 'testkey = "other=value" ; a = a' : ' a = a', - 'otherkey=othervalue= ; testkey===' : 'otherkey=othervalue= ', - 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'' : 'firstkey ="firstvalue" ; secondkey=\'\'' + '': '', + ' testkey=somevalue': '', + 'otherkey=testvalue': 'otherkey=testvalue', + 'testkey = "other=value" ; a = a': ' a = a', + 'otherkey=othervalue= ; testkey===': 'otherkey=othervalue= ', + 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'': 'firstkey ="firstvalue" ; secondkey=\'\'' } for key in cookie_test_rm_dict: opener = urllib2.build_opener() @@ -115,12 +117,12 @@ def test_cookie_rewrite(self): self.assertEqual(resp, cookie_test_rm_dict[key]) cookie_test_set_dict = { - '' : 'testkey=testsetvalue', - 'testkey=somevalue' : 'testkey=testsetvalue', - 'otherkey=testvalue' : 'otherkey=testvalue;testkey=testsetvalue', - 'testkey = "other=value"; a = a' : 'testkey = testsetvalue; a = a', - 'testkeyx===' : 'testkeyx===;testkey=testsetvalue', - 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'' : 'firstkey ="firstvalue" ; testkey = testsetvalue; secondkey=\'\'' + '': 'testkey=testsetvalue', + 'testkey=somevalue': 'testkey=testsetvalue', + 'otherkey=testvalue': 'otherkey=testvalue;testkey=testsetvalue', + 'testkey = "other=value"; a = a': 'testkey = testsetvalue; a = a', + 'testkeyx===': 'testkeyx===;testkey=testsetvalue', + 'firstkey ="firstvalue" ; testkey = =; secondkey=\'\'': 'firstkey ="firstvalue" ; testkey = testsetvalue; secondkey=\'\'' 
} for key in cookie_test_set_dict: opener = urllib2.build_opener() diff --git a/ci/tsqa/tests/test_headrequest.py b/ci/tsqa/tests/test_headrequest.py index e19312a0258..e3cd46a4dee 100644 --- a/ci/tsqa/tests/test_headrequest.py +++ b/ci/tsqa/tests/test_headrequest.py @@ -44,26 +44,26 @@ def handle(self): break if 'TE' in data: resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Transfer-Encoding: chunked\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) + 'Server: Apache-Coyote/1.1\r\n' + 'Transfer-Encoding: chunked\r\n' + 'Vary: Accept-Encoding\r\n' + '\r\n' + ) self.request.sendall(resp) elif 'CL' in data: resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Content-Length: 123\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) + 'Server: Apache-Coyote/1.1\r\n' + 'Content-Length: 123\r\n' + 'Vary: Accept-Encoding\r\n' + '\r\n' + ) self.request.sendall(resp) else: resp = ('HTTP/1.1 200 OK\r\n' - 'Server: Apache-Coyote/1.1\r\n' - 'Vary: Accept-Encoding\r\n' - '\r\n' - ) + 'Server: Apache-Coyote/1.1\r\n' + 'Vary: Accept-Encoding\r\n' + '\r\n' + ) self.request.sendall(resp) @@ -93,7 +93,7 @@ def test_head_request_without_timout(cls): conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) conn.connect((cls.proxy_host, cls.proxy_port)) request_content = 'HEAD / HTTP/1.1\r\nConnection: close\r\nHost: 127.0.0.1\r\nContent-Length: %d\r\n\r\n%s' % ( - len(request_case), request_case) + len(request_case), request_case) conn.setblocking(1) conn.send(request_content) while 1: @@ -109,7 +109,7 @@ def test_head_request_without_timout(cls): conn.close() end_time = time.time() log.info("head request with case(%s) costs %f seconds while the timout is %f seconds." 
% ( - request_case, end_time - begin_time, cls.timeout)) + request_case, end_time - begin_time, cls.timeout)) cls.assertGreater(cls.timeout, end_time - begin_time) if request_case == 'CL': cls.assertIn('Content-Length', response_content) diff --git a/ci/tsqa/tests/test_hostdb.py b/ci/tsqa/tests/test_hostdb.py index 124a32e1c96..2eb76577a3c 100644 --- a/ci/tsqa/tests/test_hostdb.py +++ b/ci/tsqa/tests/test_hostdb.py @@ -47,6 +47,7 @@ def kill_dns(dns_server): class StubDNSResolver(object): '''Resolver to serve defined responses from `response_dict` or return SOA ''' + def __init__(self, responses): self.responses = responses self.resp_headers = {} @@ -270,7 +271,7 @@ def test_reload(self): class TestHostDB(helpers.EnvironmentCase, tsqa.test_cases.HTTPBinCase): @classmethod def setUpEnv(cls, env): - cls.dns_sock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) + cls.dns_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) cls.dns_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) cls.dns_sock.bind(('', 0)) # bind to all interfaces on an ephemeral port dns_port = cls.dns_sock.getsockname()[1] @@ -349,7 +350,6 @@ def test_dns(self): self.assertEqual(len(self._hostdb_entries()['www.huge.com']['rr_records']), NUM_RECORDS) - def test_basic(self): ''' Test basic fnctionality of resolver @@ -401,7 +401,7 @@ def test_serve_stail_for(self): # TODO: Fix this! 
# for whatever reason the failed DNS response is taking ~3.5s to timeout # even though the hostdb.lookup_timeout is set to 1 (meaning it should be ~1s) - #print end - end_working + # print end - end_working #self.assertTrue(end - start >= 2) @@ -441,10 +441,9 @@ def proxies(self): ret['http'] = dst return ret - @classmethod def setUpEnv(cls, env): - cls.dns_sock = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) + cls.dns_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) cls.dns_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) cls.dns_sock.bind(('', 0)) # bind to all interfaces on an ephemeral port dns_port = cls.dns_sock.getsockname()[1] @@ -494,7 +493,7 @@ def setUpEnv(cls, env): ss_dns_results.append(dnslib.server.RR( name, dnslib.dns.QTYPE.SRV, - rdata = dnslib.dns.SRV( + rdata=dnslib.dns.SRV( priority=10, weight=10, port=ss.port, @@ -574,7 +573,7 @@ def test_priority(self): NUM_REQUESTS = 10 orig_responses = self.responses['_http._tcp.www.foo.com.'] try: - self.responses['_http._tcp.www.foo.com.'][0].rdata.priority=1 + self.responses['_http._tcp.www.foo.com.'][0].rdata.priority = 1 request_distribution = {} for x in xrange(0, NUM_REQUESTS): @@ -609,7 +608,7 @@ def test_weight(self): NUM_REQUESTS = 100 orig_responses = self.responses['_http._tcp.www.foo.com.'] try: - self.responses['_http._tcp.www.foo.com.'][0].rdata.weight=100 + self.responses['_http._tcp.www.foo.com.'][0].rdata.weight = 100 request_distribution = {} for x in xrange(0, NUM_REQUESTS): diff --git a/ci/tsqa/tests/test_http2_spec.py b/ci/tsqa/tests/test_http2_spec.py index 915ca1a8180..c4be3370ebe 100644 --- a/ci/tsqa/tests/test_http2_spec.py +++ b/ci/tsqa/tests/test_http2_spec.py @@ -58,7 +58,7 @@ def setUpEnv(cls, env): # get path to h2spec cls.h2spec = which('h2spec') if cls.h2spec is None: - raise helpers.unittest.SkipTest('Cannot find h2spec. skipping test.') + raise helpers.unittest.SkipTest('Cannot find h2spec. 
skipping test.') # get HTTP/2 server ports cls.http2_port = tsqa.utils.bind_unused_port()[1] @@ -85,7 +85,7 @@ def __callH2Spec(self, section=None): ''' args = [self.h2spec, '-h', 'localhost', '-p', str(self.http2_port), '-t', '-k'] if section is not None: - args.extend(['-s', section]) + args.extend(['-s', section]) log.info('full args = {0}'.format(args)) p = subprocess.Popen( diff --git a/ci/tsqa/tests/test_https.py b/ci/tsqa/tests/test_https.py index 8e45d6ed76e..cddcc7b568d 100644 --- a/ci/tsqa/tests/test_https.py +++ b/ci/tsqa/tests/test_https.py @@ -231,7 +231,7 @@ class TestMix(helpers.EnvironmentCase, CertSelectionMixin): def setUpEnv(cls, env): # Temporarily skipping TestMix until we can figure out how to specify underlying open ssl versions # The behaviour of the intermediate cert chains depends on openssl version - raise helpers.unittest.SkipTest('Skip TestMix until we figure out openssl version tracking'); + raise helpers.unittest.SkipTest('Skip TestMix until we figure out openssl version tracking') # add an SSL port to ATS cls.ssl_port = tsqa.utils.bind_unused_port()[1] cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] += ' {0}:ssl'.format(cls.ssl_port) @@ -320,7 +320,7 @@ def test_config_file_group(self): cert = self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') with self.assertRaises(Exception): - self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) + self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) time.sleep(5) os.system('cp %s %s' % (helpers.tests_file_path('ec_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) log.info('cp %s %s' % (helpers.tests_file_path('ec_keys/www.test.com.pem'), helpers.tests_file_path('www.unknown.com.pem'))) @@ -329,17 +329,17 @@ def test_config_file_group(self): # waiting for the reconfiguration completed sec = 0 while True: - time.sleep(5) - sec += 5 - log.info("reloading: %d seconds" % (sec)) - 
self.assertLess(sec, 30) - try: - self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) - break - except: - continue + time.sleep(5) + sec += 5 + log.info("reloading: %d seconds" % (sec)) + self.assertLess(sec, 30) + try: + self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) + break + except: + continue cert = self._get_cert(addr, ciphers=CIPHER_MAP['ecdsa']) self.assertEqual(cert.get_subject().commonName.decode(), 'www.test.com') with self.assertRaises(Exception): - self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) - os.system('rm %s' %(helpers.tests_file_path('www.unknown.com.pem'))) + self._get_cert(addr, ciphers=CIPHER_MAP['rsa']) + os.system('rm %s' % (helpers.tests_file_path('www.unknown.com.pem'))) diff --git a/ci/tsqa/tests/test_keepalive.py b/ci/tsqa/tests/test_keepalive.py index 9a675078fca..47d5a7d7ffc 100644 --- a/ci/tsqa/tests/test_keepalive.py +++ b/ci/tsqa/tests/test_keepalive.py @@ -63,6 +63,7 @@ class KeepAliveInMixin(object): TODO: Allow protocol to be specified for ssl traffic """ + def _get_socket(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(('127.0.0.1', int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']))) @@ -80,7 +81,8 @@ def _aux_KA_working_path_connid(self, protocol, headers=None): if headers is None: headers = {} with requests.Session() as s: - url = '{0}://127.0.0.1:{1}/'.format(protocol, int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) + url = '{0}://127.0.0.1:{1}/'.format(protocol, + int(self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'])) conn_id = None for x in xrange(1, 10): ret = s.get(url, headers=headers) diff --git a/ci/tsqa/tests/test_origin_max_connections.py b/ci/tsqa/tests/test_origin_max_connections.py index c5bf41a3c37..7c0323a6dc6 100644 --- a/ci/tsqa/tests/test_origin_max_connections.py +++ b/ci/tsqa/tests/test_origin_max_connections.py @@ -94,11 +94,15 @@ def setUpEnv(cls, env): with open(noqueue_path, 'w') as fh: 
fh.write('CONFIG proxy.config.http.origin_max_connections_queue INT 0') - cls.configs['remap.config'].add_line('map /other/queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, queue_path)) - cls.configs['remap.config'].add_line('map /other/noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, noqueue_path)) + cls.configs['remap.config'].add_line( + 'map /other/queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, queue_path)) + cls.configs['remap.config'].add_line( + 'map /other/noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port2, noqueue_path)) cls.configs['remap.config'].add_line('map /other/ http://127.0.0.1:{0}'.format(cls.socket_server_port2)) - cls.configs['remap.config'].add_line('map /queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, queue_path)) - cls.configs['remap.config'].add_line('map /noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, noqueue_path)) + cls.configs['remap.config'].add_line( + 'map /queue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, queue_path)) + cls.configs['remap.config'].add_line( + 'map /noqueue/ http://127.0.0.1:{0} @plugin=conf_remap.so @pparam={1}'.format(cls.socket_server_port, noqueue_path)) cls.configs['remap.config'].add_line('map / http://127.0.0.1:{0}'.format(cls.socket_server_port)) cls.configs['records.config']['CONFIG'].update({ @@ -139,7 +143,6 @@ def _send_requests(self, total_requests, path='', other=False): return results, results2 - # TODO: enable after TS-4340 is merged # and re-enable `other` for the remaining queueing tests def tesst_origin_scoping(self): diff --git a/ci/tsqa/tests/test_tls_ticket_key_rotation.py b/ci/tsqa/tests/test_tls_ticket_key_rotation.py index 7af883a529e..07ffdbf28ae 100644 --- 
a/ci/tsqa/tests/test_tls_ticket_key_rotation.py +++ b/ci/tsqa/tests/test_tls_ticket_key_rotation.py @@ -60,7 +60,8 @@ def setUpEnv(cls, env): # configure SSL multicert - cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0} ssl_key_name={1} ticket_key_name={2}'.format(helpers.tests_file_path('rsa_keys/ca.crt'), helpers.tests_file_path('rsa_keys/ca.key'), helpers.tests_file_path('rsa_keys/ssl_ticket.key'))) + cls.configs['ssl_multicert.config'].add_line('dest_ip=* ssl_cert_name={0} ssl_key_name={1} ticket_key_name={2}'.format( + helpers.tests_file_path('rsa_keys/ca.crt'), helpers.tests_file_path('rsa_keys/ca.key'), helpers.tests_file_path('rsa_keys/ssl_ticket.key'))) def start_connection(self, addr): ''' diff --git a/cmd/Makefile.am b/cmd/Makefile.am index bd66e5278c8..00a63b4b6d5 100644 --- a/cmd/Makefile.am +++ b/cmd/Makefile.am @@ -24,6 +24,8 @@ SUBDIRS = \ traffic_top \ traffic_via +TESTS = $(check_PROGRAMS) + if BUILD_WCCP SUBDIRS += traffic_wccp diff --git a/cmd/traffic_cop/traffic_cop.cc b/cmd/traffic_cop/traffic_cop.cc index 9aaf88b099f..871c7f615f9 100644 --- a/cmd/traffic_cop/traffic_cop.cc +++ b/cmd/traffic_cop/traffic_cop.cc @@ -146,7 +146,7 @@ static const char localhost[] = "127.0.0.1"; static void cop_log(int priority, const char *format, ...) 
TS_PRINTFLIKE(2, 3); -static void get_admin_user(void); +static void get_admin_user(); struct ConfigValue { ConfigValue() : config_type(RECT_NULL), data_type(RECD_NULL) {} @@ -375,7 +375,7 @@ set_alarm_warn() } static void -process_syslog_config(void) +process_syslog_config() { int new_fac; @@ -421,7 +421,7 @@ safe_kill(const char *lockfile_name, const char *pname, bool group) // one 64bit int // static ink_hrtime -milliseconds(void) +milliseconds() { struct timeval now; @@ -935,7 +935,7 @@ open_socket(int port, const char *ip = nullptr, const char *ip_to_bind = nullptr } static int -test_port(int port, const char *request, char *buffer, int bufsize, int64_t test_timeout, const char *ip = NULL, +test_port(int port, const char *request, char *buffer, int bufsize, int64_t test_timeout, const char *ip = nullptr, const char *ip_to_bind = nullptr) { int64_t start_time, timeout; @@ -1520,11 +1520,11 @@ check_memory() if ((fp = fopen("/proc/meminfo", "r"))) { while (fgets(buf, sizeof buf, fp)) { if (strncmp(buf, "MemFree:", sizeof "MemFree:" - 1) == 0) - memfree = strtoll(buf + sizeof "MemFree:" - 1, 0, 10); + memfree = strtoll(buf + sizeof "MemFree:" - 1, nullptr, 10); else if (strncmp(buf, "SwapFree:", sizeof "SwapFree:" - 1) == 0) - swapfree = strtoll(buf + sizeof "SwapFree:" - 1, 0, 10); + swapfree = strtoll(buf + sizeof "SwapFree:" - 1, nullptr, 10); else if (strncmp(buf, "SwapTotal:", sizeof "SwapTotal:" - 1) == 0) - swapsize = strtoll(buf + sizeof "SwapTotal:" - 1, 0, 10); + swapsize = strtoll(buf + sizeof "SwapTotal:" - 1, nullptr, 10); } fclose(fp); // simple heuristic for linux diff --git a/cmd/traffic_ctl/alarm.cc b/cmd/traffic_ctl/alarm.cc index 5b014a4f26e..d3229f386ac 100644 --- a/cmd/traffic_ctl/alarm.cc +++ b/cmd/traffic_ctl/alarm.cc @@ -24,7 +24,7 @@ #include "traffic_ctl.h" struct AlarmListPolicy { - typedef char *entry_type; + using entry_type = char *; static void free(entry_type e) @@ -39,7 +39,7 @@ struct AlarmListPolicy { } }; -typedef CtrlMgmtList 
CtrlAlarmList; +using CtrlAlarmList = CtrlMgmtList; static int alarm_list(unsigned argc, const char **argv) diff --git a/cmd/traffic_ctl/config.cc b/cmd/traffic_ctl/config.cc index 5a15946c252..5a3c9eb3a47 100644 --- a/cmd/traffic_ctl/config.cc +++ b/cmd/traffic_ctl/config.cc @@ -22,12 +22,12 @@ */ #include "traffic_ctl.h" -#include +#include #include #include struct RecordDescriptionPolicy { - typedef TSConfigRecordDescription *entry_type; + using entry_type = TSConfigRecordDescription *; static void free(entry_type e) diff --git a/cmd/traffic_layout/traffic_layout.cc b/cmd/traffic_layout/traffic_layout.cc index f10fc17b924..cce710bc572 100644 --- a/cmd/traffic_layout/traffic_layout.cc +++ b/cmd/traffic_layout/traffic_layout.cc @@ -61,7 +61,7 @@ static void print_feature(const char *name, const char *value, bool json, bool last = false) { if (json) { - printf(" \"%s\": \"%s\"%s", name, value, last ? "\n" : ",\n"); + printf(R"( "%s": "%s"%s)", name, value, last ? "\n" : ",\n"); } else { printf("#define %s \"%s\"\n", name, value); } @@ -126,7 +126,7 @@ static void print_var(const char *name, char *value, bool json, bool free = true, bool last = false) { if (json) { - printf(" \"%s\": \"%s\"%s", name, value, last ? "\n" : ",\n"); + printf(R"( "%s": "%s"%s)", name, value, last ? "\n" : ",\n"); } else { printf("%s: %s\n", name, value); } diff --git a/cmd/traffic_manager/Makefile.am b/cmd/traffic_manager/Makefile.am index 3c84b16c102..d3c8ced9446 100644 --- a/cmd/traffic_manager/Makefile.am +++ b/cmd/traffic_manager/Makefile.am @@ -16,6 +16,9 @@ # limitations under the License. 
bin_PROGRAMS = traffic_manager +check_PROGRAMS = test_metrics + +TESTS = $(check_PROGRAMS) AM_CPPFLAGS = \ $(LUAJIT_CPPFLAGS) \ @@ -77,6 +80,16 @@ traffic_manager_LDADD += \ @OPENSSL_LIBS@ endif +test_metrics_SOURCES = test_metrics.cc metrics.cc WebOverview.cc +test_metrics_LDADD = \ + $(top_builddir)/mgmt/libmgmt_lm.la \ + $(top_builddir)/lib/records/librecords_lm.a \ + $(top_builddir)/lib/bindings/libbindings.la \ + $(top_builddir)/lib/luajit/src/libluajit.a \ + $(top_builddir)/lib/ts/libtsutil.la \ + $(top_builddir)/iocore/eventsystem/libinkevent.a \ + @LIBTCL@ @LIBPCRE@ + include $(top_srcdir)/build/tidy.mk tidy-local: $(DIST_SOURCES) diff --git a/cmd/traffic_manager/MgmtHandlers.cc b/cmd/traffic_manager/MgmtHandlers.cc index f1a198c868a..dafef1ed50b 100644 --- a/cmd/traffic_manager/MgmtHandlers.cc +++ b/cmd/traffic_manager/MgmtHandlers.cc @@ -252,7 +252,7 @@ mgmt_synthetic_main(void *) lmgmt->alarm_keeper->signalAlarm(MGMT_ALARM_WEB_ERROR, "Healthcheck service failed to initialize"); } - while (1) { + while (true) { struct sockaddr_in clientInfo; // Info about client connection socklen_t addrLen = sizeof(clientInfo); diff --git a/cmd/traffic_manager/metrics.cc b/cmd/traffic_manager/metrics.cc index 7186cee6bc5..88a296f1977 100644 --- a/cmd/traffic_manager/metrics.cc +++ b/cmd/traffic_manager/metrics.cc @@ -21,6 +21,8 @@ * limitations under the License. 
*/ +#include + #include "ts/ink_config.h" #include "ts/ink_memory.h" #include "ts/Ptr.h" @@ -35,12 +37,10 @@ #include "metrics.h" struct Evaluator { - Evaluator() : rec_name(nullptr), data_type(RECD_NULL), ref(-1) {} - ~Evaluator() - { - ats_free(this->rec_name); - ink_release_assert(this->ref == -1); - } + Evaluator() : rec_name(nullptr), data_type(RECD_NULL), ref(LUA_NOREF) {} + ~Evaluator() { ink_release_assert(this->ref == LUA_NOREF); } + Evaluator(const Evaluator &) = delete; + Evaluator &operator=(const Evaluator &) = delete; bool bind(lua_State *L, const char *metric, const char *expression) @@ -68,6 +68,20 @@ struct Evaluator { return true; } + void + unbind(lua_State *L) + { + if (this->ref != LUA_NOREF) { + luaL_unref(L, LUA_REGISTRYINDEX, this->ref); + } + + ats_free(this->rec_name); + + this->ref = LUA_NOREF; + this->rec_name = nullptr; + this->data_type = RECD_NULL; + } + void eval(lua_State *L) const { @@ -102,7 +116,7 @@ struct Evaluator { case RECD_FLOAT: // Lua will eval 0/0 to NaN rather than 0. 
rec_value.rec_float = lua_tonumber(L, -1); - if (isnan(rec_value.rec_float)) { + if (std::isnan(rec_value.rec_float)) { rec_value.rec_float = 0.0; } break; @@ -134,12 +148,23 @@ struct EvaluatorList { } } + EvaluatorList(const EvaluatorList &) = delete; + EvaluatorList &operator=(const EvaluatorList &) = delete; + void push_back(Evaluator *e) { evaluators.push_back(e); } + void + unbind(lua_State *L) const + { + forv_Vec (Evaluator, e, this->evaluators) { + e->unbind(L); + } + } + void evaluate(lua_State *L) const { @@ -323,9 +348,6 @@ metrics_cluster_sum(lua_State *L) bool metrics_binding_initialize(BindingInstance &binding) { - ats_scoped_str sysconfdir(RecConfigReadConfigDir()); - ats_scoped_str config(Layout::get()->relative_to(sysconfdir, "metrics.config")); - if (!binding.construct()) { mgmt_fatal(0, "failed to initialize Lua runtime\n"); } @@ -346,12 +368,7 @@ metrics_binding_initialize(BindingInstance &binding) // Stash a backpointer to the evaluators. binding.attach_ptr("evaluators", new EvaluatorList()); - // Finally, execute the config file. 
- if (binding.require(config.get())) { - return true; - } - - return false; + return true; } void @@ -361,9 +378,20 @@ metrics_binding_destroy(BindingInstance &binding) evaluators = (EvaluatorList *)binding.retrieve_ptr("evaluators"); binding.attach_ptr("evaluators", nullptr); + + evaluators->unbind(binding.lua); delete evaluators; } +bool +metrics_binding_configure(BindingInstance &binding) +{ + ats_scoped_str sysconfdir(RecConfigReadConfigDir()); + ats_scoped_str config(Layout::get()->relative_to(sysconfdir, "metrics.config")); + + return binding.require(config.get()); +} + void metrics_binding_evaluate(BindingInstance &binding) { diff --git a/cmd/traffic_manager/metrics.h b/cmd/traffic_manager/metrics.h index 4bc97ce88e9..8a415511e09 100644 --- a/cmd/traffic_manager/metrics.h +++ b/cmd/traffic_manager/metrics.h @@ -24,8 +24,16 @@ #ifndef METRICS_H_D289E71B_AAC5_4CF3_9954_D54EDED60D1B #define METRICS_H_D289E71B_AAC5_4CF3_9954_D54EDED60D1B +#include "bindings/bindings.h" +#include "bindings/metrics.h" + bool metrics_binding_initialize(BindingInstance &binding); void metrics_binding_destroy(BindingInstance &binding); + +// Configure metrics from the metrics.config configuration file. +bool metrics_binding_configure(BindingInstance &binding); + +// Evaluate the metrics in this binding instance. void metrics_binding_evaluate(BindingInstance &binding); #endif /* METRICS_H_D289E71B_AAC5_4CF3_9954_D54EDED60D1B */ diff --git a/cmd/traffic_manager/test_metrics.cc b/cmd/traffic_manager/test_metrics.cc new file mode 100644 index 00000000000..52ca96e63a0 --- /dev/null +++ b/cmd/traffic_manager/test_metrics.cc @@ -0,0 +1,78 @@ +/* + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "ts/Regression.h" +#include "ts/TestBox.h" +#include "ts/I_Layout.h" +#include "LocalManager.h" +#include "RecordsConfig.h" +#include "P_RecLocal.h" +#include "metrics.h" + +LocalManager *lmgmt = nullptr; + +// Check that we can load and delete metrics. +REGRESSION_TEST(LoadMetrics)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus) +{ + TestBox box(t, pstatus); + + box = REGRESSION_TEST_PASSED; + + BindingInstance binding; + box.check(metrics_binding_initialize(binding), "initialize metrics"); + metrics_binding_destroy(binding); +} + +// Check that we can set a value. 
+REGRESSION_TEST(EvalMetrics)(RegressionTest *t, int /* atype ATS_UNUSED */, int *pstatus) +{ + TestBox box(t, pstatus); + + box = REGRESSION_TEST_PASSED; + + const char *config = R"( +integer 'proxy.node.test.value' [[ + return 5 +]] + )"; + + BindingInstance binding; + + box.check(metrics_binding_initialize(binding), "initialize metrics"); + box.check(binding.eval(config), "load metrics config"); + + metrics_binding_evaluate(binding); + + RecInt value = 0; + box.check(RecGetRecordInt("proxy.node.test.value", &value) == REC_ERR_OKAY, "read value (5) from proxy.node.test.value"); + box.check(value == 5, "proxy.node.test.value was %" PRId64 ", wanted 5", value); + + metrics_binding_destroy(binding); +} + +int +main(int argc, const char **argv) +{ + Layout::create(); + RecLocalInit(); + LibRecordsConfigInit(); + return RegressionTest::main(argc, argv, REGRESSION_TEST_QUICK); +} diff --git a/cmd/traffic_manager/traffic_manager.cc b/cmd/traffic_manager/traffic_manager.cc index ce33925d0ae..d547ca15178 100644 --- a/cmd/traffic_manager/traffic_manager.cc +++ b/cmd/traffic_manager/traffic_manager.cc @@ -53,9 +53,6 @@ #include "P_RecLocal.h" -#include "bindings/bindings.h" -#include "bindings/metrics.h" - #include "metrics.h" #if TS_USE_POSIX_CAP @@ -724,6 +721,7 @@ main(int argc, const char **argv) binding = new BindingInstance; metrics_binding_initialize(*binding); + metrics_binding_configure(*binding); int sleep_time = 0; // sleep_time given in sec @@ -737,6 +735,8 @@ main(int argc, const char **argv) binding = new BindingInstance; metrics_binding_initialize(*binding); + metrics_binding_configure(*binding); + binding_version = metrics_version; } @@ -1021,6 +1021,8 @@ fileUpdated(char *fname, bool incVersion) mgmt_log("[fileUpdated] metrics.config file has been modified\n"); } else if (strcmp(fname, "congestion.config") == 0) { lmgmt->signalFileChange("proxy.config.http.congestion_control.filename"); + } else if (strcmp(fname, 
"proxy.config.ssl.server.ticket_key.filename") == 0) { + lmgmt->signalFileChange("proxy.config.ssl.server.ticket_key.filename"); } else { mgmt_log("[fileUpdated] Unknown config file updated '%s'\n", fname); } diff --git a/cmd/traffic_top/Makefile.am b/cmd/traffic_top/Makefile.am index 3e723163402..4b8132d836d 100644 --- a/cmd/traffic_top/Makefile.am +++ b/cmd/traffic_top/Makefile.am @@ -38,7 +38,8 @@ traffic_top_LDADD = \ $(top_builddir)/mgmt/api/libtsmgmt.la \ $(top_builddir)/lib/ts/libtsutil.la \ @CURL_LIBS@ \ - @CURSES_LIBS@ + @CURSES_LIBS@ \ + @LIBTCL@ @HWLOC_LIBS@ endif diff --git a/cmd/traffic_top/stats.h b/cmd/traffic_top/stats.h index f1877c6d58c..ffb1c1e21bd 100644 --- a/cmd/traffic_top/stats.h +++ b/cmd/traffic_top/stats.h @@ -20,14 +20,16 @@ See the License for the specific language governing permissions and limitations under the License. */ - +#if HAS_CURL #include +#endif #include #include #include #include #include #include +#include #include "mgmtapi.h" using namespace std; @@ -44,7 +46,9 @@ struct LookupItem { int type; }; extern size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream); +#if HAS_CURL extern char curl_error[CURL_ERROR_SIZE]; +#endif extern string response; namespace constant @@ -289,6 +293,7 @@ class Stats _now = now; _time_diff = _now - _old_time; } else { +#if HAS_CURL CURL *curl; CURLcode res; @@ -328,6 +333,7 @@ class Stats /* always cleanup */ curl_easy_cleanup(curl); } +#endif } } diff --git a/cmd/traffic_top/traffic_top.cc b/cmd/traffic_top/traffic_top.cc index 34a5fb1c66e..32441ae3df3 100644 --- a/cmd/traffic_top/traffic_top.cc +++ b/cmd/traffic_top/traffic_top.cc @@ -25,10 +25,10 @@ #include #include #include -#include +#include #include -#include -#include +#include +#include #include #include @@ -54,7 +54,9 @@ #include "stats.h" using namespace std; +#if HAS_CURL char curl_error[CURL_ERROR_SIZE]; +#endif string response; namespace colorPair @@ -128,12 +130,12 @@ makeTable(const int x, const int y, const list 
&items, Stats &stats) { int my_y = y; - for (list::const_iterator it = items.begin(); it != items.end(); ++it) { + for (const auto &item : items) { string prettyName; double value = 0; int type; - stats.getStat(*it, value, prettyName, type); + stats.getStat(item, value, prettyName, type); mvprintw(my_y, x, prettyName.c_str()); prettyPrint(x + 10, my_y++, value, type); } @@ -219,7 +221,7 @@ help(const string &host, const string &version) { timeout(1000); - while (1) { + while (true) { clear(); time_t now = time(nullptr); struct tm *nowtm = localtime(&now); @@ -263,7 +265,11 @@ help(const string &host, const string &version) static void usage() { +#if HAS_CURL fprintf(stderr, "Usage: traffic_top [-s seconds] [URL|hostname|hostname:port]\n"); +#else + fprintf(stderr, "Usage: traffic_top [-s seconds]\n"); +#endif exit(1); } @@ -400,9 +406,17 @@ main(int argc, char **argv) } string url = ""; +#if HAS_CURL if (optind >= argc) { +#else + if (1) { +#endif if (TS_ERR_OKAY != TSInit(nullptr, static_cast(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS))) { +#if HAS_CURL fprintf(stderr, "Error: missing URL on command line or error connecting to the local manager\n"); +#else + fprintf(stderr, "Error: error connecting to the local manager\n"); +#endif usage(); } } else { @@ -433,7 +447,7 @@ main(int argc, char **argv) Page page = MAIN_PAGE; string page_alt = "(r)esponse"; - while (1) { + while (true) { attron(COLOR_PAIR(colorPair::border)); attron(A_BOLD); diff --git a/cmd/traffic_via/Makefile.am b/cmd/traffic_via/Makefile.am index d9c8e94a752..36393590a5b 100644 --- a/cmd/traffic_via/Makefile.am +++ b/cmd/traffic_via/Makefile.am @@ -34,6 +34,7 @@ traffic_via_SOURCES = \ traffic_via_LDADD = \ $(top_builddir)/lib/ts/libtsutil.la \ + @LIBPCRE@ \ @LIBTCL@ TESTS = \ diff --git a/cmd/traffic_via/traffic_via.cc b/cmd/traffic_via/traffic_via.cc index b0a5d0eeb32..4a82b73f7b4 100644 --- a/cmd/traffic_via/traffic_via.cc +++ b/cmd/traffic_via/traffic_via.cc @@ -27,8 +27,8 @@ #include 
"ts/Tokenizer.h" #include "ts/TextBuffer.h" #include "mgmtapi.h" -#include -#include +#include +#include #include "ts/Regex.h" /// XXX Use DFA or Regex wrappers? @@ -269,7 +269,7 @@ filterViaHeader() int pcreExecCode; int i; const char *viaPattern = - "\\[([ucsfpe]+[^\\]]+)\\]"; // Regex to match via header with in [] which can start with character class ucsfpe + R"(\[([ucsfpe]+[^\]]+)\])"; // Regex to match via header with in [] which can start with character class ucsfpe char *viaHeaderString; char viaHeader[1024]; diff --git a/config.layout b/config.layout index 7ecd064721d..415f89b1d96 100644 --- a/config.layout +++ b/config.layout @@ -184,7 +184,7 @@ bindir: ${exec_prefix}/bin sbindir: ${exec_prefix}/sbin libdir: ${exec_prefix}/lib+ - libexecdir: ${exec_prefix}/lib/trafficserver/modules + libexecdir: ${libdir}/modules infodir: ${prefix}/share/info mandir: ${prefix}/share/man sysconfdir: /etc+ @@ -224,11 +224,11 @@ prefix: /usr/local exec_prefix: ${prefix} bindir: ${exec_prefix}/bin - sbindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin libdir: ${exec_prefix}/lib libexecdir: ${exec_prefix}/libexec+ infodir: ${prefix}/info - mandir: ${prefix}/share/man + mandir: ${prefix}/man sysconfdir: /etc+ datadir: ${prefix}/share+ docdir: ${prefix}/share/doc+ diff --git a/configure.ac b/configure.ac index e7b29edb5ce..f6adcbaaa05 100644 --- a/configure.ac +++ b/configure.ac @@ -167,6 +167,14 @@ AC_ARG_ENABLE([debug], ) AC_MSG_RESULT([$enable_debug]) +AC_MSG_CHECKING([whether to enable mime sanity check]) +AC_ARG_ENABLE([mime-sanity-check], + [AS_HELP_STRING([--enable-mime-sanity-check],[turn on mime sanity check])], + [], + [enable_mime_sanity_check=no] +) +AC_MSG_RESULT([$enable_mime_sanity_check]) + # Enable code coverage instrumentation only if requested by the user. 
AC_MSG_CHECKING([whether to code coverage]) AC_ARG_ENABLE([coverage], @@ -220,6 +228,19 @@ AC_ARG_ENABLE([fast-sdk], AC_MSG_RESULT([$enable_fast_sdk]) TS_ARG_ENABLE_VAR([use], [fast-sdk]) +# Curl support for traffic_top +AC_MSG_CHECKING([whether to enable CURL]) +AC_ARG_ENABLE([curl], + [AS_HELP_STRING([--disable-curl],[turn off CURL support for traffic_top])], + [], + [enable_curl=yes] +) +AC_MSG_RESULT([$enable_curl]) + +if test "x${enable_curl}" = "xyes"; then +AX_LIB_CURL([7.19], [AC_DEFINE([HAS_CURL], [1], [Define if libcurl >= 7.19.])]) +fi + # # Diags # @@ -423,6 +444,12 @@ AC_ARG_WITH([max-threads-per-type], ) AC_SUBST(max_threads_per_type) +# Check Brotli +AC_CHECK_HEADERS([brotli/encode.h], [has_brotli=1],[has_brotli=0]) +AC_CHECK_LIB([brotlienc],[BrotliEncoderCreateInstance],[AC_SUBST([LIB_BROTLIENC],["-lbrotlienc"])],[has_brotli=0]) +AC_SUBST(has_brotli) +AM_CONDITIONAL([HAS_BROTLI], [ test "x${has_brotli}" = "x1" ]) + # # Experimental plugins # @@ -770,24 +797,10 @@ AS_IF([test "x${has_optimizer_flags}" = "xno"], case $host_os_def in linux) AS_IF([test "x$ax_cv_c_compiler_vendor" = "xintel"], [ - # -Wall goes crazy, so turned these specific checks off for now: + # -Wall is overzealous for us, so need to turn this off for now: # - # 111 is "statement is unreachable" - # 279 is "controlling expression is constant", triggered by our asserts - # 383 is "value copied to temporary, reference to temporary used" - # 444 is "destructor for base class is not virtual" - # 522 is "function "xyz" redeclared "inline" after being called - # 873 is "has no corresponding operator delete". ToDo: we should fix. 
- # 981 is "operands are evaluated in unspecified order" - # 1418 is "external function definition with no prior declaration" - # 1419 is "external declaration in primary source file" - # 1572 is "floating-point equality and inequality comparisons are unreliable" - # 1720 is "operator new" has no corresponding member operator delete" - # 2256 is "non-pointer conversion from "int" to "unsigned char" " - # 2259 is "non-pointer conversion from "int" to "unsigned char" " - # - # TODO: We should try to eliminate more of these -wd exclusions. - common_opt="-pipe -Wall -wd111 -wd279 -wd383 -wd522 -wd444 -wd873 -wd981 -wd1418 -wd1419 -wd1572 -wd1720 -wd2256 -wd2259" + # #873 is "has no corresponding operator delete" + common_opt="-pipe -Wall -wd873" debug_opt="-ggdb3 $common_opt" release_opt="-g $common_opt $optimization_flags -axsse4.2 -fno-strict-aliasing" cxx_opt="-Wno-invalid-offsetof" @@ -798,7 +811,7 @@ case $host_os_def in debug_opt="-ggdb3 $common_opt -Qunused-arguments" release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing -Qunused-arguments" cxx_opt="-Wno-invalid-offsetof" - luajit_cflags="-Wno-parentheses-equality -Wno-tautological-compare -analyzer-disable-all-checks" + luajit_cflags="-Wno-parentheses-equality -Wno-tautological-compare -analyzer-disable-all-checks -Wno-varargs" ]) AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ @@ -820,7 +833,7 @@ case $host_os_def in debug_opt="-g $common_opt" release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing" cxx_opt="-Wno-invalid-offsetof" - luajit_cflags="-Wno-parentheses-equality -Wno-tautological-compare" + luajit_cflags="-Wno-parentheses-equality -Wno-tautological-compare -Wno-varargs" ], [ AC_MSG_WARN([clang is the only supported compiler on Darwin]) ]) @@ -841,7 +854,7 @@ case $host_os_def in debug_opt="-ggdb3 $common_opt" release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing" cxx_opt="-Wno-invalid-offsetof" - luajit_cflags="-Wno-parentheses-equality 
-Wno-tautological-compare" + luajit_cflags="-Wno-parentheses-equality -Wno-tautological-compare -Wno-varargs" ]) AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ @@ -928,6 +941,9 @@ if test "x${enable_debug}" = "xyes"; then TS_ADDTO(CFLAGS, [${cc_oflag_dbg}]) TS_ADDTO(CXXFLAGS, [${cxx_oflag_dbg}]) TS_ADDTO(CPPFLAGS, [-DDEBUG -D_DEBUG]) + if test "x${enable_mime_sanity_check}" = "xyes"; then + TS_ADDTO(CPPFLAGS, [-DENABLE_MIME_SANITY_CHECK]) + fi else TS_ADDTO(CFLAGS, [${cc_oflag_opt}]) TS_ADDTO(CXXFLAGS, [${cxx_oflag_opt}]) @@ -1091,13 +1107,6 @@ TS_REMOVEFROM(CFLAGS, -Werror) AX_WITH_CURSES CFLAGS="$__saved_CFLAGS" -AX_LIB_CURL([7.19], [ - AM_CONDITIONAL([BUILD_TRAFFIC_TOP], [test "x$ax_cv_curses" = "xyes"]) - ], [ - AM_CONDITIONAL([BUILD_TRAFFIC_TOP], [false]) - ] -) - # # Check for SSL presence and usability TS_CHECK_CRYPTO @@ -1128,6 +1137,39 @@ TS_CHECK_CRYPTO_SET_RBIO # Check for DH_get_2048_256 TS_CHECK_CRYPTO_DH_GET_2048_256 +saved_LIBS="$LIBS" +TS_ADDTO([LIBS], ["$OPENSSL_LIBS"]) + +AC_CHECK_FUNCS([ \ + BIO_meth_new \ + BIO_sock_non_fatal_error \ + CRYPTO_set_mem_functions \ + HMAC_CTX_new \ + X509_get0_signature \ +]) + +AC_CHECK_FUNC([BIO_set_data], [], + [AC_DEFINE([BIO_set_data(a, _ptr)], [((a)->ptr = (_ptr))], [Added in OpenSSL 1.1])]) +AC_CHECK_FUNC([BIO_get_data], [], + [AC_DEFINE([BIO_get_data(a)], [((a)->ptr)], [Added in OpenSSL 1.1])]) +AC_CHECK_FUNC([BIO_get_shutdown], [], + [AC_DEFINE([BIO_get_shutdown(a)], [((a)->shutdown)], [Added in OpenSSL 1.1])]) +AC_CHECK_FUNC([BIO_meth_get_ctrl], [], + [AC_DEFINE([BIO_meth_get_ctrl(biom)], [((biom)->ctrl)], [Added in OpenSSL 1.1])]) +AC_CHECK_FUNC([BIO_meth_get_create], [], + [AC_DEFINE([BIO_meth_get_create(biom)], [((biom)->create)], [Added in OpenSSL 1.1])]) +AC_CHECK_FUNC([BIO_meth_get_destroy], [], + [AC_DEFINE([BIO_meth_get_destroy(biom)], [((biom)->destroy)], [Added in OpenSSL 1.1])]) + +AC_CHECK_FUNC([EVP_MD_CTX_new], [], + [AC_DEFINE([EVP_MD_CTX_new], [EVP_MD_CTX_create], [Renamed in OpenSSL 
1.1])]) +AC_CHECK_FUNC([EVP_MD_CTX_reset], [], + [AC_DEFINE([EVP_MD_CTX_reset], [EVP_MD_CTX_cleanup], [Renamed in OpenSSL 1.1])]) +AC_CHECK_FUNC([EVP_MD_CTX_free], [], + [AC_DEFINE([EVP_MD_CTX_free], [EVP_MD_CTX_destroy], [Renamed in OpenSSL 1.1])]) + +LIBS="$saved_LIBS" + # # Check for zlib presence and usability TS_CHECK_ZLIB @@ -1270,7 +1312,7 @@ AC_SUBST(use_port) # Profiler support has_profiler=0 if test "x${with_profiler}" = "xyes"; then - AC_SEARCH_LIBS([ProfilerStart], [profiler], + AC_CHECK_LIB([profiler], [ProfilerStart], [AC_SUBST([LIBPROFILER], ["-lprofiler"]) has_profiler=1 ], @@ -1298,7 +1340,7 @@ TS_TRY_COMPILE_NO_WARNING([],[ has_128bit_cas=1 ], [ dnl If 128bit CAS fails, try again with the -mcx16 option. GCC needs this; - dnl clang doesn't; icc is unknown but presumed sane. + dnl clang doesn't; icc does not support -mcx16 (but gives a non-fatal warning). TS_ADDTO(CXXFLAGS, [-mcx16]) TS_ADDTO(CFLAGS, [-mcx16]) TS_TRY_COMPILE_NO_WARNING([],[ @@ -1318,15 +1360,17 @@ AC_LANG_POP AC_SUBST(has_128bit_cas) AS_IF([test "x$has_128bit_cas" = "x1"], [ - TS_ADDTO(CFLAGS, [-mcx16]) - TS_ADDTO(CXXFLAGS, [-mcx16]) + AS_IF([test "x$ax_cv_c_compiler_vendor" != "xintel"], [ + TS_ADDTO(CFLAGS, [-mcx16]) + TS_ADDTO(CXXFLAGS, [-mcx16]) + ]) ]) # Check for POSIX capabilities library. # If we don't find it, disable checking for header. use_posix_cap=0 AS_IF([test "x$enable_posix_cap" != "xno"], - AC_SEARCH_LIBS([cap_set_proc],[cap], + AC_CHECK_LIB([cap], [cap_set_proc], [AC_SUBST([LIBCAP], ["-lcap"]) use_posix_cap=1 ],[ @@ -1448,6 +1492,9 @@ AC_SUBST([LUAJIT_CPPFLAGS], ['-I$(abs_top_srcdir)/lib/luajit/src']) # We should be able to build http_load if epoll(2) is available. 
AM_CONDITIONAL([BUILD_HTTP_LOAD], [test x"$ac_cv_func_epoll_ctl" = x"yes"]) +# We should only build traffic_top if we have curses +AM_CONDITIONAL([BUILD_TRAFFIC_TOP], [test "x$ax_cv_curses" = "xyes"]) + AC_CHECK_HEADERS([mysql/mysql.h], [has_mysql=1],[has_mysql=0]) AC_CHECK_LIB([mysqlclient],[mysql_info],[AC_SUBST([LIB_MYSQLCLIENT],["-lmysqlclient"])],[has_mysql=0]) AC_SUBST(has_mysql) @@ -1506,7 +1553,6 @@ AC_CHECK_HEADERS([sys/types.h \ stropts.h \ sys/param.h \ sys/sysmacros.h \ - math.h \ stdint.h \ stdbool.h \ sysexits.h \ diff --git a/contrib/python/compare_RecordsConfigcc.py b/contrib/python/compare_RecordsConfigcc.py index ae7794d5ee3..9e3c303f133 100644 --- a/contrib/python/compare_RecordsConfigcc.py +++ b/contrib/python/compare_RecordsConfigcc.py @@ -43,41 +43,41 @@ "proxy.config.proxy_name": 1, "proxy.config.cluster.ethernet_interface": 1, "proxy.config.ssl.client.private_key.path": 1, - "proxy.config.net.defer_accept": 1 # Specified in RecordsConfig.cc funny + "proxy.config.net.defer_accept": 1 # Specified in RecordsConfig.cc funny } rc_cc = {} # RecordsConfig.cc values rc_in = {} # records.config.in values -rc_doc = {} # documented values +rc_doc = {} # documented values # Process RecordsConfig.cc with open("%s/mgmt/RecordsConfig.cc" % src_dir) as fh: - cc_re = re.compile(r'\{RECT_(?:CONFIG|LOCAL), "([^"]+)", RECD_([A-Z]+), (.+?), ') - for line in fh: - m = cc_re.search(line) - if m: - value = m.group(3) - value = string.lstrip(value, '"') - value = string.rstrip(value, '"') - rc_cc[m.group(1)] = (m.group(2), value) + cc_re = re.compile(r'\{RECT_(?:CONFIG|LOCAL), "([^"]+)", RECD_([A-Z]+), (.+?), ') + for line in fh: + m = cc_re.search(line) + if m: + value = m.group(3) + value = string.lstrip(value, '"') + value = string.rstrip(value, '"') + rc_cc[m.group(1)] = (m.group(2), value) # Process records.config.default.in with open("%s/proxy/config/records.config.default.in" % src_dir) as fh: - in_re = re.compile(r'(?:CONFIG|LOCAL) (\S+)\s+(\S+)\s+(\S+)') - 
for line in fh: - m = in_re.match(line) - if m: - rc_in[m.group(1)] = (m.group(2), m.group(3)) + in_re = re.compile(r'(?:CONFIG|LOCAL) (\S+)\s+(\S+)\s+(\S+)') + for line in fh: + m = in_re.match(line) + if m: + rc_in[m.group(1)] = (m.group(2), m.group(3)) # Process records.comfig documentation. # eg. .. ts:cv:: CONFIG proxy.config.proxy_binary STRING traffic_server with open("%s/doc/admin-guide/files/records.config.en.rst" % src_dir) as fh: - doc_re = re.compile(r'ts:cv:: CONFIG (\S+)\s+(\S+)\s+(\S+)') - for line in fh: - m = doc_re.search(line) - if m: - rc_doc[m.group(1)] = (m.group(2), m.group(3)) - rc_doc[m.group(1)] = (m.group(2), m.group(3)) + doc_re = re.compile(r'ts:cv:: CONFIG (\S+)\s+(\S+)\s+(\S+)') + for line in fh: + m = doc_re.search(line) + if m: + rc_doc[m.group(1)] = (m.group(2), m.group(3)) + rc_doc[m.group(1)] = (m.group(2), m.group(3)) # Compare the two # If a value is in RecordsConfig.cc and not records.config.default.in, it is @@ -93,7 +93,7 @@ print "%s : %s -> %s" % (key, "%s %s" % rc_cc[key], "%s %s" % rc_in[key]) # Search for undocumented variables ... -missing = [ k for k in rc_cc if k not in rc_doc ] +missing = [k for k in rc_cc if k not in rc_doc] if len(missing) > 0: print print "Undocumented configuration variables:" @@ -101,7 +101,7 @@ print "\t%s %s" % (m, "%s %s" % rc_cc[m]) # Search for incorrectly documented default values ... -defaults = [ k for k in rc_cc if k in rc_doc and rc_cc[k] != rc_doc[k] ] +defaults = [k for k in rc_cc if k in rc_doc and rc_cc[k] != rc_doc[k]] if len(defaults) > 0: print print "Incorrectly documented defaults:" @@ -110,10 +110,9 @@ # Search for stale documentation ... 
-stale = [ k for k in rc_doc if k not in rc_cc ] +stale = [k for k in rc_doc if k not in rc_cc] if (len(stale) > 0): print print "Stale documentation:" for s in sorted(stale): - print "\t%s" %(s) - + print "\t%s" % (s) diff --git a/contrib/python/compare_records_config.py b/contrib/python/compare_records_config.py index a8283a38972..acde26ae0e1 100644 --- a/contrib/python/compare_records_config.py +++ b/contrib/python/compare_records_config.py @@ -21,6 +21,7 @@ # Ignores FLOAT differences and @foo@ values from the source code defaults. import sys + def parse_records_file(filename): fh = open(filename) settings = {} @@ -47,6 +48,7 @@ def parse_records_file(filename): continue return settings + def compare_settings(old, new): for key in sorted(tuple(set(old) | set(new))): if key not in old: @@ -61,6 +63,7 @@ def compare_settings(old, new): if old[key] != new[key]: print "%s %s -> %s" % (key, old[key], new[key]) + if __name__ == '__main__': settings_orig = parse_records_file(sys.argv[1]) settings_new = parse_records_file(sys.argv[2]) diff --git a/doc/admin-guide/configuration/cache-basics.en.rst b/doc/admin-guide/configuration/cache-basics.en.rst index 8770e8ab45f..7351d1e0a88 100644 --- a/doc/admin-guide/configuration/cache-basics.en.rst +++ b/doc/admin-guide/configuration/cache-basics.en.rst @@ -683,7 +683,7 @@ cases the content will be buffered in ram while waiting to be sent to the client. This could potentially also happen for ``POST`` requests if the client connection is fast and the origin server connection slow. If very large objects are being used this can cause the memory usage of Traffic Server to become -very large (See issue :ts:jira:`1496`). +`very large `_. This problem can be ameliorated by controlling the amount of buffer space used by a transaction. 
A high water and low water mark are set in terms of bytes diff --git a/doc/admin-guide/files/records.config.en.rst b/doc/admin-guide/files/records.config.en.rst index 58b2667a417..5345cf508bd 100644 --- a/doc/admin-guide/files/records.config.en.rst +++ b/doc/admin-guide/files/records.config.en.rst @@ -783,6 +783,12 @@ ip-resolve 9090:proto=http2;http:ssl +.. topic:: Example + + Listen on port 9090 for TLS enabled HTTP connections with HTTP/2 disabled, and accept no other session protocols:: + + 9090:proto=http:ssl + .. ts:cv:: CONFIG proxy.config.http.connect_ports STRING 443 563 The range of origin server ports that can be used for tunneling via ``CONNECT``. @@ -814,18 +820,19 @@ ip-resolve Set how the ``Via`` field is handled on a request to the origin server. - ===== ============================================ + ===== ==================================================================== Value Effect - ===== ============================================ + ===== ==================================================================== ``0`` Do not modify or set this Via header. - ``1`` Update the Via, with normal verbosity. - ``2`` Update the Via, with higher verbosity. - ``3`` Update the Via, with highest verbosity. - ===== ============================================ + ``1`` Add the basic protocol and proxy identifier. + ``2`` And basic transaction codes. + ``3`` And detailed transaction codes. + ``4`` And full user agent connection :ref:`protocol tags `. + ===== ==================================================================== .. note:: - The ``Via`` header string can be decoded with the `Via Decoder Ring `_. + The ``Via`` transaction codes can be decoded with the `Via Decoder Ring `_. .. ts:cv:: CONFIG proxy.config.http.request_via_str STRING ApacheTrafficServer/${PACKAGE_VERSION} :reloadable: @@ -839,18 +846,19 @@ ip-resolve Set how the ``Via`` field is handled on the response to the client.
- ===== ============================================ + ===== ================================================================== Value Effect - ===== ============================================ - ``0`` Do not modify or set this via header. - ``1`` Update the via, with normal verbosity. - ``2`` Update the via, with higher verbosity. - ``3`` Update the via, with highest verbosity. - ===== ============================================ + ===== ================================================================== + ``0`` Do not modify or set this Via header. + ``1`` Add the basic protocol and proxy identifier. + ``2`` And basic transaction codes. + ``3`` And detailed transaction codes. + ``4`` And full upstream connection :ref:`protocol tags `. + ===== ================================================================== .. note:: - The ``Via`` header string can be decoded with the `Via Decoder Ring `_. + The ``Via`` transaction codes can be decoded with the `Via Decoder Ring `_. .. ts:cv:: CONFIG proxy.config.http.response_via_str STRING ApacheTrafficServer/${PACKAGE_VERSION} :reloadable: @@ -1057,6 +1065,19 @@ ip-resolve according to this setting then it will be used, otherwise it will be released to the pool and a different session selected or created. +.. ts:cv:: CONFIG proxy.config.http.safe_requests_retryable INT 1 + :overridable: + + This setting, on by default, allows requests which are considered safe to be retried on an error. + See the RFC at https://tools.ietf.org/html/rfc7231#section-4.2.1 for details on which request methods are considered safe. + + If this setting is ``0`` then ATS retries a failed origin server request only if the bytes sent by ATS + are not acknowledged by the origin server. + + If this setting is ``1`` then ATS retries all the safe methods to a failed origin server irrespective of + previous connection failure status. + + .. ts:cv:: CONFIG proxy.config.http.record_heartbeat INT 0 + :reloadable: @@ -1091,7 +1112,7 @@ ip-resolve performed.
The result is cached (if allowed otherwise). This option is vulnerable to cache poisoning if an incorrect ``Host`` header is specified, so this option should be used with extreme caution. See - bug :ts:jira:`2954` for details. + bug TS-2954 for details. ===== ====================================================================== If all of these conditions are met, then the origin server IP address is @@ -1205,11 +1226,13 @@ Parent Proxy Configuration .. ts:cv:: CONFIG proxy.config.http.parent_proxy.retry_time INT 300 :reloadable: + :overridable: The amount of time allowed between connection retries to a parent cache that is unavailable. .. ts:cv:: CONFIG proxy.config.http.parent_proxy.fail_threshold INT 10 :reloadable: + :overridable: The number of times the connection to the parent cache can fail before Traffic Server considers the parent unavailable. @@ -1223,17 +1246,28 @@ Parent Proxy Configuration .. ts:cv:: CONFIG proxy.config.http.parent_proxy.per_parent_connect_attempts INT 2 :reloadable: + :overridable: The total number of connection attempts allowed per parent for a specific transaction, if multiple parents are used. .. ts:cv:: CONFIG proxy.config.http.parent_proxy.connect_attempts_timeout INT 30 :reloadable: + :overridable: The timeout value (in seconds) for parent cache connection attempts. See :ref:`admin-performance-timeouts` for more discussion on |TS| timeouts. +.. ts:cv:: CONFIG proxy.config.http.parent_proxy.mark_down_hostdb INT 0 + :reloadable: + :overridable: + + Enables (``1``) or disables (``0``) marking parent proxies down in hostdb when a connection + error is detected. Normally parent selection manages parent proxies and will mark them as unavailable + as needed. But when parents are defined in dns with multiple ip addresses, it may be useful to mark the + failing ip down in hostdb. In this case you would enable these updates. + .. 
ts:cv:: CONFIG proxy.config.http.forward.proxy_auth_to_parent INT 0 :reloadable: :overridable: @@ -2018,14 +2052,14 @@ RAM Cache in memory in order to improve performance. **4MB** (4194304) -.. ts:cv:: CONFIG proxy.config.cache.ram_cache.algorithm INT 0 +.. ts:cv:: CONFIG proxy.config.cache.ram_cache.algorithm INT 1 Two distinct RAM caches are supported, the default (0) being the **CLFUS** (*Clocked Least Frequently Used by Size*). As an alternative, a simpler **LRU** (*Least Recently Used*) cache is also available, by changing this configuration to 1. -.. ts:cv:: CONFIG proxy.config.cache.ram_cache.use_seen_filter INT 0 +.. ts:cv:: CONFIG proxy.config.cache.ram_cache.use_seen_filter INT 1 Enabling this option will filter inserts into the RAM cache to ensure that they have been seen at least once. For the **LRU**, this provides scan diff --git a/doc/admin-guide/files/ssl_multicert.config.en.rst b/doc/admin-guide/files/ssl_multicert.config.en.rst index 25bab6efcb7..e8112a78cb7 100644 --- a/doc/admin-guide/files/ssl_multicert.config.en.rst +++ b/doc/admin-guide/files/ssl_multicert.config.en.rst @@ -98,20 +98,8 @@ ssl_ticket_enabled=1|0 (optional) OpenSSL should be upgraded to version 0.9.8f or higher. This option must be set to `0` to disable session ticket support. -ticket_key_name=FILENAME (optional) - The name of session ticket key file which contains a secret for - encrypting and decrypting TLS session tickets. If *FILENAME* is - not an absolute path, it is resolved relative to the - :ts:cv:`proxy.config.ssl.server.cert.path` configuration variable. - This option has no effect if session tickets are disabled by the - ``ssl_ticket_enabled`` option. The contents of the key file should - be 48 random (ASCII) bytes. One way to generate this would be to run - ``head -c48 /dev/urandom | openssl enc -base64 | head -c48 > file.ticket``. - - Session ticket support is enabled by default. 
If neither of the - ``ssl_ticket_enabled`` and ``ticket_key_name`` options are - specified, and internal session ticket key is generated. This - key will be different each time Traffic Server is started. +ticket_key_name=FILENAME (optional) [**REMOVED in 7.1.x and 8.0**] + Ticket key should be set in records.config via :ts:cv:`proxy.config.ssl.server.ticket_key.filename` ssl_key_dialog=builtin|"exec:/path/to/program [args]" (optional) Method used to provide a pass phrase for encrypted private keys. If the diff --git a/doc/admin-guide/monitoring/error-messages.en.rst b/doc/admin-guide/monitoring/error-messages.en.rst index 61094d43771..74ebca25f80 100644 --- a/doc/admin-guide/monitoring/error-messages.en.rst +++ b/doc/admin-guide/monitoring/error-messages.en.rst @@ -145,18 +145,22 @@ is provided in :ref:`appendix-http-status-codes`. The error messages can be customized. The actual response is generated from a template. These templates are stored in files which means the errors responses can be customized by modifying these -files. The default directory for the template files is ``PREFIX/body_factory/default`` -but this can be changed by the configuration variable -:ts:cv:`proxy.config.body_factory.template_sets_dir`. All files in this directory are added to a -lookup table which is consulted when the error message is generated. The name used for lookup is by -default that listed in the :ref:`following table `. It can be overridden by +files. The default directory for the template files is ``PREFIX/body_factory/default`` but this can +be changed by the configuration variable :ts:cv:`proxy.config.body_factory.template_sets_dir`. All +files in this directory are added to a lookup table which is consulted when the error message is +generated. The name used for lookup is by default that listed in the :ref:`following table +`. 
It can be overridden by :ts:cv:`proxy.config.body_factory.template_base` which, if set, is a string that is prepended to the search name along with an underscore. For example, if the default lookup name is ``cache#read_error`` then by default the response will be generated from the template in the file -named ``cache#read_error``. If the template base name were set to "apache" then the lookup would +named ``cache#read_error``. If the template base name were set to ``apache`` then the lookup would look for a file named ``apache_cache#read_error`` in the template table. This can be used to switch out error message sets or, because this variable is overridable, to select an error message set -based on data in the transaction. +based on data in the transaction. In addition the suffix ``_default`` has a special meaning. If +there is a file with the base name and that suffix it is used as the default error page for the base +set, instead of falling back to the global (built in) default page in the case where there is not a +file that matches the specific error. In the example case, if the file ``apache_default`` exists +it would be used instead of ``cache#read_error`` if there is no ``apache_cache#read_error``. The text for an error message is processed as if it were a :ref:`admin-logging-fields` which enables customization by values present in the transaction for which the error occurred. diff --git a/doc/admin-guide/plugins/gzip.en.rst b/doc/admin-guide/plugins/gzip.en.rst index 5ba8363700b..2283f261945 100644 --- a/doc/admin-guide/plugins/gzip.en.rst +++ b/doc/admin-guide/plugins/gzip.en.rst @@ -117,6 +117,15 @@ of the objects will be cached and returned to clients. This may be useful for objects which already have their own compression built-in, to avoid the expense of multiple rounds of compression for trivial gains. +allow +-------- + +Provides a wildcard pattern which will be applied to request URLs. 
Any which +match the pattern will be considered compressible, and only deflated versions +of the objects will be cached and returned to clients. This may be useful for +objects which already have their own compression built-in, to avoid the expense +of multiple rounds of compression for trivial gains. + enabled ------- @@ -141,6 +150,14 @@ will leave the header intact if the client provided it. - For when the proxy parses responses, and the resulting compression and decompression is wasteful. +supported-algorithms +---------------------- + +Provides the compression algorithms that are supported. This will allow the proxy to selectively +support certain compressions. The default is gzip. Multiple algorthims can be selected using ',' delimiter + +-- To selectively support only certain compression algorithms. + Examples ======== @@ -160,7 +177,24 @@ might create a configuration with the following options:: cache false remove-accept-encoding true disallow /notthis/*.js + allow /this/*.js + flush true + + # Allows brotli encoded response from origin but is not capable of brotli compression + [brotli.allowed.com] + enabled true + compressible-content-type text/* + compressible-content-type application/json + flush true + supported-algorithms gzip,deflate + + # Supports brotli compression + [brotli.compress.com] + enabled true + compressible-content-type text/* + compressible-content-type application/json flush true + supported-algorithms br, gzip # This origin does it all [bar.example.com] diff --git a/doc/admin-guide/plugins/header_rewrite.en.rst b/doc/admin-guide/plugins/header_rewrite.en.rst index 4e0b4a454bc..c4b3eb320ee 100644 --- a/doc/admin-guide/plugins/header_rewrite.en.rst +++ b/doc/admin-guide/plugins/header_rewrite.en.rst @@ -702,7 +702,8 @@ Variable Description % Client IP % Client request length % Client HTTP method -% Client unmapped URI +% Client effective URI +% Client unmapped URI path ============ 
================================================================== Header Values diff --git a/doc/admin-guide/plugins/s3_auth.en.rst b/doc/admin-guide/plugins/s3_auth.en.rst index d6f57095fef..7293188f81c 100644 --- a/doc/admin-guide/plugins/s3_auth.en.rst +++ b/doc/admin-guide/plugins/s3_auth.en.rst @@ -27,57 +27,119 @@ to use ``S3`` as your origin server, yet want to avoid direct user access to the content. Using the plugin ----------------- +================ -There are three configuration options for this plugin:: - - --access_key - --secret_key - --virtual_host - --config +Using the plugin in a remap rule would be e.g.:: -Using the first two in a remap rule would be e.g.:: + # remap.config ... @plugin=s3_auth @pparam=--access_key @pparam=my-key \ @pparam=--secret_key @pparam=my-secret \ @pparam=--virtual_host -Alternatively, you can store the access key and secret in an external -configuration file, and point the remap rule(s) to it: +Alternatively, you can store the access key and secret in an external configuration file, and point the remap rule(s) to it:: - ... @plugin=s3_auth @pparam=--config @pparam=s3.config + # remap.config + ... 
@plugin=s3_auth @pparam=--config @pparam=s3_auth_v2.config -Where s3.config would look like:: - # AWS S3 authentication - access_key=my-key - secret_key=my-secret - virtual_host=yes +Where ``s3_auth_v2.config`` could look like:: + # s3_auth_v2.config -For more details on the S3 auth, see:: + access_key=my-key + secret_key=my-secret + version=2 + virtual_host=yes - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +Both ways could be combined as well. -ToDo ---- +AWS Authentication version 4 +============================ -This is a pretty barebone start for the S3 services, it is missing a number of features: +The s3_auth plugin fully implements: `AWS Signing Version 4 `_ / `Authorization Header `_ / `Transferring Payload in a Single Chunk `_ / Unsigned Payload Option -- It does not do UTF8 encoding (as required) +Configuration options:: -- It only implements the v2 authentication mechanism. For details on v4, see + # Mandatory options + --access_key= + --secret_key= + --version=4 - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html + # Optional + --v4-include-headers= + --v4-exclude-headers= + --v4-region-map=region_map.config -- It does not deal with canonicalization of AMZ headers. -- It does not handle POST requests (but do we need to ?)
+If the following option is used then the options could be specified in a file:: + + --config=s3_auth_v4.config + + +The ``s3_auth_v4.config`` config file could look like this:: + + # s3_auth_v4.config + + access_key= + secret_key= + version=4 + v4-include-headers= + v4-exclude-headers= + v4-region-map=region_map.config + +Where the ``region_map.config`` defines the entry-point hostname to region mapping i.e.:: + + # region_map.config + + # "us-east-1" + s3.amazonaws.com : us-east-1 + s3-external-1.amazonaws.com : us-east-1 + s3.dualstack.us-east-1.amazonaws.com : us-east-1 + + # us-west-1 + s3-us-west-1.amazonaws.com : us-west-1 + s3.dualstack.us-west-1.amazonaws.com : us-west-1 + + # Default region if no entry-point matches: + : s3.amazonaws.com + +If ``--v4-region-map`` is not specified the plugin defaults to the mapping defined in `"Regions and Endpoints - S3" `_ +According to `Transferring Payload in a Single Chunk `_ specification +the ``CanonicalHeaders`` list *must* include the ``Host`` header, the ``Content-Type`` header if present in the request and all the ``x-amz-*`` headers +so ``--v4-include-headers`` and ``--v4-exclude-headers`` do not impact those headers and they are *always* signed. + +The ``Via`` and ``X-Forwarded-For`` headers are *always* excluded from the signature since they are meant to be changed by the proxies and signing them could lead to invalidation of the signature. + +If ``--v4-include-headers`` is not specified all headers except those specified in ``--v4-exclude-headers`` will be signed.
+ +If ``--v4-include-headers`` is specified only the headers specified will be signed except those specified in ``--v4-exclude-headers`` + + +AWS Authentication version 2 +============================ + +For more details on the S3 auth version 2 , see: `Signing and Authenticating REST Requests `_ + + +There are 4 plugin configuration options for version 2:: + + --access_key + --secret_key + --virtual_host + --config + --version=2 + +This is a pretty barebone start for the S3 services, it is missing a number of features: + +- It does not do UTF8 encoding (as required) +- It does not deal with canonicalization of AMZ headers. +- It does not handle POST requests (but do we need to ?) - It does not incorporate query parameters. diff --git a/doc/admin-guide/storage/index.en.rst b/doc/admin-guide/storage/index.en.rst index 88ea07f6a25..93813af1c69 100644 --- a/doc/admin-guide/storage/index.en.rst +++ b/doc/admin-guide/storage/index.en.rst @@ -77,7 +77,7 @@ The RAM cache supports two cache eviction algorithms, a regular *LRU* (Least Recently Used) and the more advanced *CLFUS* (Clocked Least Frequently Used by Size; which balances recentness, frequency, and size to maximize hit rate, similar to a most frequently used algorithm). -The default is to use *CLFUS*, and this is controlled via +The default is to use *LRU*, and this is controlled via :ts:cv:`proxy.config.cache.ram_cache.algorithm`. Both the *LRU* and *CLFUS* RAM caches support a configuration to increase diff --git a/doc/checkvers.py b/doc/checkvers.py index cc950532f31..0955c20e95c 100644 --- a/doc/checkvers.py +++ b/doc/checkvers.py @@ -14,44 +14,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sys, os +import sys +import os if __name__ == '__main__': - # Use optparse instead of argparse because this needs to work on old Python versions. 
- import optparse - - parser = optparse.OptionParser(description='Traffic Server Sphinx docs configuration') - parser.add_option('--check-version', action='store_true', dest='checkvers') - - (options, args) = parser.parse_args() - - # Check whether we have a recent version of sphinx. EPEL and CentOS are completely crazy and I don't understand their - # packaging at all. The test below works on Ubuntu and places where sphinx is installed sanely AFAICT. - if options.checkvers: - print 'checking for sphinx version >= 1.2... ', - # Need at least 1.2 because of some command line options stuff HRP added. - # Also 1.2 guarantees sphinx.version_info is available. - try: - import sphinx - - if 'version_info' in dir(sphinx) : - print 'Found Sphinx version {0}'.format(sphinx.version_info) - else : - version = sphinx.__version__ - print 'Found Sphinx version (old) {0}'.format(sphinx.__version__) - sphinx.version_info = version.split('.') - - if sphinx.version_info < (1,2) : - sys.exit(1) - - except Exception as e: - print e - sys.exit(1) - - print 'checking for sphinx.writers.manpage... ', - try: - from sphinx.writers import manpage - print 'yes' - except Exception as e: - print e - sys.exit(1) + # Use optparse instead of argparse because this needs to work on old Python versions. + import optparse + + parser = optparse.OptionParser(description='Traffic Server Sphinx docs configuration') + parser.add_option('--check-version', action='store_true', dest='checkvers') + + (options, args) = parser.parse_args() + + # Check whether we have a recent version of sphinx. EPEL and CentOS are completely crazy and I don't understand their + # packaging at all. The test below works on Ubuntu and places where sphinx is installed sanely AFAICT. + if options.checkvers: + print 'checking for sphinx version >= 1.2... ', + # Need at least 1.2 because of some command line options stuff HRP added. + # Also 1.2 guarantees sphinx.version_info is available. 
+ try: + import sphinx + + if 'version_info' in dir(sphinx): + print 'Found Sphinx version {0}'.format(sphinx.version_info) + else: + version = sphinx.__version__ + print 'Found Sphinx version (old) {0}'.format(sphinx.__version__) + sphinx.version_info = version.split('.') + + if sphinx.version_info < (1, 2): + sys.exit(1) + + except Exception as e: + print e + sys.exit(1) + + print 'checking for sphinx.writers.manpage... ', + try: + from sphinx.writers import manpage + print 'yes' + except Exception as e: + print e + sys.exit(1) diff --git a/doc/conf.py b/doc/conf.py index 760570c7821..a85eda382c4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -27,7 +27,8 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys, os +import sys +import os from sphinx import version_info # If extensions (or modules to document with autodoc) are in another directory, @@ -47,19 +48,19 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ - 'sphinx.ext.graphviz', - 'sphinx.ext.intersphinx', - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', - 'traffic-server', + 'sphinx.ext.graphviz', + 'sphinx.ext.intersphinx', + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', + 'traffic-server', ] -if version_info >= (1,4) : - extensions.append('sphinx.ext.imgmath') -else : - extensions.append('sphinx.ext.pngmath') +if version_info >= (1, 4): + extensions.append('sphinx.ext.imgmath') +else: + extensions.append('sphinx.ext.pngmath') # XXX Disabling docxygen for now, since it make RTD documentation builds time # out, eg. 
https://readthedocs.org/projects/trafficserver/builds/3525976/ @@ -107,31 +108,31 @@ locale_dirs = ['locale/'] gettext_compact = False -## HACK for Read-the-Docs -## Generate .mo files just in time +# HACK for Read-the-Docs +# Generate .mo files just in time if os.environ.get('READTHEDOCS') == 'True': - import polib - print "Generating .mo files", - for locale_dir in locale_dirs: - for path, dummy, filenames in os.walk(locale_dir): - for filename in filenames: - po_file = os.path.join(path, filename) - base, ext = os.path.splitext(po_file) - if ext == ".po": - mo_file = base + ".mo" - po = polib.pofile(po_file) - po.save_as_mofile(fpath=mo_file) - print "done" + import polib + print "Generating .mo files", + for locale_dir in locale_dirs: + for path, dummy, filenames in os.walk(locale_dir): + for filename in filenames: + po_file = os.path.join(path, filename) + base, ext = os.path.splitext(po_file) + if ext == ".po": + mo_file = base + ".mo" + po = polib.pofile(po_file) + po.save_as_mofile(fpath=mo_file) + print "done" else: - # On RedHat-based distributions, install the python-sphinx_rtd_theme package - # to get an end result tht looks more like readthedoc.org. - try: - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - except: - pass -## End of HACK + # On RedHat-based distributions, install the python-sphinx_rtd_theme package + # to get an end result tht looks more like readthedoc.org. + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except: + pass +# End of HACK # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -163,12 +164,67 @@ # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] -nitpicky=1 +nitpicky = 1 + +# Autolink issue references. +# See Customizing the Parser in the docutils.parsers.rst module. 
+ +from docutils import nodes +from docutils.parsers.rst import states +from docutils.utils import punctuation_chars +from docutils.utils import unescape + +# Customize parser.inliner in the only way that Sphinx supports. +# docutils.parsers.rst.Parser takes an instance of states.Inliner or a +# subclass, but Sphinx initializes the parser without any arguments, +# in SphinxStandaloneReader.set_parser('restructuredtext'), +# which is called from Publisher.set_components(). + +# states.Inliner isn't a new-style class, so super() isn't an option. +BaseInliner = states.Inliner + + +class Inliner(states.Inliner): + def init_customizations(self, settings): + self.__class__ = BaseInliner + BaseInliner.init_customizations(self, settings) + self.__class__ = Inliner + + # Copied from states.Inliner.init_customizations(). + # In Docutils 0.13 these are locals. + if not hasattr(self, 'start_string_prefix'): + self.start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' % + (punctuation_chars.openers, + punctuation_chars.delimiters)) + if not hasattr(self, 'end_string_suffix'): + self.end_string_suffix = (u'($|(?=\\s|[\x00%s%s%s]))' % + (punctuation_chars.closing_delimiters, + punctuation_chars.delimiters, + punctuation_chars.closers)) + + issue = re.compile( + ur''' + {start_string_prefix} + TS-\d+ + {end_string_suffix}'''.format( + start_string_prefix=self.start_string_prefix, + end_string_suffix=self.end_string_suffix), + re.VERBOSE | re.UNICODE) + + self.implicit_dispatch.append((issue, self.issue_reference)) + + def issue_reference(self, match, lineno): + text = match.group(0) + + rawsource = unescape(text, True) + text = unescape(text, False) + + refuri = 'https://issues.apache.org/jira/browse/' + text -# Autolink issue references -# moved into traffic_server Sphinx extension -trafficserver_jira_url='https://issues.apache.org/jira/browse/' -trafficserver_github_url='https://github.com/apache/trafficserver/issues/' + return [nodes.reference(rawsource, text, refuri=refuri)] + + 
+states.Inliner = Inliner # -- Options for HTML output --------------------------------------------------- @@ -208,18 +264,18 @@ # Include a stylesheet that overrides default table styling, to provide # content wrapping. html_context = { - 'css_files': [ - '_static/override.css' - ] -} -if os.environ.get('READTHEDOCS', None) == 'True': - html_context = { 'css_files': [ - 'https://media.readthedocs.org/css/sphinx_rtd_theme.css', - 'https://media.readthedocs.org/css/readthedocs-doc-embed.css', - '_static/override.css' + '_static/override.css' ] - } +} +if os.environ.get('READTHEDOCS', None) == 'True': + html_context = { + 'css_files': [ + 'https://media.readthedocs.org/css/sphinx_rtd_theme.css', + 'https://media.readthedocs.org/css/readthedocs-doc-embed.css', + '_static/override.css' + ] + } # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -268,21 +324,21 @@ # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ - ('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation', - u'dev@trafficserver.apache.org', 'manual'), + ('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation', + u'dev@trafficserver.apache.org', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -317,8 +373,6 @@ # documents and includes the same brief description in both the HTML # and manual page outputs. -from docutils import nodes -from docutils.utils import unescape from docutils.transforms import frontmatter from sphinx.writers import manpage @@ -326,41 +380,47 @@ # that Sphinx supports BaseWriter = manpage.ManualPageWriter + + class ManualPageWriter(BaseWriter): - def translate(self): - transform = frontmatter.DocTitle(self.document) + def translate(self): + transform = frontmatter.DocTitle(self.document) + + section, index = transform.candidate_index(self.document) + if index: - section, index = transform.candidate_index(self.document) - if index: + # A sentence after the title is the manual page description + if len(section) > 1 and isinstance(section[1], nodes.paragraph): - # A sentence after the title is the manual page description - if len(section) > 1 and isinstance(section[1], nodes.paragraph): + description = section.pop(1).astext() + description = description[:1].lower() + description[1:] + description = description.rstrip('.') - description = section.pop(1).astext() - description = description[:1].lower() + description[1:] - description = description.rstrip('.') + self.document.settings.subtitle = description - self.document.settings.subtitle = description + # Instead of section_level = -1, use the standard Docutils + # DocTitle transform to hide the top level title + transform.promote_title(self.document) - # Instead of section_level = -1, use the standard Docutils - # DocTitle transform to hide the top level title - transform.promote_title(self.document) + # The title is the manual page 
name + transform.set_metadata() - # The title is the manual page name - transform.set_metadata() + BaseWriter.translate(self) - BaseWriter.translate(self) manpage.ManualPageWriter = ManualPageWriter BaseTranslator = manpage.ManualPageTranslator + + class ManualPageTranslator(BaseTranslator): - def __init__(self, builder, *args, **kwds): - BaseTranslator.__init__(self, builder, *args, **kwds) + def __init__(self, builder, *args, **kwds): + BaseTranslator.__init__(self, builder, *args, **kwds) + + # Instead of section_level = -1, use the standard Docutils + # DocTitle transform to hide the top level title + self.section_level = 0 - # Instead of section_level = -1, use the standard Docutils - # DocTitle transform to hide the top level title - self.section_level = 0 manpage.ManualPageTranslator = ManualPageTranslator @@ -370,9 +430,9 @@ def __init__(self, builder, *args, **kwds): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation', - u'dev@trafficserver.apache.org', 'ApacheTrafficServer', 'One line description of project.', - 'Miscellaneous'), + ('index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation', + u'dev@trafficserver.apache.org', 'ApacheTrafficServer', 'One line description of project.', + 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. diff --git a/doc/developer-guide/api/functions/TSAcceptor.en.rst b/doc/developer-guide/api/functions/TSAcceptor.en.rst new file mode 100644 index 00000000000..35bc21e9902 --- /dev/null +++ b/doc/developer-guide/api/functions/TSAcceptor.en.rst @@ -0,0 +1,50 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed + with this work for additional information regarding copyright + ownership. 
The ASF licenses this file to you under the Apache + License, Version 2.0 (the "License"); you may not use this file + except in compliance with the License. You may obtain a copy of + the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied. See the License for the specific language governing + permissions and limitations under the License. + +.. include:: ../../../common.defs + +.. default-domain:: c + +TSAcceptor +********** + +Traffic Server API's related to Accept objects + +Synopsis +======== + +`#include ` + +.. function:: TSAcceptor TSAcceptorGet(TSVConn sslp) +.. function:: TSAcceptor TSAcceptorGetbyID(int id) +.. function:: int TSAcceptorIDGet(TSAcceptor acceptor) +.. function:: int TSAcceptorCount() + + +Description +=========== + +Traffic Server allows plugins to get information from an accept object that created a certain TSVConn object using the above mentioned APIs. +An acceptor thread listens for incoming connections and creates the virtual connection (:type:`TSVConn`) for each accepted connection. + +:func:`TSAcceptorGet` returns :type:`TSAcceptor` object that created :arg:`sslp`. + +:func:`TSAcceptorGetbyID` returns the :type:`TSAcceptor` object identified by :arg:`id`. :type:`TSAcceptor` represents the acceptor object created by the core +traffic server. + +:func:`TSAcceptorIDGet` returns the Integer number that identifies :arg:`acceptor`. All the cloned :type:`TSAcceptor` objects will have the same identifying number. + +:func:`TSAcceptorCount` returns the number of :type:`TSAcceptor` objects created by the server. 
diff --git a/doc/developer-guide/api/functions/TSClientProtocolStack.en.rst b/doc/developer-guide/api/functions/TSClientProtocolStack.en.rst index 2d99e758939..dff2945d782 100644 --- a/doc/developer-guide/api/functions/TSClientProtocolStack.en.rst +++ b/doc/developer-guide/api/functions/TSClientProtocolStack.en.rst @@ -42,41 +42,43 @@ Synopsis Description =========== -These functions are used to explore the protocol stack of the client (user agent) connection to -|TS|. The functions :func:`TSHttpTxnClientProtocolStackGet` and -:func:`TSHttpSsnClientProtocolStackGet` can be used to retrieve the entire protocol stack for the -user agent connection. :func:`TSHttpTxnClientProtocolStackContains` and -:func:`TSHttpSsnClientProtocolStackContains` will check for a specific protocol :arg:`tag` being +These functions are used to explore the protocol stack of the client (user agent) connection to +|TS|. The functions :func:`TSHttpTxnClientProtocolStackGet` and +:func:`TSHttpSsnClientProtocolStackGet` can be used to retrieve the entire protocol stack for the +user agent connection. :func:`TSHttpTxnClientProtocolStackContains` and +:func:`TSHttpSsnClientProtocolStackContains` will check for a specific protocol :arg:`tag` being present in the stack. -Each protocol is represented by tag which is a null terminated string. A particular tag will always -be returned as the same character pointer and so protocols can be reliably checked with pointer -comparisons. :func:`TSNormalizedProtocolTag` will return this character pointer for a specific -:arg:`tag`. A return value of :const:`NULL` indicates the provided :arg:`tag` is not registered as -a known protocol tag. :func:`TSRegisterProtocolTag` registers the :arg:`tag` and then returns its +Each protocol is represented by tag which is a null terminated string. A particular tag will always +be returned as the same character pointer and so protocols can be reliably checked with pointer +comparisons. 
:func:`TSNormalizedProtocolTag` will return this character pointer for a specific +:arg:`tag`. A return value of :const:`NULL` indicates the provided :arg:`tag` is not registered as +a known protocol tag. :func:`TSRegisterProtocolTag` registers the :arg:`tag` and then returns its normalized value. This is useful for plugins that provide custom protocols for user agents. -The protocols are ordered from higher level protocols to the lower level ones on which the higher -operate. For instance a stack might look like "http/1.1,tls/1.2,tcp,ipv4". For -:func:`TSHttpTxnClientProtocolStackGet` and :func:`TSHttpSsnClientProtocolStackGet` these values -are placed in the array :arg:`result`. :arg:`count` is the maximum number of elements of -:arg:`result` that may be modified by the function call. If :arg:`actual` is not :const:`NULL` then -the actual number of elements in the protocol stack will be returned. If this is equal or less than -:arg:`count` then all elements were returned. If it is larger then some layers were omitted from -:arg:`result`. If the full stack is required :arg:`actual` can be used to resize :arg:`result` to -be sufficient to hold all of the elements and the function called again with updated :arg:`count` -and :arg:`result`. In practice the maximum number of elements will is almost certain to be less -than 10 which therefore should suffice. These functions return :const:`TS_SUCCESS` on success and +The protocols are ordered from higher level protocols to the lower level ones on which the higher +operate. For instance a stack might look like "http/1.1,tls/1.2,tcp,ipv4". For +:func:`TSHttpTxnClientProtocolStackGet` and :func:`TSHttpSsnClientProtocolStackGet` these values +are placed in the array :arg:`result`. :arg:`count` is the maximum number of elements of +:arg:`result` that may be modified by the function call. If :arg:`actual` is not :const:`NULL` then +the actual number of elements in the protocol stack will be returned. 
If this is equal or less than +:arg:`count` then all elements were returned. If it is larger then some layers were omitted from +:arg:`result`. If the full stack is required :arg:`actual` can be used to resize :arg:`result` to +be sufficient to hold all of the elements and the function called again with updated :arg:`count` +and :arg:`result`. In practice the maximum number of elements will is almost certain to be less +than 10 which therefore should suffice. These functions return :const:`TS_SUCCESS` on success and :const:`TS_ERROR` on failure which should only occurr if :arg:`txnp` or :arg:`ssnp` are invalid. -The :func:`TSHttpTxnClientProtocolStackContains` and :func:`TSHttpSsnClientProtocolStackContains` -functions are provided for the convenience when only the presence of a protocol is of interest, not -its location or the presence of other protocols. These functions return NULL if the protocol +The :func:`TSHttpTxnClientProtocolStackContains` and :func:`TSHttpSsnClientProtocolStackContains` +functions are provided for the convenience when only the presence of a protocol is of interest, not +its location or the presence of other protocols. These functions return NULL if the protocol :arg:`tag` is not present, and a pointer to the normalized tag if it is present. The strings are matched with an anchor prefix search, as with debug tags. For instance if :arg:`tag` is "tls" then it will match "tls/1.2" or "tls/1.3". This makes checking for TLS or IP more convenient. If more precision is required the entire protocol stack can be retrieved and processed more thoroughly. +.. _protocol_tags: + The protocol tags defined by |TS|. =========== ========= @@ -107,4 +109,3 @@ use :func:`TSHttpTxnClientProtocolStackGet` and :func:`TSHttpTxnClientProtocolSt .. 
literalinclude:: ../../../../example/protocol-stack/protocol-stack.cc :language: c :lines: 31-46 - diff --git a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst index 3ae47902464..0f5094e632a 100644 --- a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst +++ b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst @@ -29,7 +29,6 @@ Synopsis `#include ` -.. type:: TSOverridableConfigKey .. function:: TSReturnCode TSHttpTxnConfigIntSet(TSHttpTxn txnp, TSOverridableConfigKey key, TSMgmtInt value) .. function:: TSReturnCode TSHttpTxnConfigIntGet(TSHttpTxn txnp, TSOverridableConfigKey key, TSMgmtInt* value) @@ -74,6 +73,7 @@ c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_FROM` :ts:cv:`prox c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_REFERER` :ts:cv:`proxy.config.http.anonymize_remove_referer` c:member:`TS_CONFIG_HTTP_ANONYMIZE_REMOVE_USER_AGENT` :ts:cv:`proxy.config.http.anonymize_remove_user_agent` c:member:`TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT` :ts:cv:`proxy.config.http.attach_server_session_to_client` +c:member:`TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE` :ts:cv:`proxy.config.http.safe_requests_retryable` c:member:`TS_CONFIG_HTTP_AUTH_SERVER_SESSION_PRIVATE` :ts:cv:`proxy.config.http.auth_server_session_private` c:member:`TS_CONFIG_HTTP_BACKGROUND_FILL_ACTIVE_TIMEOUT` :ts:cv:`proxy.config.http.background_fill_active_timeout` c:member:`TS_CONFIG_HTTP_BACKGROUND_FILL_COMPLETED_THRESHOLD` :ts:cv:`proxy.config.http.background_fill_completed_threshold` @@ -140,6 +140,7 @@ c:member:`TS_CONFIG_HTTP_NUMBER_OF_REDIRECTIONS` :ts:cv:`prox c:member:`TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS` :ts:cv:`proxy.config.http.origin_max_connections` c:member:`TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE` :ts:cv:`proxy.config.http.origin_max_connections_queue` c:member:`TS_CONFIG_HTTP_PARENT_PROXY_TOTAL_CONNECT_ATTEMPTS` :ts:cv:`proxy.config.http.parent_proxy.total_connect_attempts` 
+c:member:`TS_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB` :ts:cv:`proxy.config.http.parent_proxy.mark_down_hostdb` c:member:`TS_CONFIG_HTTP_POST_CHECK_CONTENT_LENGTH_ENABLED` :ts:cv:`proxy.config.http.post.check.content_length.enabled` c:member:`TS_CONFIG_HTTP_POST_CONNECT_ATTEMPTS_TIMEOUT` :ts:cv:`proxy.config.http.post_connect_attempts_timeout` c:member:`TS_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY` :ts:cv:`proxy.config.http.redirect_use_orig_cache_key` @@ -167,6 +168,10 @@ c:member:`TS_CONFIG_SSL_HSTS_MAX_AGE` :ts:cv:`prox c:member:`TS_CONFIG_URL_REMAP_PRISTINE_HOST_HDR` :ts:cv:`proxy.config.url_remap.pristine_host_hdr` c:member:`TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT` :ts:cv:`proxy.config.websocket.active_timeout` c:member:`TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT` :ts:cv:`proxy.config.websocket.no_activity_timeout` +c:member:`TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD` :ts:cv:`proxy.config.http.parent_proxy.fail_threshold` +c:member:`TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME` :ts:cv:`proxy.config.http.parent_proxy.retry_time` +c:member:`TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS` :ts:cv:`proxy.config.http.parent_proxy.per_parent_connect_attempts` +c:member:`TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT` :ts:cv:`proxy.config.http.parent_proxy.connect_attempts_timeout` ================================================================== ==================================================================== Examples diff --git a/doc/developer-guide/api/functions/TSHttpTxnClientPacketMarkSet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnClientPacketMarkSet.en.rst index 0272a271c55..437f528b036 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnClientPacketMarkSet.en.rst +++ b/doc/developer-guide/api/functions/TSHttpTxnClientPacketMarkSet.en.rst @@ -41,4 +41,4 @@ See Also .. 
_Traffic Shaping: https://cwiki.apache.org/confluence/display/TS/Traffic+Shaping - :ts:cv:`proxy.config.net.sock_packet_mark_in` and :ts:jira:`1090` + :ts:cv:`proxy.config.net.sock_packet_mark_in` and TS-1090 diff --git a/doc/developer-guide/api/functions/TSHttpTxnClientPacketTosSet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnClientPacketTosSet.en.rst index d5f1f3aa8b1..f1b694b9167 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnClientPacketTosSet.en.rst +++ b/doc/developer-guide/api/functions/TSHttpTxnClientPacketTosSet.en.rst @@ -47,4 +47,4 @@ See Also .. _Traffic Shaping: https://cwiki.apache.org/confluence/display/TS/Traffic+Shaping - :ts:cv:`proxy.config.net.sock_packet_tos_in` and :ts:jira:`1090` + :ts:cv:`proxy.config.net.sock_packet_tos_in` and TS-1090 diff --git a/doc/developer-guide/api/functions/TSHttpTxnCacheParentSelectionUrlGet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnParentSelectionUrlGet.en.rst similarity index 86% rename from doc/developer-guide/api/functions/TSHttpTxnCacheParentSelectionUrlGet.en.rst rename to doc/developer-guide/api/functions/TSHttpTxnParentSelectionUrlGet.en.rst index 9a14a6ed8cb..405423407b2 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnCacheParentSelectionUrlGet.en.rst +++ b/doc/developer-guide/api/functions/TSHttpTxnParentSelectionUrlGet.en.rst @@ -19,7 +19,7 @@ .. default-domain:: c -TSHttpTxnCacheParentSelectionUrlSet +TSHttpTxnParentSelectionUrlSet *********************************** Traffic Server Parent Selection consistent hash URL manipulation API. @@ -29,8 +29,8 @@ Synopsis `#include ` -.. function:: TSReturnCode TSHttpTxnCacheParentSelectionUrlSet(TSHttpTxn txnp, TSMBuffer bufp, TSMLoc offset) -.. function:: TSReturnCode TSHttpTxnCacheParentSelectionUrlGet(TSHttpTxn txnp, TSMBuffer bufp, TSMLoc offset) +.. function:: TSReturnCode TSHttpTxnParentSelectionUrlSet(TSHttpTxn txnp, TSMBuffer bufp, TSMLoc offset) +.. 
function:: TSReturnCode TSHttpTxnParentSelectionUrlGet(TSHttpTxn txnp, TSMBuffer bufp, TSMLoc offset) Description =========== @@ -58,9 +58,9 @@ generating the over-ride URL. For example, since the over-ride URL is arbitrary, the URL scheme and hostname can simply be set to "fake://fake.fake" when creating the over-ride URL. -:func:`TSHttpTxnCacheParentSelectionUrlSet` will set the over-ride URL. +:func:`TSHttpTxnParentSelectionUrlSet` will set the over-ride URL. -:func:`TSHttpTxnCacheParentSelectionUrlGet` will get the over-ride URL. +:func:`TSHttpTxnParentSelectionUrlGet` will get the over-ride URL. Return Values ============= diff --git a/doc/developer-guide/api/functions/TSHttpTxnServerPacketMarkSet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnServerPacketMarkSet.en.rst index 28c4c0dfe2d..af31dd84ed3 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnServerPacketMarkSet.en.rst +++ b/doc/developer-guide/api/functions/TSHttpTxnServerPacketMarkSet.en.rst @@ -45,4 +45,4 @@ See Also .. _Traffic Shaping: https://cwiki.apache.org/confluence/display/TS/Traffic+Shaping - :ts:cv:`proxy.config.net.sock_packet_mark_out` and :ts:jira:`1090` + :ts:cv:`proxy.config.net.sock_packet_mark_out` and TS-1090 diff --git a/doc/developer-guide/api/functions/TSHttpTxnServerPacketTosSet.en.rst b/doc/developer-guide/api/functions/TSHttpTxnServerPacketTosSet.en.rst index 60a7f3da7d9..c412be1d08f 100644 --- a/doc/developer-guide/api/functions/TSHttpTxnServerPacketTosSet.en.rst +++ b/doc/developer-guide/api/functions/TSHttpTxnServerPacketTosSet.en.rst @@ -49,4 +49,4 @@ See Also .. 
_Traffic Shaping: https://cwiki.apache.org/confluence/display/TS/Traffic+Shaping - :ts:cv:`proxy.config.net.sock_packet_tos_out` and :ts:jira:`1090` + :ts:cv:`proxy.config.net.sock_packet_tos_out` and TS-1090 diff --git a/doc/developer-guide/api/functions/TSLifecycleHookAdd.en.rst b/doc/developer-guide/api/functions/TSLifecycleHookAdd.en.rst index 45c73139009..24ef96d158e 100644 --- a/doc/developer-guide/api/functions/TSLifecycleHookAdd.en.rst +++ b/doc/developer-guide/api/functions/TSLifecycleHookAdd.en.rst @@ -162,7 +162,7 @@ History ======= Lifecycle hooks were introduced to solve process initialization ordering issues -:ts:jira:`1487`. Different API calls required different modules of |TS| to be +(TS-1487). Different API calls required different modules of |TS| to be initialized for the call to work, but others did not work that late in initialization, which was problematic because all of them could effectively only be called from :func:`TSPluginInit` . The solution was to move diff --git a/doc/developer-guide/api/functions/TSProtoSet.en.rst b/doc/developer-guide/api/functions/TSProtoSet.en.rst new file mode 100644 index 00000000000..8b57209c385 --- /dev/null +++ b/doc/developer-guide/api/functions/TSProtoSet.en.rst @@ -0,0 +1,46 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed + with this work for additional information regarding copyright + ownership. The ASF licenses this file to you under the Apache + License, Version 2.0 (the "License"); you may not use this file + except in compliance with the License. You may obtain a copy of + the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied. 
See the License for the specific language governing + permissions and limitations under the License. + +.. include:: ../../../common.defs + +.. default-domain:: c + +TSProtoSet +****************** + +Synopsis +======== + +`#include ` + +.. function:: TSNextProtocolSet TSGetcloneProtoSet(TSAcceptor tna) +.. function:: TSNextProtocolSet TSUnregisterProtocol(TSNextProtocolSet protoset, const char* protocol) +.. function:: void TSRegisterProtocolSet(TSVConn sslp, TSNextProtocolSet ps) + +Description +=========== + +:func:`TSGetcloneProtoSet` makes a copy of the ProtocolSet to be advertised by the ssl connection associated with :arg:`tna`. This function +returns :type:`TSNextProtocolSet` object which points to a clone of the protocolset owned by :arg:`tna`. This type represents the protocolset +containing the protocols which are advertised by an ssl connection during ssl handshake. Each :type:`TSAcceptor` object is associated with a protocolset. + + +:func:`TSUnregisterProtocol` unregisters :arg:`protocol` from :arg:`protoset` and returns the protocol set. +The returned protocol set needs to be registered with the :type:`TSVConn` using :func:`TSRegisterProtocolSet` that will advertise the protocols. + + +:func:`TSRegisterProtocolSet` registers :arg:`ps` with :arg:`sslp`. This function clears the protocolset string created by the already registered +protocolset before registering the new protocolset. On Success, the ssl object associated with :arg:`sslp` will then advertise the protocols contained in :arg:`ps`. diff --git a/doc/developer-guide/api/functions/TSTypes.en.rst b/doc/developer-guide/api/functions/TSTypes.en.rst index b8200827827..ba925458de8 100644 --- a/doc/developer-guide/api/functions/TSTypes.en.rst +++ b/doc/developer-guide/api/functions/TSTypes.en.rst @@ -118,36 +118,16 @@ more widely. Those are described on this page. .. type:: TSMutex -.. 
type:: TSParseResult - - This set of enums are possible values returned by - :func:`TSHttpHdrParseReq` and :func:`TSHttpHdrParseResp`. - .. type:: TSPluginRegistrationInfo The following struct is used by :func:`TSPluginRegister`. It stores registration information about the plugin. -.. type:: TSRecordDataType - - An enumeration that specifies the type of a value in an internal data structure that is accessible via the API. - .. type:: TSRemapInterface .. type:: TSRemapRequestInfo -.. type:: TSReturnCode - - An indicator of the results of an API call. A value of :const:`TS_SUCCESS` means the call was successful. Any other value - indicates a failure and is specific to the API call. - -.. type:: TSSDKVersion - - Starting 2.0, SDK now follows same versioning as Traffic Server. - -.. type:: TSServerState - .. type:: TSTextLogObject This type represents a custom log file that you create with @@ -160,12 +140,6 @@ more widely. Those are described on this page. .. type:: TSThreadFunc -.. type:: TSThreadPool - -.. type:: TSUuid - - Opaque type that refers to an allocated UUID. - .. type:: TSUuidVersion A version value for at :type:`TSUuid`. @@ -180,4 +154,27 @@ more widely. Those are described on this page. .. type:: TSVConn + A virtual connection. This is the basic mechanism for abstracting I/O operations in |TS|. + +.. type:: TSNetVConnection + + A subtype of :type:`TSVConn` that provides additional IP network information and operations. + .. type:: TSVIO + +.. type:: ModuleVersion + + A module version. + +.. cpp:type:: ModuleVersion + + A module version. + +.. cpp:class:: template DLL + + An anchor for a double linked instrusive list of instance of :arg:`T`. + +.. type:: TSAcceptor + +.. 
type:: TSNextProtocolSet + diff --git a/doc/developer-guide/api/functions/TSVConnTunnel.en.rst b/doc/developer-guide/api/functions/TSVConnTunnel.en.rst index 0669b9a4759..85fa1842b51 100644 --- a/doc/developer-guide/api/functions/TSVConnTunnel.en.rst +++ b/doc/developer-guide/api/functions/TSVConnTunnel.en.rst @@ -32,8 +32,5 @@ Description =========== Set the SSL connection :arg:`svc` to convert to a blind tunnel. Can be called -from :member:`TS_VCONN_PRE_ACCEPT_HOOK` or :member:`TS_SSL_SNI_HOOK` / :member:`TS_SSL_CERT_HOOK`. +from :member:`TS_VCONN_PRE_ACCEPT_HOOK`, :member:`TS_SSL_SERVERNAME_HOOK`, or :member:`TS_SSL_SNI_HOOK` / :member:`TS_SSL_CERT_HOOK`. -For this to work from the :member:`TS_SSL_SNI_HOOK` or :member:`TS_SSL_CERT_HOOK`, -either the server must be running OpenSSL 1.0.2 or a version of OpenSSL 1.0.1 -with the appropriate patch installed. diff --git a/doc/developer-guide/api/types/CoreTypes.en.rst b/doc/developer-guide/api/types/CoreTypes.en.rst new file mode 100644 index 00000000000..801ee8b492b --- /dev/null +++ b/doc/developer-guide/api/types/CoreTypes.en.rst @@ -0,0 +1,48 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +.. include:: ../../../common.defs + +.. 
This is basically a holding pen to avoid dangling references as I can't find a better way to deal with + non-ATS types. Some of this was handled by the "EXTERNAL_TYPES" in ext/traffic-server.py but that's + even uglier than this. + +System Types +************ + +Synopsis +======== + +This is a place for defining compiler or system provided types to avoid dangling references. + +Description +=========== + +These types are provided by the compiler ("built-in") or from a required operating system, POSIX, or package header. + + + +.. cpp:type:: uint24_t + + + +.. cpp:type:: Event + + +.. cpp:type:: DLL + + +.. cpp:type:: INK_MD5 \ No newline at end of file diff --git a/doc/developer-guide/api/types/SystemTypes.en.rst b/doc/developer-guide/api/types/SystemTypes.en.rst index 8ff4fc8df8b..5c1a3697ca8 100644 --- a/doc/developer-guide/api/types/SystemTypes.en.rst +++ b/doc/developer-guide/api/types/SystemTypes.en.rst @@ -16,8 +16,6 @@ .. include:: ../../../common.defs -.. default-domain:: c - .. This is basically a holding pen to avoid dangling references as I can't find a better way to deal with non-ATS types. Some of this was handled by the "EXTERNAL_TYPES" in ext/traffic-server.py but that's even uglier than this. @@ -35,6 +33,23 @@ Description These types are provided by the compiler ("built-in") or from a required operating system, POSIX, or package header. -.. type:: off_t +.. c:type:: off_t + + `Reference `__. + +.. cpp:type:: off_t + + `Reference `__. + +.. cpp:type:: uint64_t + + `Reference `__. + +.. cpp:type:: uint32_t + + `Reference `__. + +.. cpp:type:: uint8_t - `Reference `_. + `Reference `__. + \ No newline at end of file diff --git a/doc/developer-guide/api/types/TSHttpHookID.en.rst b/doc/developer-guide/api/types/TSHttpHookID.en.rst index 6ebdcae23c2..0520931fbf0 100644 --- a/doc/developer-guide/api/types/TSHttpHookID.en.rst +++ b/doc/developer-guide/api/types/TSHttpHookID.en.rst @@ -74,6 +74,8 @@ Enumeration Members .. 
c:member:: TSHttpHookID TS_SSL_CERT_HOOK +.. c:member:: TSHttpHookID TS_SSL_SERVERNAME_HOOK + .. c:member:: TSHttpHookID TS_SSL_LAST_HOOK .. c:member:: TSHttpHookID TS_HTTP_LAST_HOOK @@ -81,3 +83,5 @@ Enumeration Members Description =========== +Note that :member:`TS_SSL_CERT_HOOK` and :member:`TS_SSL_SNI_HOOK` hook the same openssl callbacks. +In openssl 1.0.2 and beyond :member:`TS_SSL_SERVERNAME_HOOK` is involved only for the openssl servername callback. :member:`TS_SSL_SNI_HOOK` and :member:`TS_SSL_CERT_HOOK` are called for the openssl certificate callback. diff --git a/doc/developer-guide/architecture/architecture.en.rst b/doc/developer-guide/architecture/architecture.en.rst index ec2e32c0f57..a38a1a56e02 100644 --- a/doc/developer-guide/architecture/architecture.en.rst +++ b/doc/developer-guide/architecture/architecture.en.rst @@ -1147,12 +1147,12 @@ including the size of each stripe. .. [#store-disk-array] - Work is under way on extending this to include objects that are in the - memory cache. (See issue :ts:jira:`2020`) + `Work is under way `_ on + extending this to include objects that are in the memory cache. .. [#coalesced-spans] This linked list is mostly ignored in later processing, causing all but one file or directory storage units on the same device to be ignored. See - :ts:jira:`1869`. + `TS-1869 `_. diff --git a/doc/developer-guide/architecture/consistency.en.rst b/doc/developer-guide/architecture/consistency.en.rst index 413cff54d2e..c5d5fafd20e 100644 --- a/doc/developer-guide/architecture/consistency.en.rst +++ b/doc/developer-guide/architecture/consistency.en.rst @@ -59,7 +59,7 @@ Volume Tagging ~~~~~~~~~~~~~~ Currently, :term:`cache volumes ` are allocated somewhat -arbitrarily from storage elements. `This enhancement :ts:jira:`1728``__ +arbitrarily from storage elements. 
`This enhancement `__ allows :file:`storage.config` to assign :term:`storage units ` to specific :term:`volumes ` although the volumes must still be listed in :file:`volume.config` in general and in particular to map domains to diff --git a/doc/ext/doxygen.py b/doc/ext/doxygen.py index 864592d9bc3..7ffdc004ab3 100644 --- a/doc/ext/doxygen.py +++ b/doc/ext/doxygen.py @@ -13,148 +13,156 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -import os, subprocess +import os +import subprocess from docutils import nodes from os import path from sphinx import addnodes from sphinx.util import osutil try: - from lxml import etree + from lxml import etree except ImportError: - etree = None + etree = None # Run Doxygen on Read the Docs to generate XML files if os.environ.get('READTHEDOCS'): - subprocess.call('doxygen') + subprocess.call('doxygen') if etree and path.isfile('xml/index.xml'): - # Doxygen files that have already been parsed - cache = {} + # Doxygen files that have already been parsed + cache = {} + + # Doxygen index + index = etree.parse('xml/index.xml') - # Doxygen index - index = etree.parse('xml/index.xml') def escape(name): - """ - Partial reimplementation in Python of Doxygen escapeCharsInString() - """ + """ + Partial reimplementation in Python of Doxygen escapeCharsInString() + """ + + return name.replace('_', '__').replace(':', '_1').replace('/', '_2').replace('<', '_3').replace('>', '_4').replace('*', '_5').replace('&', '_6').replace('|', '_7').replace('.', '_8').replace('!', '_9').replace(',', '_00').replace(' ', '_01').replace('{', '_02').replace('}', '_03').replace('?', '_04').replace('^', '_05').replace('%', '_06').replace('(', '_07').replace(')', '_08').replace('+', '_09').replace('=', '_0A').replace('$', '_0B').replace('\\', '_0C') - return name.replace('_', '__').replace(':', '_1').replace('/', '_2').replace('<', '_3').replace('>', '_4').replace('*', '_5').replace('&', 
'_6').replace('|', '_7').replace('.', '_8').replace('!', '_9').replace(',', '_00').replace(' ', '_01').replace('{', '_02').replace('}', '_03').replace('?', '_04').replace('^', '_05').replace('%', '_06').replace('(', '_07').replace(')', '_08').replace('+', '_09').replace('=', '_0A').replace('$', '_0B').replace('\\', '_0C') class doctree_resolved: - """ - Add links from an API description to the source code for that object. - Doxygen knows where in the source code it's located. - Based on the sphinx.ext.viewcode and sphinx.ext.linkcode extensions. - """ + """ + Add links from an API description to the source code for that object. + Doxygen knows where in the source code it's located. + Based on the sphinx.ext.viewcode and sphinx.ext.linkcode extensions. + """ - has_link = None + has_link = None - def __init__(self, app, doctree, docname): + def __init__(self, app, doctree, docname): - self.app = app - self.docname = docname + self.app = app + self.docname = docname - self.traverse(doctree, None) - if self.has_link: + self.traverse(doctree, None) + if self.has_link: - # Style the links - raw = nodes.raw('', '', format='html') - doctree.insert(0, raw) + # Style the links + raw = nodes.raw( + '', '', format='html') + doctree.insert(0, raw) - def traverse(self, node, owner): - """ - If an API description is nested in another description, - lookup the child in the context of the parent - """ + def traverse(self, node, owner): + """ + If an API description is nested in another description, + lookup the child in the context of the parent + """ - # nodes.Text iterates over characters, not children - for child in node.children: - if isinstance(child, addnodes.desc): - for desc_child in child.children: - if isinstance(desc_child, addnodes.desc_signature): + # nodes.Text iterates over characters, not children + for child in node.children: + if isinstance(child, addnodes.desc): + for desc_child in child.children: + if isinstance(desc_child, addnodes.desc_signature): - # Get the 
name of the object. An owner in the signature - # overrides an owner from a parent description. - signature_owner = None - for child in desc_child.children: - if isinstance(child, addnodes.desc_addname): + # Get the name of the object. An owner in the signature + # overrides an owner from a parent description. + signature_owner = None + for child in desc_child.children: + if isinstance(child, addnodes.desc_addname): - # An owner in the signature ends with :: - signature_owner = child.astext()[:-2] + # An owner in the signature ends with :: + signature_owner = child.astext()[:-2] - elif isinstance(child, addnodes.desc_name): - name = child.astext() + elif isinstance(child, addnodes.desc_name): + name = child.astext() - break + break - # Lookup the object in the Doxygen index - try: - compound, = index.xpath('descendant::compound[(not($owner) or name[text() = $owner]) and descendant::name[text() = $name]][1]', owner=signature_owner or owner, name=name) + # Lookup the object in the Doxygen index + try: + compound, = index.xpath( + 'descendant::compound[(not($owner) or name[text() = $owner]) and descendant::name[text() = $name]][1]', owner=signature_owner or owner, name=name) - except ValueError: - continue + except ValueError: + continue - filename = compound.get('refid') + '.xml' - if filename not in cache: - cache[filename] = etree.parse('xml/' + filename) + filename = compound.get('refid') + '.xml' + if filename not in cache: + cache[filename] = etree.parse('xml/' + filename) - # An enumvalue has no location - memberdef, = cache[filename].xpath('descendant::compounddef[compoundname[text() = $name]]', name=name) or cache[filename].xpath('descendant::memberdef[name[text() = $name] | enumvalue[name[text() = $name]]]', name=name) + # An enumvalue has no location + memberdef, = cache[filename].xpath('descendant::compounddef[compoundname[text() = $name]]', name=name) or cache[filename].xpath( + 'descendant::memberdef[name[text() = $name] | enumvalue[name[text() = 
$name]]]', name=name) - # Append the link after the object's signature. - # Get the source file and line number from Doxygen and use - # them to construct the link. - location = memberdef.find('location') - filename = path.basename(location.get('file')) + # Append the link after the object's signature. + # Get the source file and line number from Doxygen and use + # them to construct the link. + location = memberdef.find('location') + filename = path.basename(location.get('file')) - # Declarations have no bodystart - line = location.get('bodystart') or location.get('line') + # Declarations have no bodystart + line = location.get('bodystart') or location.get('line') - emphasis = nodes.emphasis('', ' ' + filename + ' line ' + line) + emphasis = nodes.emphasis('', ' ' + filename + ' line ' + line) - # Use a relative link if the output is HTML, otherwise fall - # back on an absolute link to Read the Docs. I haven't - # figured out how to get the page name for e.g. a struct - # from the XML files so ape Doxygen escapeCharsInString() - # instead. - refuri = 'api/' + escape(filename) + '_source.html#l' + line.rjust(5, '0') - if self.app.builder.name == 'html': - refuri = osutil.relative_uri(self.app.builder.get_target_uri(self.docname), refuri) + # Use a relative link if the output is HTML, otherwise fall + # back on an absolute link to Read the Docs. I haven't + # figured out how to get the page name for e.g. a struct + # from the XML files so ape Doxygen escapeCharsInString() + # instead. 
+ refuri = 'api/' + escape(filename) + '_source.html#l' + line.rjust(5, '0') + if self.app.builder.name == 'html': + refuri = osutil.relative_uri(self.app.builder.get_target_uri(self.docname), refuri) - else: - refuri = 'http://docs.trafficserver.apache.org/en/latest/' + refuri + else: + refuri = 'http://docs.trafficserver.apache.org/en/latest/' + refuri - reference = nodes.reference('', '', emphasis, classes=['viewcode-link'], reftitle='Source code', refuri=refuri) - desc_child += reference + reference = nodes.reference('', '', emphasis, classes=[ + 'viewcode-link'], reftitle='Source code', refuri=refuri) + desc_child += reference - # Style the links - self.has_link = True + # Style the links + self.has_link = True + + else: + self.traverse(desc_child, name) - else: - self.traverse(desc_child, name) + else: + self.traverse(child, owner) - else: - self.traverse(child, owner) def setup(app): - if etree and path.isfile('xml/index.xml'): + if etree and path.isfile('xml/index.xml'): - # The doctree-read event hasn't got the docname argument - app.connect('doctree-resolved', doctree_resolved) + # The doctree-read event hasn't got the docname argument + app.connect('doctree-resolved', doctree_resolved) - else: - if not etree: - app.warn('''Python lxml library not found + else: + if not etree: + app.warn('''Python lxml library not found The library is used to add links from an API description to the source code for that object. Depending on your system, try installing the python-lxml package.''') - if not path.isfile('xml/index.xml'): - app.warn('''Doxygen files not found: xml/index.xml + if not path.isfile('xml/index.xml'): + app.warn('''Doxygen files not found: xml/index.xml The files are used to add links from an API description to the source code for that object. 
Run "$ make doxygen" to generate these XML files.''') diff --git a/doc/ext/traffic-server.py b/doc/ext/traffic-server.py index bdfc32158ba..26e3c46b524 100644 --- a/doc/ext/traffic-server.py +++ b/doc/ext/traffic-server.py @@ -26,8 +26,7 @@ :license: Apache """ -from docutils import nodes, utils -from docutils.parsers.rst.roles import set_classes +from docutils import nodes from docutils.parsers import rst from docutils.parsers.rst import directives from sphinx.domains import Domain, ObjType, std @@ -35,101 +34,6 @@ from sphinx.locale import l_, _ import sphinx -# Autolink for Trafficserver issues -# Moved from doc/conf.py, now integrated into TS domain -# -# Two types of issues supported: -# * Jira: for archive purpose only (after the Jira to Github move) -# * Github: for new issues (after the Jira to Github move) -# -# Syntax: -# :ts:jira:`XXXX` where XXXX is the issue number -# :ts:github:`XXXX` where XXXX is the issue number -# -# Output -# * Prefix issue number with 'TS-' -# * Render HTML link: -# - either to https://issues.apache.org/jira/browse/TS-XXXX -# - or to https://github.com/apache/trafficserver/issues/XXXX - -def ts_jira_role(name, rawtext, issue_num, lineno, inliner, options={}, content=[]): - """Link to a Trafficserver Jira issue. - - Returns 2 part tuple containing list of nodes to insert into the - document and a list of system messages. Both are allowed to be - empty. - - :param name: The role name used in the document. - :param rawtext: The entire markup snippet, with role. - :param issue_num: The issue number marked with the role. - :param lineno: The line number where rawtext appears in the input. - :param inliner: The inliner instance that called us. - :param options: Directive options for customization. - :param content: The directive content for customization. 
- """ - app = inliner.document.settings.env.app - try: - base_url = app.config.trafficserver_jira_url - if not base_url: - raise AttributeError - except AttributeError, err: - raise ValueError('trafficserver_jira_url configuration values not set (%s)' % str(err)) - # - issue_prefix = 'TS-' - node = make_link_node(rawtext, app, base_url, issue_prefix, issue_num, options) - return [node], [] - -def ts_github_role(name, rawtext, issue_num, lineno, inliner, options={}, content=[]): - """Link to a Trafficserver Github issue. - - Returns 2 part tuple containing list of nodes to insert into the - document and a list of system messages. Both are allowed to be - empty. - - :param name: The role name used in the document. - :param rawtext: The entire markup snippet, with role. - :param text: The text marked with the role. - :param lineno: The line number where rawtext appears in the input. - :param inliner: The inliner instance that called us. - :param options: Directive options for customization. - :param content: The directive content for customization. - """ - app = inliner.document.settings.env.app - try: - base_url = app.config.trafficserver_github_url - if not base_url: - raise AttributeError - except AttributeError, err: - raise ValueError('trafficserver_github_url configuration values not set (%s)' % str(err)) - # - issue_prefix = '' - node = make_link_node(rawtext, app, base_url, issue_prefix, issue_num, options) - return [node], [] - -def make_link_node(rawtext, app, base_url, issue_prefix, issue_num, options): - """Create a link to a Apache Jira resource. - - :param rawtext: Text being replaced with link node. - :param app: Sphinx application context - :param type: Link type ('jira' or 'github') - :param slug: ID of the thing to link to - :param options: Options dictionary passed to role func. 
- """ - # - try: - issue_num_int = int(issue_num) - if issue_num_int <= 0: - raise ValueError - except ValueError: - raise ValueError('Trafficserver issue number must be a number greater than or equal to 1; ' - '"%s" is invalid.' % text, line=lineno) - # - base_url = base_url + issue_prefix + '{0}' - ref = base_url.format(issue_num) - set_classes(options) - node = nodes.reference(rawtext, issue_prefix + issue_num, refuri=ref, - **options) - return node class TSConfVar(std.Target): """ @@ -144,19 +48,19 @@ class TSConfVar(std.Target): """ option_spec = { - 'class' : rst.directives.class_option, - 'reloadable' : rst.directives.flag, - 'deprecated' : rst.directives.flag, - 'overridable' : rst.directives.flag, - 'units' : rst.directives.unchanged, + 'class': rst.directives.class_option, + 'reloadable': rst.directives.flag, + 'deprecated': rst.directives.flag, + 'overridable': rst.directives.flag, + 'units': rst.directives.unchanged, } required_arguments = 3 - optional_arguments = 1 # default is optional, special case if omitted + optional_arguments = 1 # default is optional, special case if omitted final_argument_whitespace = True has_content = True def make_field(self, tag, value): - field = nodes.field(); + field = nodes.field() field.append(nodes.field_name(text=tag)) body = nodes.field_body() if (isinstance(value, basestring)): @@ -223,34 +127,37 @@ def run(self): fl.append(self.make_field('Deprecated', 'Yes')) # Get any contained content - nn = nodes.compound(); + nn = nodes.compound() self.state.nested_parse(self.content, self.content_offset, nn) # Create an index node so that Sphinx adds this config variable to the # index. nodes.make_id() specifies the link anchor name that is # implicitly generated by the anchor node above. 
indexnode = sphinx.addnodes.index(entries=[]) - if sphinx.version_info >= (1, 4) : + if sphinx.version_info >= (1, 4): indexnode['entries'].append( ('single', _('%s') % cv_name, nodes.make_id(cv_name), '', '') ) - else : + else: indexnode['entries'].append( ('single', _('%s') % cv_name, nodes.make_id(cv_name), '') ) - return [ indexnode, node, fl, nn ] + return [indexnode, node, fl, nn] class TSConfVarRef(XRefRole): def process_link(self, env, ref_node, explicit_title_p, title, target): return title, target + def metrictypes(typename): - return directives.choice(typename.lower(), ('counter','gauge','derivative','flag','text')) + return directives.choice(typename.lower(), ('counter', 'gauge', 'derivative', 'flag', 'text')) + def metricunits(unitname): - return directives.choice(unitname.lower(), ('ratio','percent','kbits','mbits','bytes','kbytes','mbytes','nanoseconds','microseconds','milliseconds','seconds')) + return directives.choice(unitname.lower(), ('ratio', 'percent', 'kbits', 'mbits', 'bytes', 'kbytes', 'mbytes', 'nanoseconds', 'microseconds', 'milliseconds', 'seconds')) + class TSStat(std.Target): """ @@ -270,17 +177,17 @@ class TSStat(std.Target): option_spec = { 'type': metrictypes, 'unit': metricunits, - 'introduced' : rst.directives.unchanged, - 'deprecated' : rst.directives.unchanged, - 'ungathered' : rst.directives.flag + 'introduced': rst.directives.unchanged, + 'deprecated': rst.directives.unchanged, + 'ungathered': rst.directives.flag } required_arguments = 3 - optional_arguments = 1 # example value is optional + optional_arguments = 1 # example value is optional final_argument_whitespace = True has_content = True def make_field(self, tag, value): - field = nodes.field(); + field = nodes.field() field.append(nodes.field_name(text=tag)) body = nodes.field_body() if (isinstance(value, basestring)): @@ -306,7 +213,7 @@ def run(self): # Next, make a signature node. This creates a permalink and a # highlighted background when the link is selected. 
title = sphinx.addnodes.desc_signature(stat_name, '') - title['ids'].append(nodes.make_id('stat-'+stat_name)) + title['ids'].append(nodes.make_id('stat-' + stat_name)) title['names'].append(stat_name) title['first'] = False title['objtype'] = 'stat' @@ -347,7 +254,7 @@ def run(self): fl.append(self.make_field('Example', stat_example)) # Get any contained content - nn = nodes.compound(); + nn = nodes.compound() self.state.nested_parse(self.content, self.content_offset, nn) # Create an index node so that Sphinx adds this statistic to the @@ -355,24 +262,22 @@ def run(self): # implicitly generated by the anchor node above. indexnode = sphinx.addnodes.index(entries=[]) - if sphinx.version_info >= (1, 4) : + if sphinx.version_info >= (1, 4): indexnode['entries'].append( ('single', _('%s') % stat_name, nodes.make_id(stat_name), '', '') ) - else : + else: indexnode['entries'].append( ('single', _('%s') % stat_name, nodes.make_id(stat_name), '') ) - return [ indexnode, node, fl, nn ] + return [indexnode, node, fl, nn] + class TSStatRef(XRefRole): def process_link(self, env, ref_node, explicit_title_p, title, target): return title, target -class TSIssueRef(XRefRole): - def process_link(self, env, ref_node, explicit_title_p, title, target): - return 'TS-' + title, 'https://issues.apache.org/jira/browse/TS-' + target class TrafficServerDomain(Domain): """ @@ -385,32 +290,27 @@ class TrafficServerDomain(Domain): object_types = { 'cv': ObjType(l_('configuration variable'), 'cv'), - 'stat': ObjType(l_('statistic'), 'stat'), - 'jira': ObjType(l_('jira'), 'jira') + 'stat': ObjType(l_('statistic'), 'stat') } directives = { - 'cv' : TSConfVar, - 'stat' : TSStat + 'cv': TSConfVar, + 'stat': TSStat } roles = { - 'cv' : TSConfVarRef(), - 'stat' : TSStatRef(), - 'jira' : ts_jira_role, - 'github' : ts_github_role + 'cv': TSConfVarRef(), + 'stat': TSStatRef() } initial_data = { - 'cv' : {}, # full name -> docname - 'stat' : {}, - 'issue' : {} + 'cv': {}, # full name -> docname + 'stat': 
{} } dangling_warnings = { - 'cv' : "No definition found for configuration variable '%(target)s'", - 'stat' : "No definition found for statistic '%(target)s'", - 'issue' : "No definition found for issue '%(target)s'" + 'cv': "No definition found for configuration variable '%(target)s'", + 'stat': "No definition found for statistic '%(target)s'" } def clear_doc(self, docname): @@ -422,20 +322,14 @@ def clear_doc(self, docname): for var, doc in stat_list.items(): if doc == docname: del stat_list[var] - issue_list = self.data['issue'] - for var, doc in issue_list.items(): - if doc == docname: - del issue_list[var] def find_doc(self, key, obj_type): zret = None - if obj_type == 'cv' : + if obj_type == 'cv': obj_list = self.data['cv'] - elif obj_type == 'stat' : + elif obj_type == 'stat': obj_list = self.data['stat'] - elif obj_type == 'issue' : - obj_list = self.data['issue'] else: obj_list = None @@ -445,20 +339,16 @@ def find_doc(self, key, obj_type): return zret def resolve_xref(self, env, src_doc, builder, obj_type, target, node, cont_node): - if obj_type == 'issue': - return sphinx.util.nodes.make_refnode(builder, src_doc, src_doc, nodes.make_id(target), cont_node, None) - else: - dst_doc = self.find_doc(target, obj_type) - if (dst_doc): - return sphinx.util.nodes.make_refnode(builder, src_doc, dst_doc, nodes.make_id(target), cont_node, 'records.config') + dst_doc = self.find_doc(target, obj_type) + if (dst_doc): + return sphinx.util.nodes.make_refnode(builder, src_doc, dst_doc, nodes.make_id(target), cont_node, 'records.config') def get_objects(self): for var, doc in self.data['cv'].iteritems(): yield var, var, 'cv', doc, var, 1 for var, doc in self.data['stat'].iteritems(): yield var, var, 'stat', doc, var, 1 - for var, doc in self.data['issue'].iteritems(): - yield var, var, 'issue', doc, var, 1 + # These types are ignored as missing references for the C++ domain. # We really need to do better with this. 
Editing this file for each of @@ -471,10 +361,12 @@ def get_objects(self): 'off_t', 'size_t', 'time_t', 'Event', 'INK_MD5', 'DLL', 'sockaddr' - )) +)) # Clean up specific references that we know will never be defined but are implicitly used by # other domain directives. Hand convert them to literals. + + def xref_cleanup(app, env, node, contnode): rdomain = node['refdomain'] rtype = node['reftype'] @@ -486,22 +378,21 @@ def xref_cleanup(app, env, node, contnode): node = nodes.literal() node += contnode return node - return; + return + def setup(app): app.add_crossref_type('configfile', 'file', - objname='Configuration file', - indextemplate='pair: %s; Configuration files') + objname='Configuration file', + indextemplate='pair: %s; Configuration files') app.add_crossref_type('logfile', 'file', - objname='Log file', - indextemplate='pair: %s; Log files') + objname='Log file', + indextemplate='pair: %s; Log files') rst.roles.register_generic_role('arg', nodes.emphasis) rst.roles.register_generic_role('const', nodes.literal) app.add_domain(TrafficServerDomain) - app.add_config_value('trafficserver_jira_url', None, 'env') - app.add_config_value('trafficserver_github_url', None, 'env') # Types that we want the C domain to consider built in for word in EXTERNAL_TYPES: diff --git a/doc/manpages.py b/doc/manpages.py index 3ccd9de0f17..761a74c402a 100644 --- a/doc/manpages.py +++ b/doc/manpages.py @@ -14,54 +14,56 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys, os +import sys +import os man_pages = [ - # Add all files in the reference/api directory to the list of manual - # pages - ('developer-guide/api/functions/' + filename[:-4], filename.split('.', 1)[0], '', None, '3ts') for filename in os.listdir('developer-guide/api/functions/') if filename != 'index.en.rst' and filename.endswith('.rst')] + [ + # Add all files in the reference/api directory to the list of manual + # pages + ('developer-guide/api/functions/' + filename[:-4], filename.split('.', 1)[0], '', None, '3ts') for filename in os.listdir('developer-guide/api/functions/') if filename != 'index.en.rst' and filename.endswith('.rst')] + [ - ('appendices/command-line/traffic_cop.en', 'traffic_cop', u'Traffic Server watchdog', None, '8'), - ('appendices/command-line/traffic_ctl.en', 'traffic_ctl', u'Traffic Server command line tool', None, '8'), - ('appendices/command-line/traffic_crashlog.en', 'traffic_crashlog', u'Traffic Server crash log helper', None, '8'), - ('appendices/command-line/traffic_logcat.en', 'traffic_logcat', u'Traffic Server log spooler', None, '8'), - ('appendices/command-line/traffic_logstats.en', 'traffic_logstats', u'Traffic Server analyzer', None, '8'), - ('appendices/command-line/traffic_manager.en', 'traffic_manager', u'Traffic Server process manager', None, '8'), - ('appendices/command-line/traffic_server.en', 'traffic_server', u'Traffic Server', None, '8'), + ('appendices/command-line/traffic_cop.en', 'traffic_cop', u'Traffic Server watchdog', None, '8'), + ('appendices/command-line/traffic_ctl.en', 'traffic_ctl', u'Traffic Server command line tool', None, '8'), + ('appendices/command-line/traffic_crashlog.en', 'traffic_crashlog', u'Traffic Server crash log helper', None, '8'), + ('appendices/command-line/traffic_logcat.en', 'traffic_logcat', u'Traffic Server log spooler', None, '8'), + ('appendices/command-line/traffic_logstats.en', 'traffic_logstats', u'Traffic Server analyzer', None, '8'), + 
('appendices/command-line/traffic_manager.en', 'traffic_manager', u'Traffic Server process manager', None, '8'), + ('appendices/command-line/traffic_server.en', 'traffic_server', u'Traffic Server', None, '8'), - ('appendices/command-line/tspush.en', 'tspush', u'Push objects into the Traffic Server cache', None, '1'), - ('appendices/command-line/traffic_top.en','traffic_top', u'Display Traffic Server statistics', None, '1'), - ('appendices/command-line/tsxs.en', 'tsxs', u'Traffic Server plugin tool', None, '1'), - ('appendices/command-line/traffic_via.en', 'traffic_via', u'Traffic Server Via header decoder', None, '1'), + ('appendices/command-line/tspush.en', 'tspush', u'Push objects into the Traffic Server cache', None, '1'), + ('appendices/command-line/traffic_top.en', 'traffic_top', u'Display Traffic Server statistics', None, '1'), + ('appendices/command-line/tsxs.en', 'tsxs', u'Traffic Server plugin tool', None, '1'), + ('appendices/command-line/traffic_via.en', 'traffic_via', u'Traffic Server Via header decoder', None, '1'), - ('admin-guide/files/cache.config.en', 'cache.config', u'Traffic Server cache configuration file', None, '5'), - ('admin-guide/files/congestion.config.en', 'congestion.config', u'Traffic Server congestion control configuration file', None, '5'), - ('admin-guide/files/hosting.config.en', 'hosting.config', u'Traffic Server domain hosting configuration file', None, '5'), - ('admin-guide/files/ip_allow.config.en', 'ip_allow.config', u'Traffic Server IP access control configuration file', None, '5'), - ('admin-guide/files/log_hosts.config.en', 'log_hosts.config', u'Traffic Server log host configuration file', None, '5'), - ('admin-guide/files/logging.config.en', 'logging.config', u'Traffic Server logging configuration file', None, '5'), - ('admin-guide/files/metrics.config.en', 'metrics.config', u'Traffic Server dynamic metrics configuration file', None, '5'), - ('admin-guide/files/parent.config.en', 'parent.config', u'Traffic Server parent 
cache configuration file', None, '5'), - ('admin-guide/files/plugin.config.en', 'plugin.config', u'Traffic Server global plugin configuration file', None, '5'), - ('admin-guide/files/records.config.en', 'records.config', u'Traffic Server configuration file', None, '5'), - ('admin-guide/files/remap.config.en', 'remap.config', u'Traffic Server remap rules configuration file', None, '5'), - ('admin-guide/files/splitdns.config.en', 'splitdns.config', u'Traffic Server split DNS configuration file', None, '5'), - ('admin-guide/files/ssl_multicert.config.en', 'ssl_multicert.config', u'Traffic Server SSL certificate configuration file', None, '5'), - ('admin-guide/files/storage.config.en', 'storage.config', u'Traffic Server cache storage configuration file', None, '5'), - ('admin-guide/files/volume.config.en', 'volume.config', u'Traffic Server cache volume configuration file', None, '5'), + ('admin-guide/files/cache.config.en', 'cache.config', u'Traffic Server cache configuration file', None, '5'), + ('admin-guide/files/congestion.config.en', 'congestion.config', u'Traffic Server congestion control configuration file', None, '5'), + ('admin-guide/files/hosting.config.en', 'hosting.config', u'Traffic Server domain hosting configuration file', None, '5'), + ('admin-guide/files/ip_allow.config.en', 'ip_allow.config', u'Traffic Server IP access control configuration file', None, '5'), + ('admin-guide/files/log_hosts.config.en', 'log_hosts.config', u'Traffic Server log host configuration file', None, '5'), + ('admin-guide/files/logging.config.en', 'logging.config', u'Traffic Server logging configuration file', None, '5'), + ('admin-guide/files/metrics.config.en', 'metrics.config', u'Traffic Server dynamic metrics configuration file', None, '5'), + ('admin-guide/files/parent.config.en', 'parent.config', u'Traffic Server parent cache configuration file', None, '5'), + ('admin-guide/files/plugin.config.en', 'plugin.config', u'Traffic Server global plugin configuration file', None, 
'5'), + ('admin-guide/files/records.config.en', 'records.config', u'Traffic Server configuration file', None, '5'), + ('admin-guide/files/remap.config.en', 'remap.config', u'Traffic Server remap rules configuration file', None, '5'), + ('admin-guide/files/splitdns.config.en', 'splitdns.config', u'Traffic Server split DNS configuration file', None, '5'), + ('admin-guide/files/ssl_multicert.config.en', 'ssl_multicert.config', + u'Traffic Server SSL certificate configuration file', None, '5'), + ('admin-guide/files/storage.config.en', 'storage.config', u'Traffic Server cache storage configuration file', None, '5'), + ('admin-guide/files/volume.config.en', 'volume.config', u'Traffic Server cache volume configuration file', None, '5'), ] if __name__ == '__main__': - # Use optparse instead of argparse because this needs to work on old Python versions. - import optparse + # Use optparse instead of argparse because this needs to work on old Python versions. + import optparse - parser = optparse.OptionParser(description='Traffic Server Sphinx docs configuration') - parser.add_option('--section', type=int, default=0, dest='section') + parser = optparse.OptionParser(description='Traffic Server Sphinx docs configuration') + parser.add_option('--section', type=int, default=0, dest='section') - (options, args) = parser.parse_args() + (options, args) = parser.parse_args() - # Print the names of the man pages for the requested manual section. - for page in man_pages: - if options.section == 0 or options.section == int(page[4][0]): - print page[1] + '.' + page[4] + # Print the names of the man pages for the requested manual section. + for page in man_pages: + if options.section == 0 or options.section == int(page[4][0]): + print page[1] + '.' 
+ page[4] diff --git a/example/Makefile.am b/example/Makefile.am index ec414b07d30..acbc156d220 100644 --- a/example/Makefile.am +++ b/example/Makefile.am @@ -54,7 +54,8 @@ example_Plugins = \ statistic.la \ thread-1.la \ txn-data-sink.la \ - version.la + version.la \ + replace-protoset.la example_Plugins += \ cppapi/AsyncHttpFetch.la \ @@ -115,6 +116,7 @@ server_transform_la_SOURCES = server-transform/server-transform.c ssl_preaccept_la_SOURCES = ssl-preaccept/ssl-preaccept.cc ssl_sni_la_SOURCES = ssl-sni/ssl-sni.cc ssl_sni_whitelist_la_SOURCES = ssl-sni-whitelist/ssl-sni-whitelist.cc +replace_protoset_la_SOURCES = replace-protoset/replace-protoset.cc statistic_la_SOURCES = statistic/statistic.cc thread_1_la_SOURCES = thread-1/thread-1.c txn_data_sink_la_SOURCES = txn-data-sink/txn-data-sink.c diff --git a/example/cache-scan/cache-scan.cc b/example/cache-scan/cache-scan.cc index ad9fb47ea2a..63da032941b 100644 --- a/example/cache-scan/cache-scan.cc +++ b/example/cache-scan/cache-scan.cc @@ -25,10 +25,10 @@ * cache_scan.cc: use TSCacheScan to print URLs and headers for objects in * the cache when endpoint /show-cache is requested */ -#include -#include -#include -#include +#include +#include +#include +#include #include "ts/ts.h" #include "ts/experimental.h" @@ -57,7 +57,7 @@ struct cache_scan_state_t { bool write_pending; }; -typedef struct cache_scan_state_t cache_scan_state; +using cache_scan_state = struct cache_scan_state_t; //---------------------------------------------------------------------------- static int @@ -147,7 +147,7 @@ handle_scan(TSCont contp, TSEvent event, void *edata) cstate->total_bytes += TSIOBufferWrite(cstate->resp_buffer, s2, sizeof(s2) - 1); if (!cstate->write_pending) { - cstate->write_pending = 1; + cstate->write_pending = true; TSVIOReenable(cstate->write_vio); } @@ -166,7 +166,7 @@ handle_scan(TSCont contp, TSEvent event, void *edata) cstate->total_bytes += TSIOBufferWrite(cstate->resp_buffer, s, s_len); 
TSVIONBytesSet(cstate->write_vio, cstate->total_bytes); if (!cstate->write_pending) { - cstate->write_pending = 1; + cstate->write_pending = true; TSVIOReenable(cstate->write_vio); } return TS_CACHE_SCAN_RESULT_DONE; @@ -282,7 +282,7 @@ handle_io(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) } case TS_EVENT_VCONN_WRITE_READY: { TSDebug("cache_iter", "ndone: %" PRId64 " total_bytes: % " PRId64, TSVIONDoneGet(cstate->write_vio), cstate->total_bytes); - cstate->write_pending = 0; + cstate->write_pending = false; // the cache scan handler should call vio reenable when there is // available data // TSVIOReenable(cstate->write_vio); diff --git a/example/cppapi/async_http_fetch/AsyncHttpFetch.cc b/example/cppapi/async_http_fetch/AsyncHttpFetch.cc index 1fb29a861d5..fbb5c5713ed 100644 --- a/example/cppapi/async_http_fetch/AsyncHttpFetch.cc +++ b/example/cppapi/async_http_fetch/AsyncHttpFetch.cc @@ -25,6 +25,7 @@ #include #include #include +#include using namespace atscppapi; using std::string; @@ -41,28 +42,28 @@ GlobalPlugin *plugin; class AsyncHttpFetch2 : public AsyncHttpFetch { public: - AsyncHttpFetch2(string request) : AsyncHttpFetch(request){}; + AsyncHttpFetch2(const string &request) : AsyncHttpFetch(request){}; }; class AsyncHttpFetch3 : public AsyncHttpFetch { public: - AsyncHttpFetch3(string request, HttpMethod method) : AsyncHttpFetch(request, method){}; + AsyncHttpFetch3(const string &request, HttpMethod method) : AsyncHttpFetch(request, method){}; }; class DelayedAsyncHttpFetch : public AsyncHttpFetch, public AsyncReceiver { public: - DelayedAsyncHttpFetch(string request, HttpMethod method, std::shared_ptr mutex) - : AsyncHttpFetch(request, method), mutex_(mutex), timer_(nullptr){}; + DelayedAsyncHttpFetch(const string &request, HttpMethod method, std::shared_ptr mutex) + : AsyncHttpFetch(request, method), mutex_(std::move(mutex)), timer_(nullptr){}; void - run() + run() override { timer_ = new AsyncTimer(AsyncTimer::TYPE_ONE_OFF, 1000 /* 1s 
*/); Async::execute(this, timer_, mutex_); } void - handleAsyncComplete(AsyncTimer & /*timer ATS_UNUSED */) + handleAsyncComplete(AsyncTimer & /*timer ATS_UNUSED */) override { TS_DEBUG(TAG, "Receiver should not be reachable"); assert(!getDispatchController()->dispatch()); @@ -73,7 +74,7 @@ class DelayedAsyncHttpFetch : public AsyncHttpFetch, public AsyncReceiverisEnabled(); } - ~DelayedAsyncHttpFetch() { delete timer_; } + ~DelayedAsyncHttpFetch() override { delete timer_; } private: std::shared_ptr mutex_; AsyncTimer *timer_; @@ -94,7 +95,7 @@ class TransactionHookPlugin : public TransactionPlugin, } void - handleSendRequestHeaders(Transaction & /*transaction ATS_UNUSED */) + handleSendRequestHeaders(Transaction & /*transaction ATS_UNUSED */) override { Async::execute(this, new AsyncHttpFetch("http://127.0.0.1/"), getMutex()); ++num_fetches_pending_; @@ -122,7 +123,7 @@ class TransactionHookPlugin : public TransactionPlugin, } void - handleAsyncComplete(AsyncHttpFetch &async_http_fetch) + handleAsyncComplete(AsyncHttpFetch &async_http_fetch) override { // This will be called when our async event is complete. TS_DEBUG(TAG, "AsyncHttpFetch completed"); @@ -130,14 +131,14 @@ class TransactionHookPlugin : public TransactionPlugin, } void - handleAsyncComplete(AsyncHttpFetch2 &async_http_fetch) + handleAsyncComplete(AsyncHttpFetch2 &async_http_fetch) override { // This will be called when our async event is complete. 
TS_DEBUG(TAG, "AsyncHttpFetch2 completed"); handleAnyAsyncComplete(async_http_fetch); } - virtual ~TransactionHookPlugin() + ~TransactionHookPlugin() override { TS_DEBUG(TAG, "Destroyed TransactionHookPlugin!"); // since we die right away, we should not receive the callback for this (using POST request this time) @@ -145,13 +146,13 @@ class TransactionHookPlugin : public TransactionPlugin, } void - handleAsyncComplete(AsyncHttpFetch3 & /* async_http_fetch ATS_UNUSED */) + handleAsyncComplete(AsyncHttpFetch3 & /* async_http_fetch ATS_UNUSED */) override { assert(!"AsyncHttpFetch3 shouldn't have completed!"); } void - handleAsyncComplete(DelayedAsyncHttpFetch & /*async_http_fetch ATS_UNUSED */) + handleAsyncComplete(DelayedAsyncHttpFetch & /*async_http_fetch ATS_UNUSED */) override { assert(!"Should've been canceled!"); } @@ -197,8 +198,8 @@ class GlobalHookPlugin : public GlobalPlugin registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { TS_DEBUG(TAG, "Received a request in handleReadRequestHeadersPostRemap."); diff --git a/example/cppapi/async_http_fetch_streaming/AsyncHttpFetchStreaming.cc b/example/cppapi/async_http_fetch_streaming/AsyncHttpFetchStreaming.cc index dcb78e2611a..e91a3100d6b 100644 --- a/example/cppapi/async_http_fetch_streaming/AsyncHttpFetchStreaming.cc +++ b/example/cppapi/async_http_fetch_streaming/AsyncHttpFetchStreaming.cc @@ -47,10 +47,10 @@ class Intercept : public InterceptPlugin, public AsyncReceiver { main_url_ = transaction.getClientRequest().getUrl().getUrlString(); } - void consume(const string &data, InterceptPlugin::RequestDataType type); - void handleInputComplete(); - void handleAsyncComplete(AsyncHttpFetch &async_http_fetch); - ~Intercept(); + void consume(const string &data, InterceptPlugin::RequestDataType type) override; + void handleInputComplete() override; + void 
handleAsyncComplete(AsyncHttpFetch &async_http_fetch) override; + ~Intercept() override; private: Transaction &transaction_; @@ -68,7 +68,7 @@ class InterceptInstaller : public GlobalPlugin GlobalPlugin::registerHook(Plugin::HOOK_READ_REQUEST_HEADERS_PRE_REMAP); } void - handleReadRequestHeadersPreRemap(Transaction &transaction) + handleReadRequestHeadersPreRemap(Transaction &transaction) override { transaction.addPlugin(new Intercept(transaction)); TS_DEBUG(TAG, "Added intercept"); diff --git a/example/cppapi/async_timer/AsyncTimer.cc b/example/cppapi/async_timer/AsyncTimer.cc index c940031516e..0fb337ef349 100644 --- a/example/cppapi/async_timer/AsyncTimer.cc +++ b/example/cppapi/async_timer/AsyncTimer.cc @@ -38,7 +38,7 @@ class TimerEventReceiver : public AsyncReceiver } void - handleAsyncComplete(AsyncTimer &timer ATSCPPAPI_UNUSED) + handleAsyncComplete(AsyncTimer &timer ATSCPPAPI_UNUSED) override { TS_DEBUG(TAG, "Got timer event in object %p!", this); if ((type_ == AsyncTimer::TYPE_ONE_OFF) || (max_instances_ && (++instance_count_ == max_instances_))) { @@ -47,7 +47,7 @@ class TimerEventReceiver : public AsyncReceiver } } - ~TimerEventReceiver() { delete timer_; } + ~TimerEventReceiver() override { delete timer_; } private: int max_instances_; int instance_count_; diff --git a/example/cppapi/boom/boom.cc b/example/cppapi/boom/boom.cc index dfe14d811e7..a919bd3b8b1 100644 --- a/example/cppapi/boom/boom.cc +++ b/example/cppapi/boom/boom.cc @@ -194,7 +194,7 @@ BoomResponseRegistry::register_error_codes(const std::vector &error } } // forward declaration -bool get_file_contents(std::string fileName, std::string &contents); +bool get_file_contents(const std::string &fileName, std::string &contents); // Examine the error file directory and populate the error_response // map with the file contents. 
@@ -312,7 +312,7 @@ class BoomTransactionPlugin : public TransactionPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { transaction.getClientResponse().setStatusCode(status_); transaction.getClientResponse().setReasonPhrase(reason_); @@ -339,7 +339,7 @@ stringSplit(const std::string &in, char delim, std::vector &res) // Utility routine to read file contents into a string // @returns true if the file exists and has been successfully read bool -get_file_contents(std::string fileName, std::string &contents) +get_file_contents(const std::string &fileName, std::string &contents) { if (fileName.empty()) { return false; @@ -376,7 +376,7 @@ class BoomGlobalPlugin : public atscppapi::GlobalPlugin } // Upcall method that is called for every transaction. - void handleReadResponseHeaders(Transaction &transaction); + void handleReadResponseHeaders(Transaction &transaction) override; private: BoomGlobalPlugin(); diff --git a/example/cppapi/clientredirect/ClientRedirect.cc b/example/cppapi/clientredirect/ClientRedirect.cc index f4aca409d88..9db0a696b13 100644 --- a/example/cppapi/clientredirect/ClientRedirect.cc +++ b/example/cppapi/clientredirect/ClientRedirect.cc @@ -48,7 +48,7 @@ class ClientRedirectTransactionPlugin : public atscppapi::TransactionPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { transaction.getClientResponse().setStatusCode(HTTP_STATUS_MOVED_TEMPORARILY); transaction.getClientResponse().setReasonPhrase("Moved Temporarily"); @@ -56,7 +56,7 @@ class ClientRedirectTransactionPlugin : public atscppapi::TransactionPlugin transaction.resume(); } - virtual ~ClientRedirectTransactionPlugin() {} + ~ClientRedirectTransactionPlugin() override {} private: string location_; }; @@ -66,7 +66,7 @@ class ClientRedirectGlobalPlugin : public GlobalPlugin public: ClientRedirectGlobalPlugin() { 
registerHook(HOOK_SEND_REQUEST_HEADERS); } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { if (transaction.getClientRequest().getUrl().getQuery().find("redirect=1") != string::npos) { transaction.addPlugin(new ClientRedirectTransactionPlugin(transaction, "http://www.linkedin.com/")); diff --git a/example/cppapi/clientrequest/ClientRequest.cc b/example/cppapi/clientrequest/ClientRequest.cc index eb96c495464..3179813717e 100644 --- a/example/cppapi/clientrequest/ClientRequest.cc +++ b/example/cppapi/clientrequest/ClientRequest.cc @@ -44,7 +44,7 @@ class GlobalHookPlugin : public GlobalPlugin } void - handleReadRequestHeadersPreRemap(Transaction &transaction) + handleReadRequestHeadersPreRemap(Transaction &transaction) override { cout << "Hello from handleReadRequesHeadersPreRemap!" << endl; @@ -69,7 +69,7 @@ class GlobalHookPlugin : public GlobalPlugin } void - handleReadRequestHeadersPostRemap(Transaction &transaction) + handleReadRequestHeadersPostRemap(Transaction &transaction) override { cout << "Hello from handleReadRequesHeadersPostRemap!" << endl; @@ -123,7 +123,7 @@ class GlobalHookPlugin : public GlobalPlugin } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { cout << "Hello from handleSendRequestHeaders!" 
<< endl; cout << "---------------------IP INFORMATION-----------------" << endl; diff --git a/example/cppapi/custom_error_remap_plugin/CustomErrorRemapPlugin.cc b/example/cppapi/custom_error_remap_plugin/CustomErrorRemapPlugin.cc index a3e599f57cb..dea2fab51bf 100644 --- a/example/cppapi/custom_error_remap_plugin/CustomErrorRemapPlugin.cc +++ b/example/cppapi/custom_error_remap_plugin/CustomErrorRemapPlugin.cc @@ -33,7 +33,7 @@ class MyRemapPlugin : public RemapPlugin public: MyRemapPlugin(void **instance_handle) : RemapPlugin(instance_handle) {} Result - doRemap(const Url &map_from_url, const Url &map_to_url, Transaction &transaction, bool &redirect) + doRemap(const Url &map_from_url, const Url &map_to_url, Transaction &transaction, bool &redirect) override { if (transaction.getClientRequest().getUrl().getQuery().find("custom=1") != string::npos) { transaction.setStatusCode(HTTP_STATUS_FORBIDDEN); diff --git a/example/cppapi/customresponse/CustomResponse.cc b/example/cppapi/customresponse/CustomResponse.cc index 462af08b94a..8ebd9920c3f 100644 --- a/example/cppapi/customresponse/CustomResponse.cc +++ b/example/cppapi/customresponse/CustomResponse.cc @@ -53,14 +53,14 @@ class CustomResponseTransactionPlugin : public atscppapi::TransactionPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { transaction.getClientResponse().setStatusCode(status_); transaction.getClientResponse().setReasonPhrase(reason_); transaction.resume(); } - virtual ~CustomResponseTransactionPlugin() {} + ~CustomResponseTransactionPlugin() override {} private: HttpStatus status_; string reason_; @@ -72,7 +72,7 @@ class ClientRedirectGlobalPlugin : public GlobalPlugin public: ClientRedirectGlobalPlugin() { registerHook(HOOK_SEND_REQUEST_HEADERS); } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { if 
(transaction.getClientRequest().getUrl().getQuery().find("custom=1") != string::npos) { transaction.addPlugin(new CustomResponseTransactionPlugin(transaction, HTTP_STATUS_OK, "Ok", diff --git a/example/cppapi/globalhook/GlobalHookPlugin.cc b/example/cppapi/globalhook/GlobalHookPlugin.cc index a94e3488808..cccf900f0b5 100644 --- a/example/cppapi/globalhook/GlobalHookPlugin.cc +++ b/example/cppapi/globalhook/GlobalHookPlugin.cc @@ -33,8 +33,8 @@ class GlobalHookPlugin : public GlobalPlugin { public: GlobalHookPlugin() { registerHook(HOOK_READ_REQUEST_HEADERS_PRE_REMAP); } - virtual void - handleReadRequestHeadersPreRemap(Transaction &transaction) + void + handleReadRequestHeadersPreRemap(Transaction &transaction) override { std::cout << "Hello from handleReadRequesHeadersPreRemap!" << std::endl; transaction.resume(); diff --git a/example/cppapi/gzip_transformation/GzipTransformationPlugin.cc b/example/cppapi/gzip_transformation/GzipTransformationPlugin.cc index 6a195c922bc..785b7ef54e0 100644 --- a/example/cppapi/gzip_transformation/GzipTransformationPlugin.cc +++ b/example/cppapi/gzip_transformation/GzipTransformationPlugin.cc @@ -95,7 +95,7 @@ class SomeTransformationPlugin : public TransformationPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { TS_DEBUG(TAG, "Added X-Content-Transformed header"); transaction.getClientResponse().getHeaders()["X-Content-Transformed"] = "1"; @@ -103,13 +103,13 @@ class SomeTransformationPlugin : public TransformationPlugin } void - consume(const string &data) + consume(const string &data) override { produce(data); } void - handleInputComplete() + handleInputComplete() override { Helpers::ContentType content_type = Helpers::getContentType(transaction_); if (content_type == Helpers::TEXT_HTML) { @@ -124,7 +124,7 @@ class SomeTransformationPlugin : public TransformationPlugin setOutputComplete(); } - virtual ~SomeTransformationPlugin() {} + 
~SomeTransformationPlugin() override {} private: Transaction &transaction_; }; @@ -139,8 +139,8 @@ class GlobalHookPlugin : public GlobalPlugin registerHook(HOOK_SEND_RESPONSE_HEADERS); } - virtual void - handleSendRequestHeaders(Transaction &transaction) + void + handleSendRequestHeaders(Transaction &transaction) override { // Since we can only decompress gzip we will change the accept encoding header // to gzip, even if the user cannot accept gziped content we will return to them @@ -154,8 +154,8 @@ class GlobalHookPlugin : public GlobalPlugin transaction.resume(); } - virtual void - handleReadResponseHeaders(Transaction &transaction) + void + handleReadResponseHeaders(Transaction &transaction) override { TS_DEBUG(TAG, "Determining if we need to add an inflate transformation or a deflate transformation.."); // We're guaranteed to have been returned either gzipped content or Identity. @@ -176,8 +176,8 @@ class GlobalHookPlugin : public GlobalPlugin transaction.resume(); } - virtual void - handleSendResponseHeaders(Transaction &transaction) + void + handleSendResponseHeaders(Transaction &transaction) override { // If the client supported gzip then we can guarantee they are receiving gzip since regardless of the // origins content-encoding we returned gzip, so let's make sure the content-encoding header is correctly diff --git a/example/cppapi/intercept/intercept.cc b/example/cppapi/intercept/intercept.cc index caff5405885..b98ba6ea1ff 100644 --- a/example/cppapi/intercept/intercept.cc +++ b/example/cppapi/intercept/intercept.cc @@ -36,9 +36,9 @@ class Intercept : public InterceptPlugin { public: Intercept(Transaction &transaction) : InterceptPlugin(transaction, InterceptPlugin::SERVER_INTERCEPT) {} - void consume(const string &data, InterceptPlugin::RequestDataType type); - void handleInputComplete(); - ~Intercept() { cout << "Shutting down" << endl; } + void consume(const string &data, InterceptPlugin::RequestDataType type) override; + void handleInputComplete() 
override; + ~Intercept() override { cout << "Shutting down" << endl; } }; class InterceptInstaller : public GlobalPlugin @@ -49,7 +49,7 @@ class InterceptInstaller : public GlobalPlugin GlobalPlugin::registerHook(Plugin::HOOK_READ_REQUEST_HEADERS_PRE_REMAP); } void - handleReadRequestHeadersPreRemap(Transaction &transaction) + handleReadRequestHeadersPreRemap(Transaction &transaction) override { transaction.addPlugin(new Intercept(transaction)); cout << "Added intercept" << endl; diff --git a/example/cppapi/internal_transaction_handling/InternalTransactionHandling.cc b/example/cppapi/internal_transaction_handling/InternalTransactionHandling.cc index 259b40c291d..2a7e03e39e1 100644 --- a/example/cppapi/internal_transaction_handling/InternalTransactionHandling.cc +++ b/example/cppapi/internal_transaction_handling/InternalTransactionHandling.cc @@ -41,8 +41,8 @@ class AllTransactionsGlobalPlugin : public GlobalPlugin registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { TS_DEBUG(TAG, "Received a request in handleReadRequestHeadersPostRemap."); transaction.resume(); @@ -58,8 +58,8 @@ class NoInternalTransactionsGlobalPlugin : public GlobalPlugin, public AsyncRece registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { TS_DEBUG(TAG, "Received a request in handleReadRequestHeadersPostRemap."); std::shared_ptr mutex(new Mutex()); // required for async operation @@ -68,7 +68,7 @@ class NoInternalTransactionsGlobalPlugin : public GlobalPlugin, public AsyncRece } void - handleAsyncComplete(AsyncHttpFetch &provider ATSCPPAPI_UNUSED) + handleAsyncComplete(AsyncHttpFetch &provider ATSCPPAPI_UNUSED) override { } }; diff --git 
a/example/cppapi/logger_example/LoggerExample.cc b/example/cppapi/logger_example/LoggerExample.cc index 14228c19c96..ba8075fde5b 100644 --- a/example/cppapi/logger_example/LoggerExample.cc +++ b/example/cppapi/logger_example/LoggerExample.cc @@ -58,8 +58,8 @@ class GlobalHookPlugin : public GlobalPlugin registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { LOG_DEBUG(log, "handleReadRequestHeadersPostRemap.\n" "\tRequest URL: %s\n" diff --git a/example/cppapi/multiple_transaction_hooks/MultipleTransactionHookPlugins.cc b/example/cppapi/multiple_transaction_hooks/MultipleTransactionHookPlugins.cc index 5e97ebcb3bd..372a5fe50d2 100644 --- a/example/cppapi/multiple_transaction_hooks/MultipleTransactionHookPlugins.cc +++ b/example/cppapi/multiple_transaction_hooks/MultipleTransactionHookPlugins.cc @@ -37,9 +37,9 @@ class MultipleTransactionHookPluginsOne : public atscppapi::TransactionPlugin std::cout << "Constructed MultipleTransactionHookPluginsOne!" << std::endl; } - virtual ~MultipleTransactionHookPluginsOne() { std::cout << "Destroyed MultipleTransactionHookPluginsOne!" << std::endl; } + ~MultipleTransactionHookPluginsOne() override { std::cout << "Destroyed MultipleTransactionHookPluginsOne!" << std::endl; } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { std::cerr << "MultipleTransactionHookPluginsOne -- Send response headers!" << std::endl; transaction.resume(); @@ -56,9 +56,9 @@ class MultipleTransactionHookPluginsTwo : public atscppapi::TransactionPlugin std::cout << "Constructed MultipleTransactionHookPluginsTwo!" << std::endl; } - virtual ~MultipleTransactionHookPluginsTwo() { std::cout << "Destroyed MultipleTransactionHookPluginsTwo!" 
<< std::endl; } + ~MultipleTransactionHookPluginsTwo() override { std::cout << "Destroyed MultipleTransactionHookPluginsTwo!" << std::endl; } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { std::cout << "MultipleTransactionHookPluginsTwo -- Send request headers!" << std::endl; some_container_.push_back("We have transaction scoped storage in Transaction Hooks!"); @@ -66,7 +66,7 @@ class MultipleTransactionHookPluginsTwo : public atscppapi::TransactionPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { std::cout << "MultipleTransactionHookPluginsTwo -- Send response headers!" << std::endl; @@ -86,8 +86,8 @@ class GlobalHookPlugin : public atscppapi::GlobalPlugin { public: GlobalHookPlugin() { GlobalPlugin::registerHook(HOOK_READ_REQUEST_HEADERS_PRE_REMAP); } - virtual void - handleReadRequestHeadersPreRemap(Transaction &transaction) + void + handleReadRequestHeadersPreRemap(Transaction &transaction) override { std::cout << "Hello from handleReadRequesHeadersPreRemap!" 
<< std::endl; diff --git a/example/cppapi/null_transformation_plugin/NullTransformationPlugin.cc b/example/cppapi/null_transformation_plugin/NullTransformationPlugin.cc index 2dc7626f101..5f17fb6d918 100644 --- a/example/cppapi/null_transformation_plugin/NullTransformationPlugin.cc +++ b/example/cppapi/null_transformation_plugin/NullTransformationPlugin.cc @@ -43,32 +43,32 @@ class NullTransformationPlugin : public TransformationPlugin } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { transaction.getServerRequest().getHeaders()["X-Content-Transformed"] = "1"; transaction.resume(); } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { transaction.getClientResponse().getHeaders()["X-Content-Transformed"] = "1"; transaction.resume(); } void - consume(const string &data) + consume(const string &data) override { produce(data); } void - handleInputComplete() + handleInputComplete() override { setOutputComplete(); } - virtual ~NullTransformationPlugin() {} + ~NullTransformationPlugin() override {} private: }; @@ -81,15 +81,15 @@ class GlobalHookPlugin : public GlobalPlugin registerHook(HOOK_READ_RESPONSE_HEADERS); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { transaction.addPlugin(new NullTransformationPlugin(transaction, TransformationPlugin::REQUEST_TRANSFORMATION)); transaction.resume(); } - virtual void - handleReadResponseHeaders(Transaction &transaction) + void + handleReadResponseHeaders(Transaction &transaction) override { transaction.addPlugin(new NullTransformationPlugin(transaction, TransformationPlugin::RESPONSE_TRANSFORMATION)); transaction.resume(); diff --git a/example/cppapi/post_buffer/PostBuffer.cc b/example/cppapi/post_buffer/PostBuffer.cc index fa8c2ecdfb6..1cc4d82b119 100644 --- 
a/example/cppapi/post_buffer/PostBuffer.cc +++ b/example/cppapi/post_buffer/PostBuffer.cc @@ -43,19 +43,19 @@ class PostBufferTransformationPlugin : public TransformationPlugin } void - consume(const string &data) + consume(const string &data) override { buffer_.append(data); } void - handleInputComplete() + handleInputComplete() override { produce(buffer_); setOutputComplete(); } - virtual ~PostBufferTransformationPlugin() {} + ~PostBufferTransformationPlugin() override {} private: Transaction &transaction_; string buffer_; @@ -65,8 +65,8 @@ class GlobalHookPlugin : public GlobalPlugin { public: GlobalHookPlugin() { registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { cerr << "Read Request Headers Post Remap" << endl; cerr << "Path: " << transaction.getClientRequest().getUrl().getPath() << endl; diff --git a/example/cppapi/remap_plugin/RemapPlugin.cc b/example/cppapi/remap_plugin/RemapPlugin.cc index be9a56a1d6b..455a805f80c 100644 --- a/example/cppapi/remap_plugin/RemapPlugin.cc +++ b/example/cppapi/remap_plugin/RemapPlugin.cc @@ -38,7 +38,7 @@ class MyRemapPlugin : public RemapPlugin public: MyRemapPlugin(void **instance_handle) : RemapPlugin(instance_handle) {} Result - doRemap(const Url &map_from_url, const Url &map_to_url, Transaction &transaction, bool &redirect) + doRemap(const Url &map_from_url, const Url &map_to_url, Transaction &transaction, bool &redirect) override { Url &request_url = transaction.getClientRequest().getUrl(); TS_DEBUG(LOG_TAG, "from URL is [%s], to URL is [%s], request URL is [%s]", map_from_url.getUrlString().c_str(), diff --git a/example/cppapi/serverresponse/ServerResponse.cc b/example/cppapi/serverresponse/ServerResponse.cc index b75e0c6075a..5e2e7f863d6 100644 --- a/example/cppapi/serverresponse/ServerResponse.cc +++ b/example/cppapi/serverresponse/ServerResponse.cc @@ -44,7 
+44,7 @@ class ServerResponsePlugin : public GlobalPlugin } void - handleSendRequestHeaders(Transaction &transaction) + handleSendRequestHeaders(Transaction &transaction) override { // Here we can decide to abort the request to the origin (we can do this earlier too) // and just send the user an error page. @@ -62,7 +62,7 @@ class ServerResponsePlugin : public GlobalPlugin } void - handleReadResponseHeaders(Transaction &transaction) + handleReadResponseHeaders(Transaction &transaction) override { cout << "Hello from handleReadResponseHeaders!" << endl; cout << "Server response headers are" << endl; @@ -73,7 +73,7 @@ class ServerResponsePlugin : public GlobalPlugin } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { cout << "Hello from handleSendResponseHeaders!" << endl; cout << "Client response headers are" << endl; diff --git a/example/cppapi/stat_example/StatExample.cc b/example/cppapi/stat_example/StatExample.cc index 97414dd20f3..cbfb5c4c76b 100644 --- a/example/cppapi/stat_example/StatExample.cc +++ b/example/cppapi/stat_example/StatExample.cc @@ -55,8 +55,8 @@ class GlobalHookPlugin : public GlobalPlugin registerHook(HOOK_READ_REQUEST_HEADERS_POST_REMAP); } - virtual void - handleReadRequestHeadersPostRemap(Transaction &transaction) + void + handleReadRequestHeadersPostRemap(Transaction &transaction) override { TS_DEBUG(TAG, "Received a request, incrementing the counter."); stat.increment(); diff --git a/example/cppapi/timeout_example/TimeoutExamplePlugin.cc b/example/cppapi/timeout_example/TimeoutExamplePlugin.cc index b05dc03908c..d2b3120970c 100644 --- a/example/cppapi/timeout_example/TimeoutExamplePlugin.cc +++ b/example/cppapi/timeout_example/TimeoutExamplePlugin.cc @@ -39,15 +39,15 @@ class TimeoutExamplePlugin : public GlobalPlugin registerHook(HOOK_SEND_RESPONSE_HEADERS); } - virtual void - handleSendResponseHeaders(Transaction &transaction) + void + 
handleSendResponseHeaders(Transaction &transaction) override { TS_DEBUG(TAG, "Sending response headers to the client, status=%d", transaction.getClientResponse().getStatusCode()); transaction.resume(); } - virtual void - handleReadRequestHeadersPreRemap(Transaction &transaction) + void + handleReadRequestHeadersPreRemap(Transaction &transaction) override { TS_DEBUG(TAG, "Setting all timeouts to 1ms, this will likely cause the transaction to receive a 504."); transaction.setTimeout(Transaction::TIMEOUT_CONNECT, 1); diff --git a/example/cppapi/transactionhook/TransactionHookPlugin.cc b/example/cppapi/transactionhook/TransactionHookPlugin.cc index 64e41bb7027..c89dec048b9 100644 --- a/example/cppapi/transactionhook/TransactionHookPlugin.cc +++ b/example/cppapi/transactionhook/TransactionHookPlugin.cc @@ -37,13 +37,13 @@ class TransactionHookPlugin : public atscppapi::TransactionPlugin TransactionPlugin::registerHook(HOOK_SEND_RESPONSE_HEADERS); std::cout << "Constructed!" << std::endl; } - virtual ~TransactionHookPlugin() + ~TransactionHookPlugin() override { delete[] char_ptr_; // cleanup std::cout << "Destroyed!" << std::endl; } void - handleSendResponseHeaders(Transaction &transaction) + handleSendResponseHeaders(Transaction &transaction) override { std::cout << "Send response headers!" << std::endl; transaction.resume(); @@ -57,8 +57,8 @@ class GlobalHookPlugin : public atscppapi::GlobalPlugin { public: GlobalHookPlugin() { GlobalPlugin::registerHook(HOOK_READ_REQUEST_HEADERS_PRE_REMAP); } - virtual void - handleReadRequestHeadersPreRemap(Transaction &transaction) + void + handleReadRequestHeadersPreRemap(Transaction &transaction) override { std::cout << "Hello from handleReadRequesHeadersPreRemap!" 
<< std::endl; transaction.addPlugin(new TransactionHookPlugin(transaction)); diff --git a/example/cppapi/websocket/WSBuffer.cc b/example/cppapi/websocket/WSBuffer.cc index e84429c620b..2609f34ed62 100644 --- a/example/cppapi/websocket/WSBuffer.cc +++ b/example/cppapi/websocket/WSBuffer.cc @@ -24,6 +24,7 @@ #include "WSBuffer.h" #include +#include #include "openssl/evp.h" #include #include @@ -157,54 +158,28 @@ WSBuffer::read_buffered_message(std::string &message, int &code) std::string WSBuffer::ws_digest(std::string const &key) { -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX digest[1]; - EVP_MD_CTX_init(digest); -#else - EVP_MD_CTX *digest; - digest = EVP_MD_CTX_new(); -#endif + EVP_MD_CTX *digest = EVP_MD_CTX_new(); if (!EVP_DigestInit_ex(digest, EVP_sha1(), nullptr)) { -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX_cleanup(digest); -#else EVP_MD_CTX_free(digest); -#endif return "init-failed"; } if (!EVP_DigestUpdate(digest, key.data(), key.length())) { -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX_cleanup(digest); -#else EVP_MD_CTX_free(digest); -#endif return "update1-failed"; } if (!EVP_DigestUpdate(digest, magic.data(), magic.length())) { -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX_cleanup(digest); -#else EVP_MD_CTX_free(digest); -#endif return "update2-failed"; } unsigned char hash_buf[EVP_MAX_MD_SIZE]; unsigned int hash_len = 0; if (!EVP_DigestFinal_ex(digest, hash_buf, &hash_len)) { -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX_cleanup(digest); -#else EVP_MD_CTX_free(digest); -#endif return "final-failed"; } -#if OPENSSL_VERSION_NUMBER < 0x10100000L - EVP_MD_CTX_cleanup(digest); -#else EVP_MD_CTX_free(digest); -#endif if (hash_len != 20) { return "bad-hash-length"; } diff --git a/example/intercept/intercept.cc b/example/intercept/intercept.cc index 8d62c30cea4..6f076275472 100644 --- a/example/intercept/intercept.cc +++ b/example/intercept/intercept.cc @@ -22,10 +22,10 @@ */ #include -#include -#include 
-#include -#include +#include +#include +#include +#include #include #include #include diff --git a/example/passthru/passthru.cc b/example/passthru/passthru.cc index 426fc163a79..acb3d928874 100644 --- a/example/passthru/passthru.cc +++ b/example/passthru/passthru.cc @@ -32,8 +32,8 @@ */ #include -#include -#include +#include +#include #define PLUGIN_NAME "passthru" diff --git a/example/protocol-stack/protocol-stack.cc b/example/protocol-stack/protocol-stack.cc index 411f6afc7de..7a688c347cf 100644 --- a/example/protocol-stack/protocol-stack.cc +++ b/example/protocol-stack/protocol-stack.cc @@ -21,7 +21,7 @@ limitations under the License. */ -#include +#include #include "ts/ts.h" #include "ts/ink_defs.h" diff --git a/example/remap/remap.cc b/example/remap/remap.cc index cc94c7ff937..ceb4aef259a 100644 --- a/example/remap/remap.cc +++ b/example/remap/remap.cc @@ -29,11 +29,11 @@ # tsxs -i -o remap.so */ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include diff --git a/example/remap_header_add/remap_header_add.cc b/example/remap_header_add/remap_header_add.cc index 75ab12bf8a3..21feef07281 100644 --- a/example/remap_header_add/remap_header_add.cc +++ b/example/remap_header_add/remap_header_add.cc @@ -27,9 +27,9 @@ map /foo http://127.0.0.1/ @plugin=remap_header_add.so @pparam=foo:"x" @pparam=@test:"c" @pparam=a:"b" */ -#include -#include -#include +#include +#include +#include #include "ts/ts.h" #include "ts/remap.h" @@ -156,7 +156,7 @@ TSRemapDoRemap(void *ih, NOWARN_UNUSED TSHttpTxn txn, NOWARN_UNUSED TSRemapReque } for (int i = 0; i < rl->nvc; ++i) { - TSDebug(TAG, "Attaching header \"%s\" with value \"%s\".", rl->name[i], rl->val[i]); + TSDebug(TAG, R"(Attaching header "%s" with value "%s".)", rl->name[i], rl->val[i]); TSMLoc field_loc; if (TSMimeHdrFieldCreate(req_bufp, req_loc, &field_loc) == TS_SUCCESS) { diff --git a/example/replace-protoset/replace-protoset.cc 
b/example/replace-protoset/replace-protoset.cc new file mode 100644 index 00000000000..bfd4dbcaaf6 --- /dev/null +++ b/example/replace-protoset/replace-protoset.cc @@ -0,0 +1,126 @@ +/** @file + + A brief file description + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +/* + * replace-protoset.c: + * an example plugin... + * Clones protoset attached with all the accept objects + * Unregisters H2 from the clone + * Replaces the protoset attached with all the incoming VCs with a clone + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PLNAME "TLS Protocol Adjuster" +#define PLTAG "replace_protoset" + +typedef std::unordered_map protoTable; // stores protocolset keyed by NetAccept ID +protoTable ProtoSetTable; +typedef std::unordered_set Table; +// Map of domains to tweak. 
+Table _table; + +int +CB_SNI(TSCont contp, TSEvent, void *cb_data) +{ + TSVConn vc = (static_cast(cb_data)); + TSSslConnection ssl_conn = TSVConnSSLConnectionGet(vc); + auto *ssl = reinterpret_cast(ssl_conn); + char const *sni = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name); + if (sni) { + if (_table.find(sni) != _table.end()) { + TSAcceptor na = TSAcceptorGet(vc); + int nid = TSAcceptorIDGet(na); + TSNextProtocolSet ps = ProtoSetTable[nid]; + TSRegisterProtocolSet(vc, ps); + } + } + + TSVConnReenable(vc); + return TS_SUCCESS; +} + +int +CB_NetAcceptReady(TSCont contp, TSEvent event, void *cb_data) +{ + switch (event) { + case TS_EVENT_LIFECYCLE_PORTS_READY: + for (int i = 0, totalNA = TSAcceptorCount(); i < totalNA; ++i) { + TSAcceptor netaccept = TSAcceptorGetbyID(i); + // get a clone of the protoset associated with the netaccept + TSNextProtocolSet nps = TSGetcloneProtoSet(netaccept); + TSUnregisterProtocol(nps, TS_ALPN_PROTOCOL_HTTP_2_0); + ProtoSetTable[i] = nps; + } + break; + default: + break; + } + return 0; +} + +void +TSPluginInit(int argc, char const *argv[]) +{ + int ret = -999, i; + TSPluginRegistrationInfo info; + info.plugin_name = PLNAME; + info.vendor_name = "Yahoo!"; + info.support_email = "persia@yahoo-inc.com"; + ret = TSPluginRegister(&info); + + if (ret != TS_SUCCESS) { + TSError("Plugin registration failed."); + return; + } else { + if (argc < 2) { + TSError("[%s] Usage %s servername1 servername2 .... ", PLTAG, PLTAG); + return; + } + TSDebug(PLTAG, "Plugin registration succeeded."); + } + + for (i = 1; i < argc; i++) { + TSDebug(PLTAG, "%s added to the No-H2 list", argv[i]); + _table.emplace(std::string(argv[i], strlen(argv[i]))); + } + // This should not modify any state so no lock is needed. 
+ TSCont cb_sni = TSContCreate(&CB_SNI, NULL); + TSCont cb_netacc = TSContCreate(&CB_NetAcceptReady, NULL); + + TSHttpHookAdd(TS_SSL_SERVERNAME_HOOK, cb_sni); + TSLifecycleHookAdd(TS_LIFECYCLE_PORTS_READY_HOOK, cb_netacc); +} diff --git a/example/secure-link/secure-link.c b/example/secure-link/secure-link.c index 7837bf1b9c2..d2b691a5bb1 100644 --- a/example/secure-link/secure-link.c +++ b/example/secure-link/secure-link.c @@ -50,7 +50,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri) const char *qh, *ph, *ip; unsigned char md[MD5_DIGEST_LENGTH]; secure_link_info *sli = (secure_link_info *)ih; - char *token = NULL, *expire = NULL, *path = NULL; + char *token = NULL, *tokenptr = NULL, *expire = NULL, *expireptr = NULL, *path = NULL; char *s, *ptr, *saveptr = NULL, *val, hash[32] = ""; in = (struct sockaddr_in *)TSHttpTxnClientAddrGet(rh); @@ -67,15 +67,17 @@ TSRemapDoRemap(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri) if ((val = strchr(ptr, '=')) != NULL) { *val++ = '\0'; if (strcmp(ptr, "st") == 0) { - token = TSstrdup(val); + tokenptr = val; } else if (strcmp(ptr, "ex") == 0) { - expire = TSstrdup(val); + expireptr = val; } } else { TSError("[secure_link] Invalid parameter [%s]", ptr); break; } } while ((ptr = strtok_r(NULL, "&", &saveptr)) != NULL); + token = (NULL == tokenptr ? NULL : TSstrdup(tokenptr)); + expire = (NULL == expireptr ? NULL : TSstrdup(expireptr)); } else { TSError("[secure_link] strtok didn't find a & in the query string"); /* this is just example, so set fake params to prevent plugin crash */ diff --git a/example/ssl-preaccept/ssl-preaccept.cc b/example/ssl-preaccept/ssl-preaccept.cc index a051843f90f..f725f01a079 100644 --- a/example/ssl-preaccept/ssl-preaccept.cc +++ b/example/ssl-preaccept/ssl-preaccept.cc @@ -25,9 +25,9 @@ limitations under the License. 
*/ -#include +#include #include -#include +#include #include #include #include @@ -43,7 +43,7 @@ namespace { std::string ConfigPath; typedef std::pair IpRange; -typedef std::deque IpRangeQueue; +using IpRangeQueue = std::deque; IpRangeQueue ClientBlindTunnelIp; Configuration Config; // global configuration @@ -54,7 +54,7 @@ Parse_Addr_String(ts::ConstBuffer const &text, IpRange &range) IpAddr newAddr; std::string textstr(text._ptr, text._size); // Is there a hyphen? - size_t hyphen_pos = textstr.find("-"); + size_t hyphen_pos = textstr.find('-'); if (hyphen_pos != std::string::npos) { std::string addr1 = textstr.substr(0, hyphen_pos); std::string addr2 = textstr.substr(hyphen_pos + 1); diff --git a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc b/example/ssl-sni-whitelist/ssl-sni-whitelist.cc index 9f3d8f10efe..65e0a2b76bd 100644 --- a/example/ssl-sni-whitelist/ssl-sni-whitelist.cc +++ b/example/ssl-sni-whitelist/ssl-sni-whitelist.cc @@ -23,9 +23,9 @@ limitations under the License. */ -#include +#include #include -#include +#include #include #include "ts/ink_config.h" #include diff --git a/example/ssl-sni/ssl-sni.cc b/example/ssl-sni/ssl-sni.cc index f4015ee4f04..9c0cca45a07 100644 --- a/example/ssl-sni/ssl-sni.cc +++ b/example/ssl-sni/ssl-sni.cc @@ -25,9 +25,9 @@ limitations under the License. 
*/ -#include +#include #include -#include +#include #include #include "ts/ink_config.h" #include diff --git a/example/statistic/statistic.cc b/example/statistic/statistic.cc index bf8dc1a2ce1..0414f86bc87 100644 --- a/example/statistic/statistic.cc +++ b/example/statistic/statistic.cc @@ -28,8 +28,8 @@ // doc/developer-guide/plugins/adding-statistics.en.rst #include -#include -#include +#include +#include #define PLUGIN_NAME "statistics" diff --git a/iocore/aio/AIO.cc b/iocore/aio/AIO.cc index 1ce548ad157..f9efaa07da3 100644 --- a/iocore/aio/AIO.cc +++ b/iocore/aio/AIO.cc @@ -131,7 +131,7 @@ AIOTestData::ink_aio_stats(int event, void *d) * Common */ AIOCallback * -new_AIOCallback(void) +new_AIOCallback() { return new AIOCallbackInternal; } @@ -399,7 +399,7 @@ aio_queue_req(AIOCallbackInternal *op, int fromAPI = 0) static inline int cache_op(AIOCallbackInternal *op) { - bool read = (op->aiocb.aio_lio_opcode == LIO_READ) ? 1 : 0; + bool read = (op->aiocb.aio_lio_opcode == LIO_READ) ? true : false; for (; op; op = (AIOCallbackInternal *)op->then) { ink_aiocb_t *a = &op->aiocb; ssize_t err, res = 0; @@ -513,7 +513,7 @@ aio_thread_main(void *arg) else op->thread->schedule_imm_signal(op); ink_mutex_acquire(&my_aio_req->aio_mutex); - } while (1); + } while (true); timespec timedwait_msec = ink_hrtime_to_timespec(Thread::get_hrtime_updated() + HRTIME_MSECONDS(net_config_poll_timeout)); ink_cond_timedwait(&my_aio_req->aio_cond, &my_aio_req->aio_mutex, &timedwait_msec); } diff --git a/iocore/aio/test_AIO.cc b/iocore/aio/test_AIO.cc index a3d427fa1f5..b6ec70de5e2 100644 --- a/iocore/aio/test_AIO.cc +++ b/iocore/aio/test_AIO.cc @@ -149,7 +149,7 @@ struct AIO_Device : public Continuation { }; void -dump_summary(void) +dump_summary() { /* dump timing info */ printf("Writing summary info\n"); diff --git a/iocore/cache/Cache.cc b/iocore/cache/Cache.cc index bdda7b70fbf..9e4263e4a54 100644 --- a/iocore/cache/Cache.cc +++ b/iocore/cache/Cache.cc @@ -57,10 +57,10 @@ static short int 
const CACHE_DB_MAJOR_VERSION_COMPATIBLE = 21; // Configuration int64_t cache_config_ram_cache_size = AUTO_SIZE_RAM_CACHE; -int cache_config_ram_cache_algorithm = 0; +int cache_config_ram_cache_algorithm = 1; int cache_config_ram_cache_compress = 0; int cache_config_ram_cache_compress_percent = 90; -int cache_config_ram_cache_use_seen_filter = 0; +int cache_config_ram_cache_use_seen_filter = 1; int cache_config_http_max_alts = 3; int cache_config_dir_sync_frequency = 60; int cache_config_permit_pinning = 0; @@ -130,9 +130,9 @@ struct VolInitInfo { ~VolInitInfo() { - for (int i = 0; i < 4; i++) { - vol_aio[i].action = nullptr; - vol_aio[i].mutex.clear(); + for (auto &i : vol_aio) { + i.action = nullptr; + i.mutex.clear(); } free(vol_h_f); } @@ -1236,7 +1236,7 @@ bool CacheProcessor::IsCacheReady(CacheFragType type) { if (IsCacheEnabled() != CACHE_INITIALIZED) - return 0; + return false; return (bool)(cache_ready & (1 << type)); } @@ -1539,12 +1539,12 @@ Vol::handle_recover_from_data(int event, void * /* data ATS_UNUSED */) return handle_recover_write_dir(EVENT_IMMEDIATE, nullptr); } // initialize - recover_wrapped = 0; + recover_wrapped = false; last_sync_serial = 0; last_write_serial = 0; recover_pos = header->last_write_pos; if (recover_pos >= skip + len) { - recover_wrapped = 1; + recover_wrapped = true; recover_pos = start; } io.aiocb.aio_buf = (char *)ats_memalign(ats_pagesize(), RECOVERY_SIZE); @@ -1647,7 +1647,7 @@ Vol::handle_recover_from_data(int event, void * /* data ATS_UNUSED */) // (doc->sync_serial > header->sync_serial + 1). 
// if we are too close to the end, wrap around else if (recover_pos - (e - s) > (skip + len) - AGG_SIZE) { - recover_wrapped = 1; + recover_wrapped = true; recover_pos = start; io.aiocb.aio_nbytes = RECOVERY_SIZE; @@ -1662,7 +1662,7 @@ Vol::handle_recover_from_data(int event, void * /* data ATS_UNUSED */) // from the end, then wrap around recover_pos -= e - s; if (recover_pos > (skip + len) - AGG_SIZE) { - recover_wrapped = 1; + recover_wrapped = true; recover_pos = start; io.aiocb.aio_nbytes = RECOVERY_SIZE; @@ -1686,7 +1686,7 @@ Vol::handle_recover_from_data(int event, void * /* data ATS_UNUSED */) s -= round_to_approx_size(doc->len); recover_pos -= e - s; if (recover_pos >= skip + len) { - recover_wrapped = 1; + recover_wrapped = true; recover_pos = start; } io.aiocb.aio_nbytes = RECOVERY_SIZE; @@ -1799,9 +1799,9 @@ Vol::handle_header_read(int event, void *data) switch (event) { case AIO_EVENT_DONE: op = (AIOCallback *)data; - for (int i = 0; i < 4; i++) { + for (auto &i : hf) { ink_assert(op != nullptr); - hf[i] = (VolHeaderFooter *)(op->aiocb.aio_buf); + i = (VolHeaderFooter *)(op->aiocb.aio_buf); if ((size_t)op->aio_result != (size_t)op->aiocb.aio_nbytes) { clear_dir(); return EVENT_DONE; @@ -1856,9 +1856,9 @@ Vol::dir_init_done(int /* event ATS_UNUSED */, void * /* data ATS_UNUSED */) gvol[vol_no] = this; SET_HANDLER(&Vol::aggWrite); if (fd == -1) - cache->vol_initialized(0); + cache->vol_initialized(false); else - cache->vol_initialized(1); + cache->vol_initialized(true); return EVENT_DONE; } } @@ -2567,10 +2567,10 @@ CacheVC::removeEvent(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) if (vol->open_write(this, true, 1)) { // writer exists ink_release_assert(od = vol->open_read(&key)); - od->dont_update_directory = 1; + od->dont_update_directory = true; od = nullptr; } else { - od->dont_update_directory = 1; + od->dont_update_directory = true; } f.remove_aborted_writers = 1; } diff --git a/iocore/cache/CacheDir.cc b/iocore/cache/CacheDir.cc index 
dfe1d47844d..7e058fd6c63 100644 --- a/iocore/cache/CacheDir.cc +++ b/iocore/cache/CacheDir.cc @@ -92,10 +92,10 @@ OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers) od->num_writers = 1; od->max_writers = max_writers; od->vector.data.data = &od->vector.data.fast_data[0]; - od->dont_update_directory = 0; - od->move_resident_alt = 0; - od->reading_vec = 0; - od->writing_vec = 0; + od->dont_update_directory = false; + od->move_resident_alt = false; + od->reading_vec = false; + od->writing_vec = false; dir_clear(&od->first_dir); cont->od = od; cont->write_vector = &od->vector; @@ -796,14 +796,14 @@ void dir_lookaside_cleanup(Vol *d) { ink_assert(d->mutex->thread_holding == this_ethread()); - for (int i = 0; i < LOOKASIDE_SIZE; i++) { - EvacuationBlock *b = d->lookaside[i].head; + for (auto &i : d->lookaside) { + EvacuationBlock *b = i.head; while (b) { if (!dir_valid(d, &b->new_dir)) { EvacuationBlock *nb = b->link.next; DDebug("dir_lookaside", "cleanup %X %X cleaned up", b->evac_frags.earliest_key.slice32(0), b->evac_frags.earliest_key.slice32(1)); - d->lookaside[i].remove(b); + i.remove(b); free_CacheVC(b->earliest_evacuator); free_EvacuationBlock(b, d->mutex->thread_holding); b = nb; @@ -890,7 +890,7 @@ dir_entries_used(Vol *d) */ void -sync_cache_dir_on_shutdown(void) +sync_cache_dir_on_shutdown() { Debug("cache_dir_sync", "sync started"); char *buf = nullptr; @@ -1058,7 +1058,7 @@ CacheSync::mainEvent(int event, Event *e) } if (vol->is_io_in_progress() || vol->agg_buf_pos) { Debug("cache_dir_sync", "Dir %s: waiting for agg buffer", vol->hash_text.get()); - vol->dir_sync_waiting = 1; + vol->dir_sync_waiting = true; if (!vol->is_io_in_progress()) vol->aggWrite(EVENT_IMMEDIATE, nullptr); return EVENT_CONT; @@ -1087,7 +1087,7 @@ CacheSync::mainEvent(int event, Event *e) vol->footer->sync_serial = vol->header->sync_serial; CHECK_DIR(d); memcpy(buf, vol->raw_dir, dirlen); - vol->dir_sync_in_progress = 1; + vol->dir_sync_in_progress = true; } size_t 
B = vol->header->sync_serial & 1; off_t start = vol->skip + (B ? dirlen : 0); @@ -1109,7 +1109,7 @@ CacheSync::mainEvent(int event, Event *e) aio_write(vol->fd, buf + writepos, headerlen, start + writepos); writepos += headerlen; } else { - vol->dir_sync_in_progress = 0; + vol->dir_sync_in_progress = false; CACHE_INCREMENT_DYN_STAT(cache_directory_sync_count_stat); CACHE_SUM_DYN_STAT(cache_directory_sync_time_stat, Thread::get_hrtime() - start_time); start_time = 0; diff --git a/iocore/cache/CacheHosting.cc b/iocore/cache/CacheHosting.cc index 2cac146acf9..b18b287e1de 100644 --- a/iocore/cache/CacheHosting.cc +++ b/iocore/cache/CacheHosting.cc @@ -28,7 +28,7 @@ extern int gndisks; -matcher_tags CacheHosting_tags = {"hostname", "domain", nullptr, nullptr, nullptr, nullptr, 0}; +matcher_tags CacheHosting_tags = {"hostname", "domain", nullptr, nullptr, nullptr, nullptr, false}; /************************************************************* * Begin class HostMatcher @@ -503,7 +503,7 @@ CacheHostRecord::Init(matcher_line *line_info, CacheType typ) cp = (CacheVol **)ats_malloc(num_cachevols * sizeof(CacheVol *)); memset(cp, 0, num_cachevols * sizeof(CacheVol *)); num_cachevols = 0; - while (1) { + while (true) { char c = *s; if ((c == ',') || (c == '\0')) { *s = '\0'; @@ -639,7 +639,7 @@ ConfigVolumes::BuildListFromString(char *config_file_path, char *file_buf) int size = 0; int in_percent = 0; - while (1) { + while (true) { // skip all blank spaces at beginning of line while (*tmp && isspace(*tmp)) { tmp++; @@ -740,9 +740,9 @@ ConfigVolumes::BuildListFromString(char *config_file_path, char *file_buf) configp->number = volume_number; if (in_percent) { configp->percent = size; - configp->in_percent = 1; + configp->in_percent = true; } else { - configp->in_percent = 0; + configp->in_percent = false; } configp->scheme = scheme; configp->size = size; @@ -828,7 +828,7 @@ create_config(RegressionTest *t, int num) cp->number = vol_num++; cp->scheme = CACHE_HTTP_TYPE; cp->size = 
128; - cp->in_percent = 0; + cp->in_percent = false; cp->cachep = nullptr; config_volumes.cp_queue.enqueue(cp); config_volumes.num_volumes++; @@ -867,7 +867,7 @@ create_config(RegressionTest *t, int num) cp->scheme = CACHE_HTTP_TYPE; cp->size = 10; cp->percent = 10; - cp->in_percent = 1; + cp->in_percent = true; cp->cachep = nullptr; config_volumes.cp_queue.enqueue(cp); config_volumes.num_volumes++; @@ -925,7 +925,7 @@ create_config(RegressionTest *t, int num) cp->scheme = scheme; cp->size = random_size >> 20; cp->percent = 0; - cp->in_percent = 0; + cp->in_percent = false; cp->cachep = nullptr; config_volumes.cp_queue.enqueue(cp); config_volumes.num_volumes++; diff --git a/iocore/cache/CacheHttp.cc b/iocore/cache/CacheHttp.cc index b142125d4b6..da29ee3b33d 100644 --- a/iocore/cache/CacheHttp.cc +++ b/iocore/cache/CacheHttp.cc @@ -22,7 +22,7 @@ */ #include "ts/ink_config.h" -#include +#include #include "P_Cache.h" /*------------------------------------------------------------------------- diff --git a/iocore/cache/CachePages.cc b/iocore/cache/CachePages.cc index 74aef253984..71870a52b7e 100644 --- a/iocore/cache/CachePages.cc +++ b/iocore/cache/CachePages.cc @@ -153,7 +153,7 @@ struct ShowCache : public ShowCont { SET_HANDLER(&ShowCache::showMain); } - ~ShowCache() + ~ShowCache() override { if (show_cache_urlstrs) delete[] show_cache_urlstrs; diff --git a/iocore/cache/CachePagesInternal.cc b/iocore/cache/CachePagesInternal.cc index 34a17718200..7cf8de1debe 100644 --- a/iocore/cache/CachePagesInternal.cc +++ b/iocore/cache/CachePagesInternal.cc @@ -49,7 +49,7 @@ struct ShowCacheInternal : public ShowCont { SET_HANDLER(&ShowCacheInternal::showMain); } - ~ShowCacheInternal() {} + ~ShowCacheInternal() override {} }; extern ShowCacheInternal *theshowcacheInternal; Action *register_ShowCacheInternal(Continuation *c, HTTPHdr *h); diff --git a/iocore/cache/CacheRead.cc b/iocore/cache/CacheRead.cc index ec3cf3d40ab..a9986bd361d 100644 --- a/iocore/cache/CacheRead.cc +++ 
b/iocore/cache/CacheRead.cc @@ -871,7 +871,7 @@ CacheVC::openReadStartEarliest(int /* event ATS_UNUSED */, Event * /* e ATS_UNUS // the evacuator changes the od->first_dir to the new directory // that it inserted od->first_dir = first_dir; - od->writing_vec = 1; + od->writing_vec = true; earliest_key = zero_key; // set up this VC as a alternate delete write_vc @@ -884,7 +884,7 @@ CacheVC::openReadStartEarliest(int /* event ATS_UNUSED */, Event * /* e ATS_UNUS // when another alternate does not exist. // ///////////////////////////////////////////////////////////////// if (doc1->total_len > 0) { - od->move_resident_alt = 1; + od->move_resident_alt = true; od->single_doc_key = doc1->key; dir_assign(&od->single_doc_dir, &dir); dir_set_tag(&od->single_doc_dir, od->single_doc_key.slice32(2)); @@ -923,7 +923,7 @@ CacheVC::openReadVecWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */ cancel_trigger(); set_io_not_in_progress(); ink_assert(od); - od->writing_vec = 0; + od->writing_vec = false; if (_action.cancelled) return openWriteCloseDir(EVENT_IMMEDIATE, nullptr); { diff --git a/iocore/cache/CacheTest.cc b/iocore/cache/CacheTest.cc index 67bf496f620..3aca888872c 100644 --- a/iocore/cache/CacheTest.cc +++ b/iocore/cache/CacheTest.cc @@ -26,6 +26,7 @@ #include "P_CacheTest.h" #include "api/ts/ts.h" #include +#include using namespace std; diff --git a/iocore/cache/CacheVol.cc b/iocore/cache/CacheVol.cc index b53f412b6ad..ce641c8b4e9 100644 --- a/iocore/cache/CacheVol.cc +++ b/iocore/cache/CacheVol.cc @@ -223,7 +223,7 @@ CacheVC::scanObject(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) goto Lskip; last_collision = nullptr; - while (1) { + while (true) { if (!dir_probe(&doc->first_key, vol, &dir, &last_collision)) goto Lskip; if (!dir_agg_valid(vol, &dir) || !dir_head(&dir) || @@ -251,7 +251,7 @@ CacheVC::scanObject(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) if (this->load_http_info(&vector, doc) != doc->hlen) goto Lskip; changed = false; - 
hostinfo_copied = 0; + hostinfo_copied = false; for (i = 0; i < vector.count(); i++) { if (!vector.get(i)->valid()) goto Lskip; @@ -259,7 +259,7 @@ CacheVC::scanObject(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) memccpy(hname, vector.get(i)->request_get()->host_get(&hlen), 0, 500); hname[hlen] = 0; Debug("cache_scan", "hostname = '%s', hostlen = %d", hname, hlen); - hostinfo_copied = 1; + hostinfo_copied = true; } vector.get(i)->object_key_get(&key); alternate_index = i; @@ -424,7 +424,7 @@ CacheVC::scanOpenWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) for (int i = 0; i < alt_count; i++) { write_vector->insert(vector.get(i)); } - od->writing_vec = 1; + od->writing_vec = true; vector.clear(false); // check that the directory entry was not overwritten // if so return failure @@ -441,10 +441,10 @@ CacheVC::scanOpenWrite(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) dir_assign(&od->single_doc_dir, &dir); dir_set_tag(&od->single_doc_dir, doc->key.slice32(2)); od->single_doc_key = doc->key; - od->move_resident_alt = 1; + od->move_resident_alt = true; } - while (1) { + while (true) { if (!dir_probe(&first_key, vol, &d, &l)) { vol->close_write(this); _action.continuation->handleEvent(CACHE_EVENT_SCAN_OPERATION_FAILED, nullptr); diff --git a/iocore/cache/CacheWrite.cc b/iocore/cache/CacheWrite.cc index 5a4cbeef698..a9c5a26487a 100644 --- a/iocore/cache/CacheWrite.cc +++ b/iocore/cache/CacheWrite.cc @@ -89,11 +89,11 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) } } if (update_key == od->single_doc_key && (total_len || !vec)) - od->move_resident_alt = 0; + od->move_resident_alt = false; } if (cache_config_http_max_alts > 1 && write_vector->count() >= cache_config_http_max_alts && alternate_index < 0) { if (od->move_resident_alt && get_alternate_index(write_vector, od->single_doc_key) == 0) - od->move_resident_alt = 0; + od->move_resident_alt = false; write_vector->remove(0, true); } if (vec) { @@ -122,7 
+122,7 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) // for multiple fragment document, we must have done // CacheVC:openWriteCloseDataDone ink_assert(!fragment || f.data_done); - od->move_resident_alt = 0; + od->move_resident_alt = false; f.rewrite_resident_alt = 1; write_len = doc->data_len(); Debug("cache_update_alt", "rewriting resident alt size: %d key: %X, first_key: %X", write_len, doc->key.slice32(0), @@ -130,7 +130,7 @@ CacheVC::updateVector(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) } } header_len = write_vector->marshal_length(); - od->writing_vec = 1; + od->writing_vec = true; f.use_first_key = 1; SET_HANDLER(&CacheVC::openWriteCloseHeadDone); ret = do_write_call(); @@ -352,7 +352,7 @@ Vol::aggWriteDone(int event, Event *e) } } if (dir_sync_waiting) { - dir_sync_waiting = 0; + dir_sync_waiting = false; cacheDirSync->handleEvent(EVENT_IMMEDIATE, nullptr); } if (agg.head || sync.head) @@ -1128,7 +1128,7 @@ CacheVC::openWriteCloseHeadDone(int event, Event *e) CACHE_TRY_LOCK(lock, vol->mutex, mutex->thread_holding); if (!lock.is_locked()) VC_LOCK_RETRY_EVENT(); - od->writing_vec = 0; + od->writing_vec = false; if (!io.ok()) goto Lclose; ink_assert(f.use_first_key); @@ -1142,13 +1142,13 @@ CacheVC::openWriteCloseHeadDone(int event, Event *e) if (od->move_resident_alt) { if (dir_valid(vol, &od->single_doc_dir)) dir_insert(&od->single_doc_key, vol, &od->single_doc_dir); - od->move_resident_alt = 0; + od->move_resident_alt = false; } } od->first_dir = dir; if (frag_type == CACHE_FRAG_TYPE_HTTP && f.single_fragment) { // fragment is tied to the vector - od->move_resident_alt = 1; + od->move_resident_alt = true; if (!f.rewrite_resident_alt) { od->single_doc_key = earliest_key; } @@ -1489,7 +1489,7 @@ CacheVC::openWriteStartDone(int event, Event *e) first_dir = dir; if (doc->single_fragment()) { // fragment is tied to the vector - od->move_resident_alt = 1; + od->move_resident_alt = true; od->single_doc_key = 
doc->key; dir_assign(&od->single_doc_dir, &dir); dir_set_tag(&od->single_doc_dir, od->single_doc_key.slice32(2)); @@ -1511,7 +1511,7 @@ CacheVC::openWriteStartDone(int event, Event *e) } // check for collision if (dir_probe(&first_key, vol, &dir, &last_collision)) { - od->reading_vec = 1; + od->reading_vec = true; int ret = do_read_call(&first_key); if (ret == EVENT_RETURN) goto Lcallreturn; @@ -1523,7 +1523,7 @@ CacheVC::openWriteStartDone(int event, Event *e) } } Lsuccess: - od->reading_vec = 0; + od->reading_vec = false; if (_action.cancelled) goto Lcancel; SET_HANDLER(&CacheVC::openWriteMain); @@ -1534,7 +1534,7 @@ CacheVC::openWriteStartDone(int event, Event *e) _action.continuation->handleEvent(CACHE_EVENT_OPEN_WRITE_FAILED, (void *)-err); Lcancel: if (od) { - od->reading_vec = 0; + od->reading_vec = false; return openWriteCloseDir(event, e); } else return free_CacheVC(this); @@ -1730,7 +1730,7 @@ Cache::open_write(Continuation *cont, const CacheKey *key, CacheHTTPInfo *info, // document doesn't exist, begin write goto Lmiss; } else { - c->od->reading_vec = 1; + c->od->reading_vec = true; // document exists, read vector SET_CONTINUATION_HANDLER(c, &CacheVC::openWriteStartDone); switch (c->do_read_call(&c->first_key)) { diff --git a/iocore/cache/RamCacheCLFUS.cc b/iocore/cache/RamCacheCLFUS.cc index 9a8b2605b3a..eb6d6d5aa8e 100644 --- a/iocore/cache/RamCacheCLFUS.cc +++ b/iocore/cache/RamCacheCLFUS.cc @@ -76,12 +76,12 @@ struct RamCacheCLFUS : public RamCache { int64_t objects; // returns 1 on found/stored, 0 on not found/stored, if provided auxkey1 and auxkey2 must match - int get(INK_MD5 *key, Ptr *ret_data, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0); - int put(INK_MD5 *key, IOBufferData *data, uint32_t len, bool copy = false, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0); - int fixup(const INK_MD5 *key, uint32_t old_auxkey1, uint32_t old_auxkey2, uint32_t new_auxkey1, uint32_t new_auxkey2); - int64_t size() const; + int get(INK_MD5 *key, Ptr *ret_data, 
uint32_t auxkey1 = 0, uint32_t auxkey2 = 0) override; + int put(INK_MD5 *key, IOBufferData *data, uint32_t len, bool copy = false, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0) override; + int fixup(const INK_MD5 *key, uint32_t old_auxkey1, uint32_t old_auxkey2, uint32_t new_auxkey1, uint32_t new_auxkey2) override; + int64_t size() const override; - void init(int64_t max_bytes, Vol *vol); + void init(int64_t max_bytes, Vol *vol) override; // private Vol *vol; // for stats @@ -624,7 +624,7 @@ RamCacheCLFUS::put(INK_MD5 *key, IOBufferData *data, uint32_t len, bool copy, ui return 0; } } - while (1) { + while (true) { victim = lru[0].dequeue(); if (!victim) { if (bytes + size <= max_bytes) diff --git a/iocore/cache/RamCacheLRU.cc b/iocore/cache/RamCacheLRU.cc index 74c83c112c2..31c3d8e98c2 100644 --- a/iocore/cache/RamCacheLRU.cc +++ b/iocore/cache/RamCacheLRU.cc @@ -40,12 +40,12 @@ struct RamCacheLRU : public RamCache { int64_t objects; // returns 1 on found/stored, 0 on not found/stored, if provided auxkey1 and auxkey2 must match - int get(INK_MD5 *key, Ptr *ret_data, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0); - int put(INK_MD5 *key, IOBufferData *data, uint32_t len, bool copy = false, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0); - int fixup(const INK_MD5 *key, uint32_t old_auxkey1, uint32_t old_auxkey2, uint32_t new_auxkey1, uint32_t new_auxkey2); - int64_t size() const; + int get(INK_MD5 *key, Ptr *ret_data, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0) override; + int put(INK_MD5 *key, IOBufferData *data, uint32_t len, bool copy = false, uint32_t auxkey1 = 0, uint32_t auxkey2 = 0) override; + int fixup(const INK_MD5 *key, uint32_t old_auxkey1, uint32_t old_auxkey2, uint32_t new_auxkey1, uint32_t new_auxkey2) override; + int64_t size() const override; - void init(int64_t max_bytes, Vol *vol); + void init(int64_t max_bytes, Vol *vol) override; // private uint16_t *seen; diff --git a/iocore/cluster/ClusterAPI.cc b/iocore/cluster/ClusterAPI.cc index 
8926debf68a..f8132b979e8 100644 --- a/iocore/cluster/ClusterAPI.cc +++ b/iocore/cluster/ClusterAPI.cc @@ -72,7 +72,7 @@ typedef struct RPCHandle { #define RPC_HANDLE_MAGIC 0x12345678 class MachineStatusSM; -typedef int (MachineStatusSM::*MachineStatusSMHandler)(int, void *); +using MachineStatusSMHandler = int (MachineStatusSM::*)(int, void *); class MachineStatusSM : public Continuation { public: @@ -94,7 +94,7 @@ class MachineStatusSM : public Continuation { SET_HANDLER((MachineStatusSMHandler)&MachineStatusSM::MachineStatusSMEvent); } - ~MachineStatusSM() {} + ~MachineStatusSM() override {} int MachineStatusSMEvent(Event *e, void *d); private: @@ -189,7 +189,7 @@ MachineStatusSM::MachineStatusSMEvent(Event * /* e ATS_UNUSED */, void * /* d AT } class ClusterAPIPeriodicSM; -typedef int (ClusterAPIPeriodicSM::*ClusterAPIPeriodicSMHandler)(int, void *); +using ClusterAPIPeriodicSMHandler = int (ClusterAPIPeriodicSM::*)(int, void *); class ClusterAPIPeriodicSM : public Continuation { public: @@ -197,7 +197,7 @@ class ClusterAPIPeriodicSM : public Continuation { SET_HANDLER((ClusterAPIPeriodicSMHandler)&ClusterAPIPeriodicSM::ClusterAPIPeriodicSMEvent); } - ~ClusterAPIPeriodicSM() {} + ~ClusterAPIPeriodicSM() override {} int ClusterAPIPeriodicSMEvent(int, void *); MachineStatusSM *GetNextSM(); @@ -214,7 +214,7 @@ ClusterAPIPeriodicSM::GetNextSM() MachineStatusSM *msmp; MachineStatusSM *msmp_next; - while (1) { + while (true) { msmp = status_callout_q.pop(); if (!msmp) { msmp = (MachineStatusSM *)ink_atomiclist_popall(&status_callout_atomic_q); @@ -242,7 +242,7 @@ ClusterAPIPeriodicSM::ClusterAPIPeriodicSMEvent(int e, void *d) // Maintain node status event order by serializing the processing. 
int ret; - while (1) { + while (true) { if (_active_msmp) { ret = _active_msmp->handleEvent(e, d); if (ret != EVENT_DONE) { diff --git a/iocore/cluster/ClusterCache.cc b/iocore/cluster/ClusterCache.cc index 68abf241da0..a4c5828401b 100644 --- a/iocore/cluster/ClusterCache.cc +++ b/iocore/cluster/ClusterCache.cc @@ -176,7 +176,7 @@ class ClusterVConnectionCache INK_MD5 key; ClusterVConnection *vc; - Entry() : mark_for_delete(0), vc(nullptr) {} + Entry() : mark_for_delete(false), vc(nullptr) {} ~Entry() {} }; @@ -1775,7 +1775,7 @@ CacheContinuation::tunnelClosedEvent(int /* event ATS_UNUSED */, void *c) // Retry DisposeOfDataBuffer continuation //////////////////////////////////////////////////////////// struct retryDisposeOfDataBuffer; -typedef int (retryDisposeOfDataBuffer::*rtryDisOfDBufHandler)(int, void *); +using rtryDisOfDBufHandler = int (retryDisposeOfDataBuffer::*)(int, void *); struct retryDisposeOfDataBuffer : public Continuation { CacheContinuation *c; @@ -2216,9 +2216,9 @@ CacheContinuation::remoteOpEvent(int event_code, Event *e) have_all_data = pToken->is_clear(); // no conn implies all data if (have_all_data) { - read_cluster_vc->have_all_data = 1; + read_cluster_vc->have_all_data = true; } else { - read_cluster_vc->have_all_data = 0; + read_cluster_vc->have_all_data = false; } // Move CacheHTTPInfo reply data into VC read_cluster_vc->marshal_buf = this->getMsgBufferIOBData(); diff --git a/iocore/cluster/ClusterConfig.cc b/iocore/cluster/ClusterConfig.cc index 6894c7ecbac..9d5854fe431 100644 --- a/iocore/cluster/ClusterConfig.cc +++ b/iocore/cluster/ClusterConfig.cc @@ -233,7 +233,7 @@ ClusterConfiguration::ClusterConfiguration() : n_machines(0), changed(0) // ConfigurationContinuation member functions (Internal Class) /*************************************************************************/ struct ConfigurationContinuation; -typedef int (ConfigurationContinuation::*CfgContHandler)(int, void *); +using CfgContHandler = int 
(ConfigurationContinuation::*)(int, void *); struct ConfigurationContinuation : public Continuation { ClusterConfiguration *c; ClusterConfiguration *prev; @@ -380,7 +380,7 @@ cluster_machine_at_depth(unsigned int hash, int *pprobe_depth, ClusterMachine ** } #endif // CLUSTER_TEST - while (1) { + while (true) { // If we are out of our depth, fail // if (probe_depth > CONFIGURATION_HISTORY_PROBE_DEPTH) diff --git a/iocore/cluster/ClusterHandler.cc b/iocore/cluster/ClusterHandler.cc index bf2f1144113..09f1db94aae 100644 --- a/iocore/cluster/ClusterHandler.cc +++ b/iocore/cluster/ClusterHandler.cc @@ -162,7 +162,7 @@ ClusterHandler::ClusterHandler() last_cluster_op_enable(0), last_trace_dump(0), clm(nullptr), - disable_remote_cluster_ops(0), + disable_remote_cluster_ops(false), pw_write_descriptors_built(0), pw_freespace_descriptors_built(0), pw_controldata_descriptors_built(0), @@ -2436,7 +2436,7 @@ ClusterHandler::mainClusterEvent(int event, Event *e) while (io_activity) { io_activity = 0; - only_write_control_msgs = 0; + only_write_control_msgs = false; if (downing) { machine_down(); diff --git a/iocore/cluster/ClusterMachine.cc b/iocore/cluster/ClusterMachine.cc index f809b25b78e..ad4f004cd9a 100644 --- a/iocore/cluster/ClusterMachine.cc +++ b/iocore/cluster/ClusterMachine.cc @@ -168,7 +168,7 @@ ClusterMachine::~ClusterMachine() } struct MachineTimeoutContinuation; -typedef int (MachineTimeoutContinuation::*McTimeoutContHandler)(int, void *); +using McTimeoutContHandler = int (MachineTimeoutContinuation::*)(int, void *); struct MachineTimeoutContinuation : public Continuation { ClusterMachine *m; int diff --git a/iocore/cluster/ClusterProcessor.cc b/iocore/cluster/ClusterProcessor.cc index 2daf5043e18..64e4282df9b 100644 --- a/iocore/cluster/ClusterProcessor.cc +++ b/iocore/cluster/ClusterProcessor.cc @@ -645,7 +645,7 @@ ClusterProcessor::init() // function added to adhere to the name calling convention of init functions int -init_clusterprocessor(void) 
+init_clusterprocessor() { return clusterProcessor.init(); } diff --git a/iocore/cluster/ClusterVConnection.cc b/iocore/cluster/ClusterVConnection.cc index a1a606e6563..f15a5840d44 100644 --- a/iocore/cluster/ClusterVConnection.cc +++ b/iocore/cluster/ClusterVConnection.cc @@ -203,8 +203,8 @@ ClusterVConnection::ClusterVConnection(int is_new_connect_read) n_set_data_msgs(0), n_recv_set_data_msgs(0), pending_remote_fill(0), - remote_ram_cache_hit(0), - have_all_data(0), + remote_ram_cache_hit(false), + have_all_data(false), initial_data_bytes(0), current_cont(nullptr), iov_map(CLUSTER_IOV_NOT_OPEN), diff --git a/iocore/dns/DNS.cc b/iocore/dns/DNS.cc index 39dc8ff2093..3d854ec5031 100644 --- a/iocore/dns/DNS.cc +++ b/iocore/dns/DNS.cc @@ -606,7 +606,7 @@ void DNSHandler::switch_named(int ndx) { for (DNSEntry *e = entries.head; e; e = (DNSEntry *)e->link.next) { - e->written_flag = 0; + e->written_flag = false; if (e->retries < dns_retries) ++(e->retries); // give them another chance } @@ -681,7 +681,7 @@ DNSHandler::rr_failure(int ndx) // actual retries will be done in retry_named called from mainEvent // mark any outstanding requests as not sent for later retry for (DNSEntry *e = entries.head; e; e = (DNSEntry *)e->link.next) { - e->written_flag = 0; + e->written_flag = false; if (e->retries < dns_retries) ++(e->retries); // give them another chance --in_flight; @@ -691,7 +691,7 @@ DNSHandler::rr_failure(int ndx) // move outstanding requests that were sent to this nameserver to another for (DNSEntry *e = entries.head; e; e = (DNSEntry *)e->link.next) { if (e->which_ns == ndx) { - e->written_flag = 0; + e->written_flag = false; if (e->retries < dns_retries) ++(e->retries); // give them another chance --in_flight; @@ -727,7 +727,7 @@ DNSHandler::recv_dns(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) ip_text_buffer ipbuff1, ipbuff2; while ((dnsc = (DNSConnection *)triggered.dequeue())) { - while (1) { + while (true) { IpEndpoint from_ip; socklen_t from_length 
= sizeof(from_ip); @@ -841,10 +841,10 @@ get_dns(DNSHandler *h, uint16_t id) { for (DNSEntry *e = h->entries.head; e; e = (DNSEntry *)e->link.next) { if (e->once_written_flag) { - for (int j = 0; j < MAX_DNS_RETRIES; j++) { - if (e->id[j] == id) { + for (int j : e->id) { + if (j == id) { return e; - } else if (e->id[j] < 0) { + } else if (j < 0) { goto Lnext; } } @@ -1213,17 +1213,17 @@ dns_result(DNSHandler *h, DNSEntry *e, HostEnt *ent, bool retry) Debug("dns", "failed lock for result %s", e->qname); goto Lretry; } - for (int i = 0; i < MAX_DNS_RETRIES; i++) { - if (e->id[i] < 0) + for (int i : e->id) { + if (i < 0) break; - h->release_query_id(e->id[i]); + h->release_query_id(i); } e->postEvent(0, nullptr); } else { - for (int i = 0; i < MAX_DNS_RETRIES; i++) { - if (e->id[i] < 0) + for (int i : e->id) { + if (i < 0) break; - h->release_query_id(e->id[i]); + h->release_query_id(i); } e->mutex = e->action.mutex; SET_CONTINUATION_HANDLER(e, &DNSEntry::postEvent); @@ -1654,7 +1654,7 @@ ink_dns_init(ModuleVersion v) #ifdef TS_HAS_TESTS struct DNSRegressionContinuation; -typedef int (DNSRegressionContinuation::*DNSRegContHandler)(int, void *); +using DNSRegContHandler = int (DNSRegressionContinuation::*)(int, void *); struct DNSRegressionContinuation : public Continuation { int hosts; diff --git a/iocore/eventsystem/I_EThread.h b/iocore/eventsystem/I_EThread.h index 777c759b3b2..a502c5c9f10 100644 --- a/iocore/eventsystem/I_EThread.h +++ b/iocore/eventsystem/I_EThread.h @@ -53,7 +53,7 @@ enum ThreadType { DEDICATED, }; -extern bool shutdown_event_system; +extern volatile bool shutdown_event_system; /** Event System specific type of thread. 
diff --git a/iocore/eventsystem/P_IOBuffer.h b/iocore/eventsystem/P_IOBuffer.h index 64fb6fb41b8..dc299376a3f 100644 --- a/iocore/eventsystem/P_IOBuffer.h +++ b/iocore/eventsystem/P_IOBuffer.h @@ -682,7 +682,8 @@ IOBufferReader::consume(int64_t n) TS_INLINE char &IOBufferReader::operator[](int64_t i) { - IOBufferBlock *b = block.get(); + static char default_ret = '\0'; // This is just to avoid compiler warnings... + IOBufferBlock *b = block.get(); i += start_offset; while (b) { @@ -694,6 +695,8 @@ TS_INLINE char &IOBufferReader::operator[](int64_t i) } ink_release_assert(!"out of range"); + // Never used, just to satisfy compilers not undersatnding the fatality of ink_release_assert(). + return default_ret; } TS_INLINE void diff --git a/iocore/eventsystem/UnixEThread.cc b/iocore/eventsystem/UnixEThread.cc index 8206e3f9fe4..6fdc4804b3b 100644 --- a/iocore/eventsystem/UnixEThread.cc +++ b/iocore/eventsystem/UnixEThread.cc @@ -39,7 +39,7 @@ struct AIOCallback; #define THREAD_MAX_HEARTBEAT_MSECONDS 60 #define NO_ETHREAD_ID -1 -bool shutdown_event_system = false; +volatile bool shutdown_event_system = false; EThread::EThread() : generator((uint64_t)Thread::get_hrtime_updated() ^ (uint64_t)(uintptr_t)this), diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc index f31fa376d03..b70768c2608 100644 --- a/iocore/hostdb/HostDB.cc +++ b/iocore/hostdb/HostDB.cc @@ -29,6 +29,7 @@ #include "Show.h" #include "ts/Tokenizer.h" +#include #include #include @@ -269,9 +270,9 @@ struct HostDBSync : public HostDBBackgroundTask { std::string storage_path; std::string full_path; HostDBSync(int frequency, std::string storage_path, std::string full_path) - : HostDBBackgroundTask(frequency), storage_path(storage_path), full_path(full_path){}; + : HostDBBackgroundTask(frequency), storage_path(std::move(storage_path)), full_path(std::move(full_path)){}; int - sync_event(int, void *) + sync_event(int, void *) override { SET_HANDLER(&HostDBSync::wait_event); start_time = 
Thread::get_hrtime(); @@ -2099,7 +2100,7 @@ HostDBContinuation::master_machine(ClusterConfiguration *cc) } struct ShowHostDB; -typedef int (ShowHostDB::*ShowHostDBEventHandler)(int event, Event *data); +using ShowHostDBEventHandler = int (ShowHostDB::*)(int, Event *); struct ShowHostDB : public ShowCont { char *name; uint16_t port; @@ -2319,7 +2320,8 @@ struct ShowHostDB : public ShowCont { return complete(event, e); } - ShowHostDB(Continuation *c, HTTPHdr *h) : ShowCont(c, h), name(nullptr), port(0), force(0), output_json(false), records_seen(0) + ShowHostDB(Continuation *c, HTTPHdr *h) + : ShowCont(c, h), name(nullptr), port(0), force(false), output_json(false), records_seen(0) { ats_ip_invalidate(&ip); SET_HANDLER(&ShowHostDB::showMain); @@ -2384,7 +2386,7 @@ register_ShowHostDB(Continuation *c, HTTPHdr *h) #define HOSTDB_TEST_LENGTH 100000 struct HostDBTestReverse; -typedef int (HostDBTestReverse::*HostDBTestReverseHandler)(int, void *); +using HostDBTestReverseHandler = int (HostDBTestReverse::*)(int, void *); struct HostDBTestReverse : public Continuation { RegressionTest *test; int type; @@ -2488,18 +2490,18 @@ ink_hostdb_init(ModuleVersion v) /// Pair of IP address and host name from a host file. struct HostFilePair { - typedef HostFilePair self; + using self = HostFilePair; IpAddr ip; const char *name; }; struct HostDBFileContinuation : public Continuation { - typedef HostDBFileContinuation self; + using self = HostDBFileContinuation; int idx; ///< Working index. const char *name; ///< Host name (just for debugging) INK_MD5 md5; ///< Key for entry. - typedef std::vector Keys; + using Keys = std::vector; Keys *keys; ///< Entries from file. ats_scoped_str path; ///< Used to keep the host file name around. 
@@ -2605,7 +2607,8 @@ ParseHostFile(const char *path, unsigned int hostdb_hostfile_check_interval) } // Swap the pointer - hostDB.hosts_file_ptr = parsed_hosts_file_ptr; + if (parsed_hosts_file_ptr != nullptr) + hostDB.hosts_file_ptr = parsed_hosts_file_ptr; // Mark this one as completed, so we can allow another update to happen HostDBFileUpdateActive = 0; } diff --git a/iocore/hostdb/test_RefCountCache.cc b/iocore/hostdb/test_RefCountCache.cc index f1d85c8c853..a709690999a 100644 --- a/iocore/hostdb/test_RefCountCache.cc +++ b/iocore/hostdb/test_RefCountCache.cc @@ -59,7 +59,7 @@ class ExampleStruct : public RefCountObj // Really free the memory, we can use asan leak detection to verify it was freed void - free() + free() override { this->idx = -1; items_freed.insert(this); diff --git a/iocore/net/BIO_fastopen.cc b/iocore/net/BIO_fastopen.cc index d73f696706c..c471b5b7db2 100644 --- a/iocore/net/BIO_fastopen.cc +++ b/iocore/net/BIO_fastopen.cc @@ -27,16 +27,69 @@ #include "BIO_fastopen.h" -static int -fastopen_create(BIO *bio) +// For BoringSSL, which for some reason doesn't have this function. +// (In BoringSSL, sock_read() and sock_write() use the internal +// bio_fd_non_fatal_error() instead.) #1437 +// +// The following is copied from +// https://github.com/openssl/openssl/blob/3befffa39dbaf2688d823fcf2bdfc07d2487be48/crypto/bio/bss_sock.c +// Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. 
+#ifndef HAVE_BIO_SOCK_NON_FATAL_ERROR +int +BIO_sock_non_fatal_error(int err) { - bio->init = 0; - bio->num = NO_FD; - bio->flags = 0; - bio->ptr = nullptr; + switch (err) { +#if defined(OPENSSL_SYS_WINDOWS) +#if defined(WSAEWOULDBLOCK) + case WSAEWOULDBLOCK: +#endif +#endif + +#ifdef EWOULDBLOCK +#ifdef WSAEWOULDBLOCK +#if WSAEWOULDBLOCK != EWOULDBLOCK + case EWOULDBLOCK: +#endif +#else + case EWOULDBLOCK: +#endif +#endif + +#if defined(ENOTCONN) + case ENOTCONN: +#endif + +#ifdef EINTR + case EINTR: +#endif + +#ifdef EAGAIN +#if EWOULDBLOCK != EAGAIN + case EAGAIN: +#endif +#endif + +#ifdef EPROTO + case EPROTO: +#endif + +#ifdef EINPROGRESS + case EINPROGRESS: +#endif - return 1; +#ifdef EALREADY + case EALREADY: +#endif + return (1); + /* break; */ + default: + break; + } + return (0); } +#endif + +static int (*fastopen_create)(BIO *) = BIO_meth_get_create(const_cast(BIO_s_socket())); static int fastopen_destroy(BIO *bio) @@ -44,11 +97,10 @@ fastopen_destroy(BIO *bio) if (bio) { // We expect this BIO to not own the socket, so we must always // be in NOCLOSE mode. - ink_assert(bio->shutdown == BIO_NOCLOSE); - fastopen_create(bio); + ink_assert(BIO_get_shutdown(bio) == BIO_NOCLOSE); } - return 1; + return BIO_meth_get_destroy(const_cast(BIO_s_socket()))(bio); } static int @@ -58,26 +110,27 @@ fastopen_bwrite(BIO *bio, const char *in, int insz) errno = 0; BIO_clear_retry_flags(bio); - ink_assert(bio->num != NO_FD); + int fd = BIO_get_fd(bio, nullptr); + ink_assert(fd != NO_FD); - if (bio->ptr) { + if (BIO_get_data(bio)) { // On the first write only, make a TFO request if TFO is enabled. // The best documentation on the behavior of the Linux API is in // RFC 7413. If we get EINPROGRESS it means that the SYN has been // sent without data and we should retry. 
- const sockaddr *dst = reinterpret_cast(bio->ptr); + const sockaddr *dst = reinterpret_cast(BIO_get_data(bio)); ProxyMutex *mutex = this_ethread()->mutex.get(); NET_INCREMENT_DYN_STAT(net_fastopen_attempts_stat); - err = socketManager.sendto(bio->num, (void *)in, insz, MSG_FASTOPEN, dst, ats_ip_size(dst)); + err = socketManager.sendto(fd, (void *)in, insz, MSG_FASTOPEN, dst, ats_ip_size(dst)); if (err >= 0) { NET_INCREMENT_DYN_STAT(net_fastopen_successes_stat); } - bio->ptr = nullptr; + BIO_set_data(bio, nullptr); } else { - err = socketManager.write(bio->num, (void *)in, insz); + err = socketManager.write(fd, (void *)in, insz); } if (err < 0) { @@ -97,11 +150,12 @@ fastopen_bread(BIO *bio, char *out, int outsz) errno = 0; BIO_clear_retry_flags(bio); - ink_assert(bio->num != NO_FD); + int fd = BIO_get_fd(bio, nullptr); + ink_assert(fd != NO_FD); // TODO: If we haven't done the fastopen, ink_abort(). - err = socketManager.read(bio->num, out, outsz); + err = socketManager.read(fd, out, outsz); if (err < 0) { errno = -err; if (BIO_sock_non_fatal_error(errno)) { @@ -116,39 +170,18 @@ static long fastopen_ctrl(BIO *bio, int cmd, long larg, void *ptr) { switch (cmd) { - case BIO_C_SET_FD: - ink_assert(larg == BIO_CLOSE || larg == BIO_NOCLOSE); - ink_assert(bio->num == NO_FD); - - bio->init = 1; - bio->shutdown = larg; - bio->num = *reinterpret_cast(ptr); - return 0; - case BIO_C_SET_CONNECT: // We only support BIO_set_conn_address(), which sets a sockaddr. ink_assert(larg == 2); - bio->ptr = ptr; - return 0; - - // We are unbuffered so unconditionally succeed on BIO_flush(). 
- case BIO_CTRL_FLUSH: - return 1; - - case BIO_CTRL_PUSH: - case BIO_CTRL_POP: - return 0; - - default: -#if DEBUG - ink_abort("unsupported BIO control cmd=%d larg=%ld ptr=%p", cmd, larg, ptr); -#endif - + BIO_set_data(bio, ptr); return 0; } + + return BIO_meth_get_ctrl(const_cast(BIO_s_socket()))(bio, cmd, larg, ptr); } -static const BIO_METHOD fastopen_methods = { +#ifndef HAVE_BIO_METH_NEW +static const BIO_METHOD fastopen_methods[] = {{ .type = BIO_TYPE_SOCKET, .name = "fastopen", .bwrite = fastopen_bwrite, @@ -159,10 +192,21 @@ static const BIO_METHOD fastopen_methods = { .create = fastopen_create, .destroy = fastopen_destroy, .callback_ctrl = nullptr, -}; +}}; +#else +static const BIO_METHOD *fastopen_methods = [] { + BIO_METHOD *fastopen_methods = BIO_meth_new(BIO_TYPE_SOCKET, "fastopen"); + BIO_meth_set_write(fastopen_methods, fastopen_bwrite); + BIO_meth_set_read(fastopen_methods, fastopen_bread); + BIO_meth_set_ctrl(fastopen_methods, fastopen_ctrl); + BIO_meth_set_create(fastopen_methods, fastopen_create); + BIO_meth_set_destroy(fastopen_methods, fastopen_destroy); + return fastopen_methods; +}(); +#endif const BIO_METHOD * BIO_s_fastopen() { - return &fastopen_methods; + return fastopen_methods; } diff --git a/iocore/net/Connection.cc b/iocore/net/Connection.cc index f7f72070b6b..e52af10b60e 100644 --- a/iocore/net/Connection.cc +++ b/iocore/net/Connection.cc @@ -40,7 +40,7 @@ #define ROUNDUP(x, y) ((((x) + ((y)-1)) / (y)) * (y)) int -get_listen_backlog(void) +get_listen_backlog() { int listen_backlog; diff --git a/iocore/net/I_NetVConnection.h b/iocore/net/I_NetVConnection.h index c7725571c0b..a815cf36e74 100644 --- a/iocore/net/I_NetVConnection.h +++ b/iocore/net/I_NetVConnection.h @@ -33,6 +33,7 @@ #include "I_IOBuffer.h" #include "I_Socks.h" #include +#include #define CONNECT_SUCCESS 1 #define CONNECT_FAILURE 0 @@ -233,9 +234,9 @@ struct NetVCOptions { return *this; } - const char *get_family_string() const; + ts::StringView get_family_string() 
const; - const char *get_proto_string() const; + ts::StringView get_proto_string() const; /// @name Debugging //@{ @@ -260,7 +261,7 @@ class NetVConnection : public VConnection public: // How many bytes have been queued to the OS for sending by haven't been sent yet // Not all platforms support this, and if they don't we'll return -1 for them - virtual const int64_t + virtual int64_t outstanding() { return -1; @@ -591,7 +592,7 @@ class NetVConnection : public VConnection virtual int set_tcp_init_cwnd(int init_cwnd) = 0; /** Set the TCP congestion control algorithm */ - virtual int set_tcp_congestion_control(const char *name, int len) = 0; + virtual int set_tcp_congestion_control(int side) = 0; /** Set local sock addr struct. */ virtual void set_local_addr() = 0; @@ -626,13 +627,13 @@ class NetVConnection : public VConnection } virtual int - populate_protocol(const char **results, int n) const + populate_protocol(ts::StringView *results, int n) const { return 0; } virtual const char * - protocol_contains(const char *tag) const + protocol_contains(ts::StringView prefix) const { return nullptr; } diff --git a/iocore/net/I_SessionAccept.h b/iocore/net/I_SessionAccept.h index 4c677c35e39..3527f3dddb1 100644 --- a/iocore/net/I_SessionAccept.h +++ b/iocore/net/I_SessionAccept.h @@ -28,7 +28,7 @@ #include "I_VConnection.h" struct AclRecord; - +struct HttpProxyPort; /** The base class SessionAccept can not be used directly. The inherited class of SessionAccept (ex. 
HttpSessionAccept) is designed to: @@ -71,7 +71,7 @@ class SessionAccept : public Continuation */ virtual bool accept(NetVConnection *, MIOBuffer *, IOBufferReader *) = 0; - + HttpProxyPort *proxyPort; /* Returns nullptr if the specified client_ip is not allowed by ip_allow * Returns a pointer to the relevant IP policy for later processing otherwise */ static const AclRecord *testIpAllowPolicy(sockaddr const *client_ip); diff --git a/iocore/net/Net.cc b/iocore/net/Net.cc index f032c6f98f9..6e6c35e1d4d 100644 --- a/iocore/net/Net.cc +++ b/iocore/net/Net.cc @@ -41,7 +41,7 @@ int net_retry_delay = 10; int net_throttle_delay = 50; /* milliseconds */ static inline void -configure_net(void) +configure_net() { REC_RegisterConfigUpdateFunc("proxy.config.net.connections_throttle", change_net_connections_throttle, nullptr); REC_ReadConfigInteger(fds_throttle, "proxy.config.net.connections_throttle"); diff --git a/iocore/net/NetVConnection.cc b/iocore/net/NetVConnection.cc index b73ea290ba7..68a950746ce 100644 --- a/iocore/net/NetVConnection.cc +++ b/iocore/net/NetVConnection.cc @@ -45,33 +45,30 @@ NetVConnection::cancel_OOB() return; } -const char * +ts::StringView NetVCOptions::get_proto_string() const { switch (ip_proto) { case USE_TCP: - return TS_PROTO_TAG_TCP; + return IP_PROTO_TAG_TCP; case USE_UDP: - return TS_PROTO_TAG_UDP; + return IP_PROTO_TAG_UDP; default: - return nullptr; + break; } + return nullptr; } -const char * +ts::StringView NetVCOptions::get_family_string() const { - const char *retval; switch (ip_family) { case AF_INET: - retval = TS_PROTO_TAG_IPV4; - break; + return IP_PROTO_TAG_IPV4; case AF_INET6: - retval = TS_PROTO_TAG_IPV6; - break; + return IP_PROTO_TAG_IPV6; default: - retval = nullptr; break; } - return retval; + return nullptr; } diff --git a/iocore/net/OCSPStapling.cc b/iocore/net/OCSPStapling.cc index 6d24f7e72fa..5d061b2bee3 100644 --- a/iocore/net/OCSPStapling.cc +++ b/iocore/net/OCSPStapling.cc @@ -37,6 +37,7 @@ struct certinfo { unsigned 
char idx[20]; // Index in session cache SHA1 hash of certificate OCSP_CERTID *cid; // Certificate ID for OCSP requests or nullptr if ID cannot be determined char *uri; // Responder details + char *certname; ink_mutex stapling_mutex; unsigned char resp_der[MAX_STAPLING_DER]; unsigned int resp_derlen; @@ -53,6 +54,8 @@ certinfo_free(void * /*parent*/, void *ptr, CRYPTO_EX_DATA * /*ad*/, int /*idx*/ return; if (cinf->uri) OPENSSL_free(cinf->uri); + if (cinf->certname) + ats_free(cinf->certname); ink_mutex_destroy(&cinf->stapling_mutex); OPENSSL_free(cinf); } @@ -60,7 +63,7 @@ certinfo_free(void * /*parent*/, void *ptr, CRYPTO_EX_DATA * /*ad*/, int /*idx*/ static int ssl_stapling_index = -1; void -ssl_stapling_ex_init(void) +ssl_stapling_ex_init() { if (ssl_stapling_index != -1) return; @@ -110,25 +113,26 @@ ssl_stapling_init_cert(SSL_CTX *ctx, X509 *cert, const char *certname) STACK_OF(OPENSSL_STRING) *aia = nullptr; if (!cert) { - Debug("ssl_ocsp", "Null cert passed in"); + Error("null cert passed in for %s", certname); return false; } cinf = (certinfo *)SSL_CTX_get_ex_data(ctx, ssl_stapling_index); if (cinf) { - Debug("ssl_ocsp", "certificate already initialized!"); + Note("certificate already initialized for %s", certname); return false; } cinf = (certinfo *)OPENSSL_malloc(sizeof(certinfo)); if (!cinf) { - Debug("ssl_ocsp", "error allocating memory!"); + Error("error allocating memory for %s", certname); return false; } // Initialize certinfo cinf->cid = nullptr; cinf->uri = nullptr; + cinf->certname = ats_strdup(certname); cinf->resp_derlen = 0; ink_mutex_init(&cinf->stapling_mutex, "stapling_mutex"); cinf->is_expire = true; @@ -138,7 +142,7 @@ ssl_stapling_init_cert(SSL_CTX *ctx, X509 *cert, const char *certname) issuer = stapling_get_issuer(ctx, cert); if (issuer == nullptr) { - Debug("ssl_ocsp", "cannot get issuer certificate from %s!", certname); + Note("cannot get issuer certificate from %s", certname); return false; } @@ -151,12 +155,12 @@ 
ssl_stapling_init_cert(SSL_CTX *ctx, X509 *cert, const char *certname) if (aia) cinf->uri = sk_OPENSSL_STRING_pop(aia); if (!cinf->uri) { - Debug("ssl_ocsp", "no responder URI in %s", certname); + Note("no responder URI for %s", certname); } if (aia) X509_email_free(aia); - Debug("ssl_ocsp", "success to init certinfo into SSL_CTX: %p", ctx); + Note("successfully initialized certinfo for %s into SSL_CTX: %p", certname, ctx); return true; } @@ -183,12 +187,12 @@ stapling_cache_response(OCSP_RESPONSE *rsp, certinfo *cinf) resp_derlen = i2d_OCSP_RESPONSE(rsp, &p); if (resp_derlen == 0) { - Error("stapling_cache_response: can not encode OCSP stapling response"); + Error("stapling_cache_response: cannot decode OCSP response for %s", cinf->certname); return false; } if (resp_derlen > MAX_STAPLING_DER) { - Error("stapling_cache_response: OCSP stapling response too big (%u bytes)", resp_derlen); + Error("stapling_cache_response: OCSP response too big (%u bytes) for %s", resp_derlen, cinf->certname); return false; } @@ -220,12 +224,12 @@ stapling_check_response(certinfo *cinf, OCSP_RESPONSE *rsp) bs = OCSP_response_get1_basic(rsp); if (bs == nullptr) { // If we can't parse response just pass it back to client - Error("stapling_check_response: can not parsing response"); + Error("stapling_check_response: cannot parse response for %s", cinf->certname); return SSL_TLSEXT_ERR_OK; } if (!OCSP_resp_find_status(bs, cinf->cid, &status, &reason, &rev, &thisupd, &nextupd)) { // If ID not present just pass it back to client - Error("stapling_check_response: certificate ID not present in response"); + Error("stapling_check_response: certificate ID not present in response for %s", cinf->certname); } else { OCSP_check_validity(thisupd, nextupd, 300, -1); } @@ -290,7 +294,7 @@ process_responder(OCSP_REQUEST *req, char *host, char *path, char *port, int req BIO_set_nbio(cbio, 1); if (BIO_do_connect(cbio) <= 0 && !BIO_should_retry(cbio)) { - Debug("ssl_ocsp", "process_responder: fail to 
connect to OCSP respond server"); + Error("process_responder: failed to connect to OCSP response server. host=%s port=%s path=%s", host, port, path); goto end; } resp = query_responder(cbio, host, path, req, req_timeout); @@ -338,13 +342,14 @@ stapling_refresh_response(certinfo *cinf, OCSP_RESPONSE **prsp) Debug("ssl_ocsp", "stapling_refresh_response: query response received"); stapling_check_response(cinf, *prsp); } else { + // TODO: We should log the actual openssl error Error("stapling_refresh_response: responder error"); } if (!stapling_cache_response(*prsp, cinf)) { Error("stapling_refresh_response: can not cache response"); } else { - Debug("ssl_ocsp", "stapling_refresh_response: success to refresh response"); + Debug("ssl_ocsp", "stapling_refresh_response: successful refresh OCSP response"); } done: @@ -356,7 +361,7 @@ stapling_refresh_response(certinfo *cinf, OCSP_RESPONSE **prsp) err: rv = false; - Debug("ssl_ocsp", "stapling_refresh_response: fail to refresh response"); + Error("stapling_refresh_response: failed to refresh OCSP response"); goto done; } @@ -382,10 +387,10 @@ ocsp_update() if (cinf->resp_derlen == 0 || cinf->is_expire || cinf->expire_time < current_time) { ink_mutex_release(&cinf->stapling_mutex); if (stapling_refresh_response(cinf, &resp)) { - Debug("ssl_ocsp", "Successful OCSP refresh response for 1 certificate"); + Debug("Successfully refreshed OCSP for %s certificate. url=%s", cinf->certname, cinf->uri); SSL_INCREMENT_DYN_STAT(ssl_ocsp_refreshed_cert_stat); } else { - Debug("ssl_ocsp", "Failed to refresh OCSP for 1 certificate"); + Error("Failed to refresh OCSP for %s certificate. 
url=%s", cinf->certname, cinf->uri); SSL_INCREMENT_DYN_STAT(ssl_ocsp_refresh_cert_failure_stat); } } else { @@ -408,7 +413,7 @@ ssl_callback_ocsp_stapling(SSL *ssl) // originally was, cinf = stapling_get_cert_info(ssl->ctx); cinf = stapling_get_cert_info(SSL_get_SSL_CTX(ssl)); if (cinf == nullptr) { - Debug("ssl_ocsp", "ssl_callback_ocsp_stapling: fail to get certificate information"); + Error("ssl_callback_ocsp_stapling: failed to get certificate information"); return SSL_TLSEXT_ERR_NOACK; } @@ -416,7 +421,7 @@ ssl_callback_ocsp_stapling(SSL *ssl) current_time = time(nullptr); if (cinf->resp_derlen == 0 || cinf->is_expire || cinf->expire_time < current_time) { ink_mutex_release(&cinf->stapling_mutex); - Debug("ssl_ocsp", "ssl_callback_ocsp_stapling: fail to get certificate status"); + Error("ssl_callback_ocsp_stapling: failed to get certificate status for %s", cinf->certname); return SSL_TLSEXT_ERR_NOACK; } else { unsigned char *p = (unsigned char *)OPENSSL_malloc(cinf->resp_derlen); @@ -424,7 +429,7 @@ ssl_callback_ocsp_stapling(SSL *ssl) memcpy(p, cinf->resp_der, cinf->resp_derlen); ink_mutex_release(&cinf->stapling_mutex); SSL_set_tlsext_status_ocsp_resp(ssl, p, len); - Debug("ssl_ocsp", "ssl_callback_ocsp_stapling: success to get certificate status"); + Debug("ssl_ocsp", "ssl_callback_ocsp_stapling: successfully got certificate status for %s", cinf->certname); return SSL_TLSEXT_ERR_OK; } } diff --git a/iocore/net/P_NetAccept.h b/iocore/net/P_NetAccept.h index 3c14b874f1a..73e070a39e2 100644 --- a/iocore/net/P_NetAccept.h +++ b/iocore/net/P_NetAccept.h @@ -44,6 +44,7 @@ struct NetAccept; class Event; +class SSLNextProtocolAccept; // // Default accept function // Accepts as many connections as possible, returning the number accepted @@ -80,13 +81,16 @@ struct NetAcceptAction : public Action, public RefCountObj { // Handles accepting connections. 
// struct NetAccept : public Continuation { - ink_hrtime period; + ink_hrtime period = 0; Server server; - AcceptFunctionPtr accept_fn; - int ifd; + AcceptFunctionPtr accept_fn = nullptr; + int ifd = NO_FD; + int id = -1; Ptr action_; + SSLNextProtocolAccept *snpa = nullptr; EventIO ep; + HttpProxyPort *proxyPort = nullptr; NetProcessor::AcceptOptions opt; virtual NetProcessor *getNetProcessor() const; diff --git a/iocore/net/P_SSLConfig.h b/iocore/net/P_SSLConfig.h index 2977464f1e6..7728a412090 100644 --- a/iocore/net/P_SSLConfig.h +++ b/iocore/net/P_SSLConfig.h @@ -70,7 +70,6 @@ struct SSLConfigParams : public ConfigInfo { char *cipherSuite; char *client_cipherSuite; char *ticket_key_filename; - ssl_ticket_key_block *default_global_keyblock; int configExitOnLoadError; int clientCertLevel; int verify_depth; @@ -142,7 +141,6 @@ struct SSLConfig { static void reconfigure(); static SSLConfigParams *acquire(); static void release(SSLConfigParams *params); - typedef ConfigProcessor::scoped_config scoped_config; private: @@ -161,6 +159,37 @@ struct SSLCertificateConfig { static int configid; }; +struct SSLTicketParams : public ConfigInfo { + ssl_ticket_key_block *default_global_keyblock; + char *ticket_key_filename; + void LoadTicket(); + void cleanup(); + + ~SSLTicketParams() { cleanup(); } +}; + +struct SSLTicketKeyConfig { + static void startup(); + static bool reconfigure(); + + static SSLTicketParams * + acquire() + { + return static_cast(configProcessor.get(configid)); + } + + static void + release(SSLTicketParams *params) + { + configProcessor.release(configid, params); + } + + typedef ConfigProcessor::scoped_config scoped_config; + +private: + static int configid; +}; + extern SSLSessionCache *session_cache; #endif diff --git a/iocore/net/P_SSLNetVConnection.h b/iocore/net/P_SSLNetVConnection.h index ae09bd8b514..1878400573e 100644 --- a/iocore/net/P_SSLNetVConnection.h +++ b/iocore/net/P_SSLNetVConnection.h @@ -37,6 +37,7 @@ #include "P_UnixNetVConnection.h" 
#include "P_UnixNet.h" #include "ts/apidefs.h" +#include #include #include @@ -66,6 +67,7 @@ #define SSL_DEF_TLS_RECORD_MSEC_THRESHOLD 1000 class SSLNextProtocolSet; +class SSLNextProtocolAccept; struct SSLCertLookup; typedef enum { @@ -87,8 +89,8 @@ class SSLNetVConnection : public UnixNetVConnection typedef UnixNetVConnection super; ///< Parent type. public: - virtual int sslStartHandShake(int event, int &err); - virtual void free(EThread *t); + int sslStartHandShake(int event, int &err) override; + void free(EThread *t) override; virtual void enableRead() @@ -97,8 +99,8 @@ class SSLNetVConnection : public UnixNetVConnection write.enabled = 1; } - virtual bool - getSSLHandShakeComplete() const + bool + getSSLHandShakeComplete() const override { return sslHandShakeComplete; } @@ -123,10 +125,10 @@ class SSLNetVConnection : public UnixNetVConnection int sslServerHandShakeEvent(int &err); int sslClientHandShakeEvent(int &err); - virtual void net_read_io(NetHandler *nh, EThread *lthread); - virtual int64_t load_buffer_and_write(int64_t towrite, MIOBufferAccessor &buf, int64_t &total_written, int &needs); + void net_read_io(NetHandler *nh, EThread *lthread) override; + int64_t load_buffer_and_write(int64_t towrite, MIOBufferAccessor &buf, int64_t &total_written, int &needs) override; void registerNextProtocolSet(const SSLNextProtocolSet *); - virtual void do_io_close(int lerrno = -1); + void do_io_close(int lerrno = -1) override; //////////////////////////////////////////////////////////// // Instances of NetVConnection should be allocated // @@ -240,15 +242,15 @@ class SSLNetVConnection : public UnixNetVConnection return ssl ? 
SSL_get_cipher_name(ssl) : nullptr; } - int populate_protocol(const char **results, int n) const; - const char *protocol_contains(const char *tag) const; + int populate_protocol(ts::StringView *results, int n) const override; + const char *protocol_contains(ts::StringView tag) const override; /** * Populate the current object based on the socket information in in the * con parameter and the ssl object in the arg parameter * This is logic is invoked when the NetVC object is created in a new thread context */ - virtual int populate(Connection &con, Continuation *c, void *arg); + int populate(Connection &con, Continuation *c, void *arg) override; SSL *ssl; ink_hrtime sslHandshakeBeginTime; @@ -262,7 +264,8 @@ class SSLNetVConnection : public UnixNetVConnection SSLNetVConnection(const SSLNetVConnection &); SSLNetVConnection &operator=(const SSLNetVConnection &); - const char *map_tls_protocol_to_tag(const char *proto_string) const; + ts::StringView map_tls_protocol_to_tag(const char *proto_string) const; + bool update_rbio(bool move_to_socket); bool sslHandShakeComplete; bool sslClientRenegotiationAbort; diff --git a/iocore/net/P_SSLNextProtocolAccept.h b/iocore/net/P_SSLNextProtocolAccept.h index 94b2553bb47..61364e3cabb 100644 --- a/iocore/net/P_SSLNextProtocolAccept.h +++ b/iocore/net/P_SSLNextProtocolAccept.h @@ -49,6 +49,8 @@ class SSLNextProtocolAccept : public SessionAccept bool unregisterEndpoint(const char *protocol, Continuation *handler); SLINK(SSLNextProtocolAccept, link); + SSLNextProtocolSet *getProtoSet(); + SSLNextProtocolSet *cloneProtoSet(); private: int mainEvent(int event, void *netvc); diff --git a/iocore/net/P_SSLNextProtocolSet.h b/iocore/net/P_SSLNextProtocolSet.h index 9a043701e30..87979612d2b 100644 --- a/iocore/net/P_SSLNextProtocolSet.h +++ b/iocore/net/P_SSLNextProtocolSet.h @@ -37,7 +37,9 @@ class SSLNextProtocolSet bool registerEndpoint(const char *, Continuation *); bool unregisterEndpoint(const char *, Continuation *); + bool 
unregisterEndpoint(const char *proto); bool advertiseProtocols(const unsigned char **out, unsigned *len) const; + SSLNextProtocolSet *clone() const; Continuation *findEndpoint(const unsigned char *, unsigned) const; diff --git a/iocore/net/P_UnixNetState.h b/iocore/net/P_UnixNetState.h index ca807afb20d..5bdb6d00cd2 100644 --- a/iocore/net/P_UnixNetState.h +++ b/iocore/net/P_UnixNetState.h @@ -48,14 +48,13 @@ class UnixNetVConnection; struct NetState { volatile int enabled; - volatile int error; VIO vio; Link ready_link; SLink enable_link; int in_enabled_list; int triggered; - NetState() : enabled(0), error(0), vio(VIO::NONE), in_enabled_list(0), triggered(0) {} + NetState() : enabled(0), vio(VIO::NONE), in_enabled_list(0), triggered(0) {} }; #endif diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h index 0fa1411826f..8a25d6405ac 100644 --- a/iocore/net/P_UnixNetVConnection.h +++ b/iocore/net/P_UnixNetVConnection.h @@ -36,6 +36,7 @@ #include "I_NetVConnection.h" #include "P_UnixNetState.h" #include "P_Connection.h" +#include "P_NetAccept.h" class UnixNetVConnection; class NetHandler; @@ -98,17 +99,19 @@ struct OOB_callback : public Continuation { } }; +enum tcp_congestion_control_t { CLIENT_SIDE, SERVER_SIDE }; + class UnixNetVConnection : public NetVConnection { public: - virtual const int64_t outstanding(); - virtual VIO *do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf); - virtual VIO *do_io_write(Continuation *c, int64_t nbytes, IOBufferReader *buf, bool owner = false); + int64_t outstanding() override; + VIO *do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf) override; + VIO *do_io_write(Continuation *c, int64_t nbytes, IOBufferReader *buf, bool owner = false) override; - virtual bool get_data(int id, void *data); + bool get_data(int id, void *data) override; - virtual Action *send_OOB(Continuation *cont, char *buf, int len); - virtual void cancel_OOB(); + Action *send_OOB(Continuation *cont, char *buf, int 
len) override; + void cancel_OOB() override; virtual void setSSLHandshakeWantsRead(bool /* flag */) @@ -132,8 +135,8 @@ class UnixNetVConnection : public NetVConnection return false; } - virtual void do_io_close(int lerrno = -1); - virtual void do_io_shutdown(ShutdownHowTo_t howto); + void do_io_close(int lerrno = -1) override; + void do_io_shutdown(ShutdownHowTo_t howto) override; //////////////////////////////////////////////////////////// // Set the timeouts associated with this connection. // @@ -147,21 +150,21 @@ class UnixNetVConnection : public NetVConnection // called when handing an event from this NetVConnection,// // or the NetVConnection creation callback. // //////////////////////////////////////////////////////////// - virtual void set_active_timeout(ink_hrtime timeout_in); - virtual void set_inactivity_timeout(ink_hrtime timeout_in); - virtual void cancel_active_timeout(); - virtual void cancel_inactivity_timeout(); - virtual void set_action(Continuation *c); - virtual void add_to_keep_alive_queue(); - virtual void remove_from_keep_alive_queue(); - virtual bool add_to_active_queue(); + void set_active_timeout(ink_hrtime timeout_in) override; + void set_inactivity_timeout(ink_hrtime timeout_in) override; + void cancel_active_timeout() override; + void cancel_inactivity_timeout() override; + void set_action(Continuation *c) override; + void add_to_keep_alive_queue() override; + void remove_from_keep_alive_queue() override; + bool add_to_active_queue() override; virtual void remove_from_active_queue(); // The public interface is VIO::reenable() - virtual void reenable(VIO *vio); - virtual void reenable_re(VIO *vio); + void reenable(VIO *vio) override; + void reenable_re(VIO *vio) override; - virtual SOCKET get_socket(); + SOCKET get_socket() override; virtual ~UnixNetVConnection(); @@ -173,33 +176,31 @@ class UnixNetVConnection : public NetVConnection UnixNetVConnection(); int - populate_protocol(const char **results, int n) const + 
populate_protocol(ts::StringView *results, int n) const override { int retval = 0; - if (n > 0) { - results[retval++] = options.get_proto_string(); - if (n > 1) { - results[retval++] = options.get_family_string(); + if (n > retval) { + if (!(results[retval] = options.get_proto_string()).isEmpty()) + ++retval; + if (n > retval) { + if (!(results[retval] = options.get_family_string()).isEmpty()) + ++retval; } } return retval; } const char * - protocol_contains(const char *tag) const + protocol_contains(ts::StringView tag) const override { - const char *retval = nullptr; - unsigned int tag_len = strlen(tag); - const char *test_tag = options.get_proto_string(); - if (strncmp(tag, test_tag, tag_len) == 0) { - retval = test_tag; - } else { - test_tag = options.get_family_string(); - if (strncmp(tag, test_tag, tag_len) == 0) { - retval = test_tag; + ts::StringView retval = options.get_proto_string(); + if (strncmp(tag.ptr(), retval.ptr(), tag.size()) != 0) { + retval = options.get_family_string(); + if (strncmp(tag.ptr(), retval.ptr(), tag.size()) != 0) { + retval.clear(); } } - return retval; + return retval.ptr(); } private: @@ -235,7 +236,6 @@ class UnixNetVConnection : public NetVConnection virtual int64_t load_buffer_and_write(int64_t towrite, MIOBufferAccessor &buf, int64_t &total_written, int &needs); void readDisable(NetHandler *nh); void readSignalError(NetHandler *nh, int err); - void writeSignalError(NetHandler *nh, int err); int readSignalDone(int event, NetHandler *nh); int readSignalAndUpdate(int event); void readReschedule(NetHandler *nh); @@ -290,6 +290,7 @@ class UnixNetVConnection : public NetVConnection ink_hrtime submit_time; OOB_callback *oob_ptr; bool from_accept_thread; + NetAccept *accept_object; // es - origin_trace associated connections bool origin_trace; @@ -308,14 +309,14 @@ class UnixNetVConnection : public NetVConnection virtual int populate(Connection &con, Continuation *c, void *arg); virtual void free(EThread *t); - virtual ink_hrtime 
get_inactivity_timeout(); - virtual ink_hrtime get_active_timeout(); + ink_hrtime get_inactivity_timeout() override; + ink_hrtime get_active_timeout() override; - virtual void set_local_addr(); - virtual void set_remote_addr(); - virtual int set_tcp_init_cwnd(int init_cwnd); - virtual int set_tcp_congestion_control(const char *name, int len); - virtual void apply_options(); + void set_local_addr() override; + void set_remote_addr() override; + int set_tcp_init_cwnd(int init_cwnd) override; + int set_tcp_congestion_control(int side) override; + void apply_options() override; friend void write_to_net_io(NetHandler *, UnixNetVConnection *, EThread *); @@ -445,20 +446,36 @@ UnixNetVConnection::set_tcp_init_cwnd(int init_cwnd) } TS_INLINE int -UnixNetVConnection::set_tcp_congestion_control(const char *name, int len) +UnixNetVConnection::set_tcp_congestion_control(int side) { #ifdef TCP_CONGESTION - int rv = 0; - rv = setsockopt(con.fd, IPPROTO_TCP, TCP_CONGESTION, reinterpret_cast(const_cast(name)), len); - if (rv < 0) { - Error("Unable to set TCP congestion control on socket %d to \"%.*s\", errno=%d (%s)", con.fd, len, name, errno, - strerror(errno)); + RecString congestion_control; + int ret; + + if (side == CLIENT_SIDE) { + ret = REC_ReadConfigStringAlloc(congestion_control, "proxy.config.net.tcp_congestion_control_in"); } else { - Debug("socket", "Setting TCP congestion control on socket [%d] to \"%.*s\" -> %d", con.fd, len, name, rv); + ret = REC_ReadConfigStringAlloc(congestion_control, "proxy.config.net.tcp_congestion_control_out"); } - return rv; + + if (ret == REC_ERR_OKAY) { + int len = strlen(congestion_control); + if (len > 0) { + int rv = 0; + rv = setsockopt(con.fd, IPPROTO_TCP, TCP_CONGESTION, reinterpret_cast(congestion_control), len); + if (rv < 0) { + Error("Unable to set TCP congestion control on socket %d to \"%.*s\", errno=%d (%s)", con.fd, len, congestion_control, + errno, strerror(errno)); + } else { + Debug("socket", "Setting TCP congestion 
control on socket [%d] to \"%.*s\" -> %d", con.fd, len, congestion_control, rv); + } + } + ats_free(congestion_control); + return 0; + } + return -1; #else - Debug("socket", "Setting TCP congestion control %.*s is not supported on this platform.", len, name); + Debug("socket", "Setting TCP congestion control is not supported on this platform."); return -1; #endif } diff --git a/iocore/net/SSLCertLookup.cc b/iocore/net/SSLCertLookup.cc index 1e4fb09d94d..a1dbe5ae0c8 100644 --- a/iocore/net/SSLCertLookup.cc +++ b/iocore/net/SSLCertLookup.cc @@ -306,7 +306,7 @@ SSLCertLookup::get(unsigned i) const struct ats_wildcard_matcher { ats_wildcard_matcher() { - if (regex.compile("^\\*\\.[^\\*.]+") != 0) { + if (regex.compile(R"(^\*\.[^\*.]+)") != 0) { Fatal("failed to compile TLS wildcard matching regex"); } } diff --git a/iocore/net/SSLClientUtils.cc b/iocore/net/SSLClientUtils.cc index f7af713e2ca..b4ee3959d9d 100644 --- a/iocore/net/SSLClientUtils.cc +++ b/iocore/net/SSLClientUtils.cc @@ -30,7 +30,7 @@ #include #if (OPENSSL_VERSION_NUMBER >= 0x10000000L) // openssl returns a const SSL_METHOD -typedef const SSL_METHOD *ink_ssl_method_t; +using ink_ssl_method_t = const SSL_METHOD *; #else typedef SSL_METHOD *ink_ssl_method_t; #endif diff --git a/iocore/net/SSLConfig.cc b/iocore/net/SSLConfig.cc index 18b9a14793d..03c42e07a43 100644 --- a/iocore/net/SSLConfig.cc +++ b/iocore/net/SSLConfig.cc @@ -32,7 +32,8 @@ #include "ts/ink_platform.h" #include "ts/I_Layout.h" -#include +#include +#include #include "P_Net.h" #include "P_SSLConfig.h" #include "P_SSLUtils.h" @@ -42,6 +43,7 @@ int SSLConfig::configid = 0; int SSLCertificateConfig::configid = 0; +int SSLTicketKeyConfig::configid = 0; int SSLConfigParams::ssl_maxrecord = 0; bool SSLConfigParams::ssl_allow_client_renegotiation = false; bool SSLConfigParams::ssl_ocsp_enabled = false; @@ -89,7 +91,6 @@ SSLConfigParams::reset() serverCertPathOnly = serverCertChainFilename = configFilePath = serverCACertFilename = serverCACertPath = 
clientCertPath = clientKeyPath = clientCACertFilename = clientCACertPath = cipherSuite = client_cipherSuite = dhparamsFile = serverKeyPathOnly = ticket_key_filename = nullptr; - default_global_keyblock = nullptr; client_ctx = nullptr; clientCertLevel = client_verify_depth = verify_depth = clientVerify = 0; ssl_ctx_options = SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3; @@ -121,7 +122,7 @@ SSLConfigParams::cleanup() dhparamsFile = (char *)ats_free_null(dhparamsFile); ssl_wire_trace_ip = (IpAddr *)ats_free_null(ssl_wire_trace_ip); ticket_key_filename = (char *)ats_free_null(ticket_key_filename); - ticket_block_free(default_global_keyblock); + freeCTXmap(); SSLReleaseContext(client_ctx); reset(); @@ -261,17 +262,6 @@ SSLConfigParams::initialize() ats_free(ssl_server_ca_cert_filename); ats_free(CACertRelativePath); -#if HAVE_OPENSSL_SESSION_TICKETS - - if (REC_ReadConfigStringAlloc(ticket_key_filename, "proxy.config.ssl.server.ticket_key.filename") == REC_ERR_OKAY && - this->ticket_key_filename != nullptr) { - ats_scoped_str ticket_key_path(Layout::relative_to(this->serverCertPathOnly, this->ticket_key_filename)); - default_global_keyblock = ssl_create_ticket_keyblock(ticket_key_path); - } else { - default_global_keyblock = ssl_create_ticket_keyblock(nullptr); - } -#endif - // SSL session cache configurations REC_ReadConfigInteger(ssl_session_cache, "proxy.config.ssl.session_cache"); REC_ReadConfigInteger(ssl_session_cache_size, "proxy.config.ssl.session_cache.size"); @@ -307,7 +297,9 @@ SSLConfigParams::initialize() ssl_client_cert_path = nullptr; REC_ReadConfigStringAlloc(ssl_client_cert_filename, "proxy.config.ssl.client.cert.filename"); REC_ReadConfigStringAlloc(ssl_client_cert_path, "proxy.config.ssl.client.cert.path"); - set_paths_helper(ssl_client_cert_path, ssl_client_cert_filename, nullptr, &clientCertPath); + if (ssl_client_cert_filename && ssl_client_cert_path) { + set_paths_helper(ssl_client_cert_path, ssl_client_cert_filename, nullptr, &clientCertPath); + } 
ats_free_null(ssl_client_cert_filename); ats_free_null(ssl_client_cert_path); @@ -351,9 +343,9 @@ SSLConfigParams::initialize() client_ctx = SSLInitClientContext(this); if (!client_ctx) { SSLError("Can't initialize the SSL client, HTTPS in remap rules will not function"); + } else { + InsertCTX(this->clientCertPath, this->client_ctx); } - - InsertCTX(this->clientCertPath, this->client_ctx); } // getCTX: returns the context attached to the given certificate @@ -430,7 +422,7 @@ SSLConfigParams::deleteKey(cchar *key) const } SSL_CTX * -SSLConfigParams::getClientSSL_CTX(void) const +SSLConfigParams::getClientSSL_CTX() const { return client_ctx; } @@ -467,12 +459,10 @@ SSLCertificateConfig::startup() { sslCertUpdate = new ConfigUpdateHandler(); sslCertUpdate->attach("proxy.config.ssl.server.multicert.filename"); - sslCertUpdate->attach("proxy.config.ssl.server.ticket_key.filename"); sslCertUpdate->attach("proxy.config.ssl.server.cert.path"); sslCertUpdate->attach("proxy.config.ssl.server.private_key.path"); sslCertUpdate->attach("proxy.config.ssl.server.cert_chain.filename"); sslCertUpdate->attach("proxy.config.ssl.server.session_ticket.enable"); - // Exit if there are problems on the certificate loading and the // proxy.config.ssl.server.multicert.exit_on_load_fail is true SSLConfig::scoped_config params; @@ -525,3 +515,56 @@ SSLCertificateConfig::release(SSLCertLookup *lookup) { configProcessor.release(configid, lookup); } + +void +SSLTicketParams::LoadTicket() +{ + cleanup(); + +#if HAVE_OPENSSL_SESSION_TICKETS + + SSLConfig::scoped_config params; + + if (REC_ReadConfigStringAlloc(ticket_key_filename, "proxy.config.ssl.server.ticket_key.filename") == REC_ERR_OKAY && + ticket_key_filename != nullptr) { + ats_scoped_str ticket_key_path(Layout::relative_to(params->serverCertPathOnly, ticket_key_filename)); + default_global_keyblock = ssl_create_ticket_keyblock(ticket_key_path); + } else { + default_global_keyblock = ssl_create_ticket_keyblock(nullptr); + } + if 
(!default_global_keyblock) { + Fatal("Could not load Ticket Key from %s", ticket_key_filename); + return; + } + Debug("ssl", "ticket key reloaded from %s", ticket_key_filename); + +#endif +} + +void +SSLTicketKeyConfig::startup() +{ + auto sslTicketKey = new ConfigUpdateHandler(); + + sslTicketKey->attach("proxy.config.ssl.server.ticket_key.filename"); + reconfigure(); +} + +bool +SSLTicketKeyConfig::reconfigure() +{ + SSLTicketParams *ticketKey = new SSLTicketParams(); + + if (ticketKey) + ticketKey->LoadTicket(); + + configid = configProcessor.set(configid, ticketKey); + return true; +} + +void +SSLTicketParams::cleanup() +{ + ticket_block_free(default_global_keyblock); + ticket_key_filename = (char *)ats_free_null(ticket_key_filename); +} diff --git a/iocore/net/SSLNetProcessor.cc b/iocore/net/SSLNetProcessor.cc index 1aa942d7d5c..18bd2517218 100644 --- a/iocore/net/SSLNetProcessor.cc +++ b/iocore/net/SSLNetProcessor.cc @@ -49,7 +49,7 @@ struct OCSPContinuation : public Continuation { #endif /* HAVE_OPENSSL_OCSP_STAPLING */ void -SSLNetProcessor::cleanup(void) +SSLNetProcessor::cleanup() { } @@ -63,6 +63,8 @@ SSLNetProcessor::start(int, size_t stacksize) if (!SSLCertificateConfig::startup()) return -1; + SSLTicketKeyConfig::startup(); + // Acquire a SSLConfigParams instance *after* we start SSL up. 
// SSLConfig::scoped_config params; diff --git a/iocore/net/SSLNetVConnection.cc b/iocore/net/SSLNetVConnection.cc index 5ad895b9eb0..daf89834c23 100644 --- a/iocore/net/SSLNetVConnection.cc +++ b/iocore/net/SSLNetVConnection.cc @@ -206,11 +206,17 @@ ssl_read_from_net(SSLNetVConnection *sslvc, EThread *lthread, int64_t &ret) while (sslErr == SSL_ERROR_NONE) { int64_t block_write_avail = buf.writer()->block_write_avail(); if (block_write_avail <= 0) { - buf.writer()->add_block(); - block_write_avail = buf.writer()->block_write_avail(); - if (block_write_avail <= 0) { - Warning("Cannot add new block"); + // If we filled up one block, give back to the event loop so we don't + // overbuffer. + if (bytes_read > 0) { break; + } else { // Make sure there is a block to write into + buf.writer()->add_block(); + block_write_avail = buf.writer()->block_write_avail(); + if (block_write_avail <= 0) { + Warning("Cannot add new block"); + break; + } } } @@ -239,6 +245,8 @@ ssl_read_from_net(SSLNetVConnection *sslvc, EThread *lthread, int64_t &ret) bytes_read += nread; if (nread > 0) { buf.writer()->fill(nread); // Tell the buffer, we've used the bytes + sslvc->netActivity(lthread); + //Warning("set next_inactivity %" PRId64 " current time %" PRId64, sslvc->next_inactivity_timeout_at, Thread::get_hrtime()); } break; case SSL_ERROR_WANT_WRITE: @@ -302,6 +310,10 @@ ssl_read_from_net(SSLNetVConnection *sslvc, EThread *lthread, int64_t &ret) ret = bytes_read; event = (s->vio.ntodo() <= 0) ? SSL_READ_COMPLETE : SSL_READ_READY; + if (sslErr == SSL_ERROR_NONE && s->vio.ntodo() > 0) { + // We stopped with data on the wire (to avoid overbuffering). 
Make sure we are triggered + sslvc->read.triggered = 1; + } } else { // if( bytes_read > 0 ) #if defined(_DEBUG) if (bytes_read == 0) { @@ -322,80 +334,103 @@ ssl_read_from_net(SSLNetVConnection *sslvc, EThread *lthread, int64_t &ret) int64_t SSLNetVConnection::read_raw_data() { - int64_t r = 0; - int64_t toread = INT_MAX; - // read data - int64_t rattempted = 0, total_read = 0; - unsigned niov = 0; - IOVec tiovec[NET_MAX_IOV]; - if (toread) { - IOBufferBlock *b = this->handShakeBuffer->first_write_block(); - do { - niov = 0; - rattempted = 0; - while (b && niov < NET_MAX_IOV) { - int64_t a = b->write_avail(); - if (a > 0) { - tiovec[niov].iov_base = b->_end; - int64_t togo = toread - total_read - rattempted; - if (a > togo) - a = togo; - tiovec[niov].iov_len = a; - rattempted += a; - niov++; - if (a >= togo) - break; - } - b = b->next.get(); - } - - // If there was no room to write into the buffer then skip the read - if (niov > 0) { - ink_assert(niov > 0); - ink_assert(niov <= countof(tiovec)); - r = socketManager.readv(this->con.fd, &tiovec[0], niov); + int64_t r = 0; + int64_t total_read = 0; + int64_t rattempted = 0; + char *buffer = 0; + int buf_len; + IOBufferBlock *b = this->handShakeBuffer->first_write_block(); + + rattempted = b->write_avail(); + while (rattempted) { + buffer = b->_end; + buf_len = rattempted; + b = b->next.get(); + + r = socketManager.read(this->con.fd, buffer, buf_len); + NET_INCREMENT_DYN_STAT(net_calls_to_read_stat); + total_read += rattempted; + + // last read failed or was incomplete + if (r != rattempted || !b) + break; - NET_INCREMENT_DYN_STAT(net_calls_to_read_stat); - total_read += rattempted; - } else { // No more space to write, break out - r = 0; - break; - } - } while (rattempted && r == rattempted && total_read < toread); + rattempted = b->write_avail(); + } - // if we have already moved some bytes successfully, summarize in r - if (total_read != rattempted) { - if (r <= 0) - r = total_read - rattempted; - else - r = 
total_read - rattempted + r; - } - // check for errors - if (r <= 0) { - if (r == -EAGAIN || r == -ENOTCONN) { - NET_INCREMENT_DYN_STAT(net_calls_to_read_nodata_stat); - } - return r; + // If we have already moved some bytes successfully, adjust total_read to reflect reality + // If any read succeeded, we should return success + if (r != rattempted) { + // If the first read failds, we should return error + if (r <= 0 && total_read > rattempted) { + r = total_read - rattempted; + } else { + r = total_read - rattempted + r; } - NET_SUM_DYN_STAT(net_read_bytes_stat, r); + } + NET_SUM_DYN_STAT(net_read_bytes_stat, r); + if (r > 0) { this->handShakeBuffer->fill(r); + + char *start = this->handShakeReader->start(); + char *end = this->handShakeReader->end(); + this->handShakeBioStored = end - start; + + // Sets up the buffer as a read only bio target + // Must be reset on each read + BIO *rbio = BIO_new_mem_buf(start, this->handShakeBioStored); + BIO_set_mem_eof_return(rbio, -1); + SSL_set0_rbio(this->ssl, rbio); } - char *start = this->handShakeReader->start(); - char *end = this->handShakeReader->end(); - this->handShakeBioStored = end - start; + Debug("ssl", "%p read r=%" PRId64 " total=%" PRId64 " bio=%d\n", this, r, total_read, this->handShakeBioStored); - // Sets up the buffer as a read only bio target - // Must be reset on each read - BIO *rbio = BIO_new_mem_buf(start, this->handShakeBioStored); - BIO_set_mem_eof_return(rbio, -1); - SSL_set0_rbio(this->ssl, rbio); + // check for errors + if (r <= 0) { + if (r == -EAGAIN || r == -ENOTCONN) { + NET_INCREMENT_DYN_STAT(net_calls_to_read_nodata_stat); + } + } return r; } +// +// Return true if we updated the rbio with another +// memory chunk (should be ready for another read right away) +// +bool +SSLNetVConnection::update_rbio(bool move_to_socket) +{ + bool retval = false; + if (BIO_eof(SSL_get_rbio(this->ssl))) { + this->handShakeReader->consume(this->handShakeBioStored); + this->handShakeBioStored = 0; + // Load up 
the next block if present + if (this->handShakeReader->is_read_avail_more_than(0)) { + // Setup the next iobuffer block to drain + char *start = this->handShakeReader->start(); + char *end = this->handShakeReader->end(); + this->handShakeBioStored = end - start; + + // Sets up the buffer as a read only bio target + // Must be reset on each read + BIO *rbio = BIO_new_mem_buf(start, this->handShakeBioStored); + BIO_set_mem_eof_return(rbio, -1); + SSL_set0_rbio(this->ssl, rbio); + retval = true; + } else if (move_to_socket) { // Handshake buffer is empty, move to the socket rbio + BIO *rbio = BIO_new_fd(this->get_socket(), BIO_NOCLOSE); + BIO_set_mem_eof_return(rbio, -1); + SSL_set0_rbio(this->ssl, rbio); + free_handshake_buffers(); + } + } + return retval; +} + // changed by YTS Team, yamsat void SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread) @@ -452,45 +487,44 @@ SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread) ret = sslStartHandShake(SSL_EVENT_SERVER, err); } // If we have flipped to blind tunnel, don't read ahead - if (this->handShakeReader && this->attributes != HttpProxyPort::TRANSPORT_BLIND_TUNNEL) { - // Check and consume data that has been read - if (BIO_eof(SSL_get_rbio(this->ssl))) { - this->handShakeReader->consume(this->handShakeBioStored); - this->handShakeBioStored = 0; - } - } else if (this->attributes == HttpProxyPort::TRANSPORT_BLIND_TUNNEL) { - // Now in blind tunnel. Set things up to read what is in the buffer - // Must send the READ_COMPLETE here before considering - // forwarding on the handshake buffer, so the - // SSLNextProtocolTrampoline has a chance to do its - // thing before forwarding the buffers. - this->readSignalDone(VC_EVENT_READ_COMPLETE, nh); - - // If the handshake isn't set yet, this means the tunnel - // decision was make in the SNI callback. 
We must move - // the client hello message back into the standard read.vio - // so it will get forwarded onto the origin server - if (!this->getSSLHandShakeComplete()) { - this->sslHandShakeComplete = 1; - - // Copy over all data already read in during the SSL_accept - // (the client hello message) - NetState *s = &this->read; - MIOBufferAccessor &buf = s->vio.buffer; - int64_t r = buf.writer()->write(this->handShakeHolder); - s->vio.nbytes += r; - s->vio.ndone += r; - - // Clean up the handshake buffers - this->free_handshake_buffers(); - - if (r > 0) { - // Kick things again, so the data that was copied into the - // vio.read buffer gets processed - this->readSignalDone(VC_EVENT_READ_COMPLETE, nh); + if (this->handShakeReader) { + if (this->attributes != HttpProxyPort::TRANSPORT_BLIND_TUNNEL) { + // Check and consume data that has been read + update_rbio(false); + } else { + // Now in blind tunnel. Set things up to read what is in the buffer + // Must send the READ_COMPLETE here before considering + // forwarding on the handshake buffer, so the + // SSLNextProtocolTrampoline has a chance to do its + // thing before forwarding the buffers. + this->readSignalDone(VC_EVENT_READ_COMPLETE, nh); + + // If the handshake isn't set yet, this means the tunnel + // decision was make in the SNI callback. 
We must move + // the client hello message back into the standard read.vio + // so it will get forwarded onto the origin server + if (!this->getSSLHandShakeComplete()) { + this->sslHandShakeComplete = 1; + + // Copy over all data already read in during the SSL_accept + // (the client hello message) + NetState *s = &this->read; + MIOBufferAccessor &buf = s->vio.buffer; + int64_t r = buf.writer()->write(this->handShakeHolder); + s->vio.nbytes += r; + s->vio.ndone += r; + + // Clean up the handshake buffers + this->free_handshake_buffers(); + + if (r > 0) { + // Kick things again, so the data that was copied into the + // vio.read buffer gets processed + this->readSignalDone(VC_EVENT_READ_COMPLETE, nh); + } } + return; // Leave if we are tunneling } - return; } if (ret == EVENT_ERROR) { this->read.triggered = 0; @@ -508,8 +542,16 @@ SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread) return; } } - read.triggered = 0; - nh->read_ready_list.remove(this); + // move over to the socket if we haven't already + if (this->handShakeBuffer) { + read.triggered = update_rbio(true); + } else { + Debug("ssl", "Want read from socket"); + read.triggered = 0; + } + if (!read.triggered) { + nh->read_ready_list.remove(this); + } readReschedule(nh); } else if (ret == SSL_HANDSHAKE_WANT_CONNECT || ret == SSL_HANDSHAKE_WANT_WRITE) { write.triggered = 0; @@ -541,36 +583,7 @@ SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread) } // At this point we are at the post-handshake SSL processing - // If the read BIO is not already a socket, consider changing it - if (this->handShakeReader) { - // Check out if there is anything left in the current bio - if (!BIO_eof(SSL_get_rbio(this->ssl))) { - // Still data remaining in the current BIO block - } else { - // Consume what SSL has read so far. 
- this->handShakeReader->consume(this->handShakeBioStored); - - // If we are empty now, switch over - if (this->handShakeReader->read_avail() <= 0) { - // Switch the read bio over to a socket bio - SSL_set_rfd(this->ssl, this->get_socket()); - this->free_handshake_buffers(); - } else { - // Setup the next iobuffer block to drain - char *start = this->handShakeReader->start(); - char *end = this->handShakeReader->end(); - this->handShakeBioStored = end - start; - - // Sets up the buffer as a read only bio target - // Must be reset on each read - BIO *rbio = BIO_new_mem_buf(start, this->handShakeBioStored); - BIO_set_mem_eof_return(rbio, -1); - SSL_set0_rbio(this->ssl, rbio); - } - } - } - // Otherwise, we already replaced the buffer bio with a socket bio - + // // not sure if this do-while loop is really needed here, please replace // this comment if you know do { @@ -850,8 +863,8 @@ SSLNetVConnection::do_io_close(int lerrno) void SSLNetVConnection::free(EThread *t) { - got_remote_addr = 0; - got_local_addr = 0; + got_remote_addr = false; + got_local_addr = false; read.vio.mutex.clear(); write.vio.mutex.clear(); this->mutex.clear(); @@ -950,7 +963,7 @@ SSLNetVConnection::sslStartHandShake(int event, int &err) // directly into blind tunnel mode if (cc && SSLCertContext::OPT_TUNNEL == cc->opt && this->is_transparent) { this->attributes = HttpProxyPort::TRANSPORT_BLIND_TUNNEL; - sslHandShakeComplete = 1; + sslHandShakeComplete = true; SSL_free(this->ssl); this->ssl = nullptr; return EVENT_DONE; @@ -1072,22 +1085,37 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err) // over the buffered handshake packets to the O.S. return EVENT_DONE; } else if (SSL_HOOK_OP_TERMINATE == hookOpRequested) { - sslHandShakeComplete = 1; + sslHandShakeComplete = true; return EVENT_DONE; } - - int retval = 1; // Initialze with a non-error value - // All the pre-accept hooks have completed, proceed with the actual accept. 
- if (BIO_eof(SSL_get_rbio(this->ssl))) { // No more data in the buffer - // Read from socket to fill in the BIO buffer with the - // raw handshake data before calling the ssl accept calls. - retval = this->read_raw_data(); - if (retval == 0) { - // EOF, go away, we stopped in the handshake - SSLDebugVC(this, "SSL handshake error: EOF"); - return EVENT_ERROR; - } + if (this->handShakeReader) { + if (BIO_eof(SSL_get_rbio(this->ssl))) { // No more data in the buffer + // Is this the first read? + if (!this->handShakeReader->is_read_avail_more_than(0)) { + Debug("ssl", "%p first read\n", this); + // Read from socket to fill in the BIO buffer with the + // raw handshake data before calling the ssl accept calls. + int retval = this->read_raw_data(); + if (retval < 0) { + if (retval == -EAGAIN) { + // No data at the moment, hang tight + // SSLDebugVC(this, "SSL handshake: EAGAIN"); + return SSL_HANDSHAKE_WANT_READ; + } else { + // An error, make us go away + SSLDebugVC(this, "SSL handshake error: read_retval=%d", retval); + return EVENT_ERROR; + } + } else if (retval == 0) { + // EOF, go away, we stopped in the handshake + SSLDebugVC(this, "SSL handshake error: EOF"); + return EVENT_ERROR; + } + } else { + update_rbio(false); + } + } // Still data in the BIO } ssl_error_t ssl_error = SSLAccept(ssl); @@ -1098,11 +1126,11 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err) SSLDebugVC(this, "SSL handshake error: %s (%d), errno=%d", SSLErrorName(ssl_error), ssl_error, err); // start a blind tunnel if tr-pass is set and data does not look like ClientHello - char *buf = handShakeBuffer->buf(); + char *buf = handShakeBuffer ? 
handShakeBuffer->buf() : NULL; if (getTransparentPassThrough() && buf && *buf != SSL_OP_HANDSHAKE) { SSLDebugVC(this, "Data does not look like SSL handshake, starting blind tunnel"); this->attributes = HttpProxyPort::TRANSPORT_BLIND_TUNNEL; - sslHandShakeComplete = 0; + sslHandShakeComplete = false; return EVENT_CONT; } } @@ -1184,15 +1212,6 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err) case SSL_ERROR_WANT_READ: TraceIn(trace, get_remote_addr(), get_remote_port(), "SSL server handshake ERROR_WANT_READ"); - if (retval == -EAGAIN) { - // No data at the moment, hang tight - SSLDebugVC(this, "SSL handshake: EAGAIN"); - return SSL_HANDSHAKE_WANT_READ; - } else if (retval < 0) { - // An error, make us go away - SSLDebugVC(this, "SSL handshake error: read_retval=%d", retval); - return EVENT_ERROR; - } return SSL_HANDSHAKE_WANT_READ; // This value is only defined in openssl has been patched to @@ -1210,7 +1229,7 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err) #if defined(SSL_ERROR_WANT_SNI_RESOLVE) || defined(SSL_ERROR_WANT_X509_LOOKUP) if (this->attributes == HttpProxyPort::TRANSPORT_BLIND_TUNNEL || SSL_HOOK_OP_TUNNEL == hookOpRequested) { this->attributes = HttpProxyPort::TRANSPORT_BLIND_TUNNEL; - sslHandShakeComplete = 0; + sslHandShakeComplete = false; return EVENT_CONT; } else { // Stopping for some other reason, perhaps loading certificate @@ -1321,7 +1340,7 @@ SSLNetVConnection::sslClientHandShakeEvent(int &err) case SSL_ERROR_SSL: default: { - err = errno; + err = (errno) ? errno : -ENET_CONNECT_FAILED; // FIXME -- This triggers a retry on cases of cert validation errors.... 
Debug("ssl", "SSLNetVConnection::sslClientHandShakeEvent, SSL_ERROR_SSL"); SSL_CLR_ERR_INCR_DYN_STAT(this, ssl_error_ssl, "SSLNetVConnection::sslClientHandShakeEvent, SSL_ERROR_SSL errno=%d", errno); @@ -1340,7 +1359,6 @@ SSLNetVConnection::sslClientHandShakeEvent(int &err) void SSLNetVConnection::registerNextProtocolSet(const SSLNextProtocolSet *s) { - ink_release_assert(this->npnSet == nullptr); this->npnSet = s; } @@ -1353,7 +1371,6 @@ SSLNetVConnection::advertise_next_protocol(SSL *ssl, const unsigned char **out, SSLNetVConnection *netvc = SSLNetVCAccess(ssl); ink_release_assert(netvc != nullptr); - if (netvc->npnSet && netvc->npnSet->advertiseProtocols(out, outlen)) { // Successful return tells OpenSSL to advertise. return SSL_TLSEXT_ERR_OK; @@ -1373,7 +1390,6 @@ SSLNetVConnection::select_next_protocol(SSL *ssl, const unsigned char **out, uns unsigned npnsz = 0; ink_release_assert(netvc != nullptr); - if (netvc->npnSet && netvc->npnSet->advertiseProtocols(&npn, &npnsz)) { // SSL_select_next_proto chooses the first server-offered protocol that appears in the clients protocol set, ie. the // server selects the protocol. This is a n^2 search, so it's preferable to keep the protocol set short. @@ -1438,17 +1454,22 @@ bool SSLNetVConnection::callHooks(TSEvent eventId) { // Only dealing with the SNI/CERT hook so far. - ink_assert(eventId == TS_EVENT_SSL_CERT); + ink_assert(eventId == TS_EVENT_SSL_CERT || eventId == TS_EVENT_SSL_SERVERNAME); Debug("ssl", "callHooks sslHandshakeHookState=%d", this->sslHandshakeHookState); // First time through, set the type of the hook that is currently being invoked - if (HANDSHAKE_HOOKS_PRE == sslHandshakeHookState) { + if ((this->sslHandshakeHookState == HANDSHAKE_HOOKS_PRE || this->sslHandshakeHookState == HANDSHAKE_HOOKS_DONE) && + eventId == TS_EVENT_SSL_CERT) { // the previous hook should be DONE and set curHook to nullptr before trigger the sni hook. 
ink_assert(curHook == nullptr); // set to HOOKS_CERT means CERT/SNI hooks has called by SSL_accept() this->sslHandshakeHookState = HANDSHAKE_HOOKS_CERT; // get Hooks curHook = ssl_hooks->get(TS_SSL_CERT_INTERNAL_HOOK); + } else if (eventId == TS_EVENT_SSL_SERVERNAME) { + if (!curHook) { + curHook = ssl_hooks->get(TS_SSL_SERVERNAME_INTERNAL_HOOK); + } } else { // Not in the right state // reenable and continue @@ -1457,14 +1478,20 @@ SSLNetVConnection::callHooks(TSEvent eventId) bool reenabled = true; if (curHook != nullptr) { - // Otherwise, we have plugin hooks to run this->sslHandshakeHookState = HANDSHAKE_HOOKS_INVOKE; curHook->invoke(eventId, this); - reenabled = (this->sslHandshakeHookState != HANDSHAKE_HOOKS_INVOKE); - } else { - // no SNI-Hooks set, set state to HOOKS_DONE - // no plugins registered for this hook, return (reenabled == true) - sslHandshakeHookState = HANDSHAKE_HOOKS_DONE; + reenabled = eventId != TS_EVENT_SSL_CERT || (this->sslHandshakeHookState != HANDSHAKE_HOOKS_INVOKE); + } + + // All done with the current hook chain + if (curHook == nullptr) { + if (eventId == TS_EVENT_SSL_CERT) { + // Set the HookState to done because we are all done with the CERT/SERVERNAME hook chains + sslHandshakeHookState = HANDSHAKE_HOOKS_DONE; + } else if (eventId == TS_EVENT_SSL_SERVERNAME) { + // Reset the HookState to PRE, so the cert hook chain can start + sslHandshakeHookState = HANDSHAKE_HOOKS_PRE; + } } return reenabled; } @@ -1534,21 +1561,34 @@ SSLNetVConnection::populate(Connection &con, Continuation *c, void *arg) return EVENT_DONE; } -const char * +ts::StringView SSLNetVConnection::map_tls_protocol_to_tag(const char *proto_string) const { - const char *retval = nullptr; - const char *ssl_proto = getSSLProtocol(); - if (ssl_proto && strncmp(ssl_proto, "TLSv1", 5) == 0) { - if (ssl_proto[5] == '\0') { - retval = TS_PROTO_TAG_TLS_1_0; - } else if (ssl_proto[5] == '.') { - if (ssl_proto[6] == '1' && ssl_proto[7] == '\0') { - retval = TS_PROTO_TAG_TLS_1_1; 
- } else if (ssl_proto[6] == '2' && ssl_proto[7] == '\0') { - retval = TS_PROTO_TAG_TLS_1_2; - } else if (ssl_proto[6] == '3' && ssl_proto[7] == '\0') { - retval = TS_PROTO_TAG_TLS_1_3; + // tag to use if something goes wrong with fetching the TLS protocol string. + static constexpr ts::StringView UNKNOWN("tls/?.?", ts::StringView::literal); + + ts::StringView retval{UNKNOWN}; // return this if the protocol lookup doesn't work. + + if (proto_string) { + // openSSL guarantees the case of the protocol string. + if (proto_string[0] == 'T' && proto_string[1] == 'L' && proto_string[2] == 'S' && proto_string[3] == 'v' && + proto_string[4] == '1') { + if (proto_string[5] == 0) { + retval = IP_PROTO_TAG_TLS_1_0; + } else if (proto_string[5] == '.' && proto_string[7] == 0) { + switch (proto_string[6]) { + case '1': + retval = IP_PROTO_TAG_TLS_1_1; + break; + case '2': + retval = IP_PROTO_TAG_TLS_1_2; + break; + case '3': + retval = IP_PROTO_TAG_TLS_1_3; + break; + default: + break; + } } } } @@ -1556,14 +1596,13 @@ SSLNetVConnection::map_tls_protocol_to_tag(const char *proto_string) const } int -SSLNetVConnection::populate_protocol(const char **results, int n) const +SSLNetVConnection::populate_protocol(ts::StringView *results, int n) const { int retval = 0; - if (n > 0) { - results[0] = map_tls_protocol_to_tag(getSSLProtocol()); - if (results[0] != nullptr) { - retval++; - } + if (n > retval) { + results[retval] = map_tls_protocol_to_tag(getSSLProtocol()); + if (results[retval]) + ++retval; if (n > retval) { retval += super::populate_protocol(results + retval, n - retval); } @@ -1572,15 +1611,14 @@ SSLNetVConnection::populate_protocol(const char **results, int n) const } const char * -SSLNetVConnection::protocol_contains(const char *tag) const +SSLNetVConnection::protocol_contains(ts::StringView prefix) const { - const char *retval = nullptr; - const char *tls_tag = map_tls_protocol_to_tag(getSSLProtocol()); - unsigned int tag_len = strlen(tag); - if (tag_len <= 
strlen(tls_tag) && strncmp(tag, tls_tag, tag_len) == 0) { - retval = tls_tag; + const char *retval = nullptr; + ts::StringView tag = map_tls_protocol_to_tag(getSSLProtocol()); + if (prefix.size() <= tag.size() && strncmp(tag.ptr(), prefix.ptr(), prefix.size()) == 0) { + retval = tag.ptr(); } else { - retval = super::protocol_contains(tag); + retval = super::protocol_contains(prefix); } return retval; } diff --git a/iocore/net/SSLNextProtocolAccept.cc b/iocore/net/SSLNextProtocolAccept.cc index 47068c32580..5ec4d72bb91 100644 --- a/iocore/net/SSLNextProtocolAccept.cc +++ b/iocore/net/SSLNextProtocolAccept.cc @@ -125,7 +125,6 @@ SSLNextProtocolAccept::mainEvent(int event, void *edata) SSLNetVConnection *netvc = ssl_netvc_cast(event, edata); Debug("ssl", "[SSLNextProtocolAccept:mainEvent] event %d netvc %p", event, netvc); - switch (event) { case NET_EVENT_ACCEPT: ink_release_assert(netvc != nullptr); @@ -170,6 +169,18 @@ SSLNextProtocolAccept::SSLNextProtocolAccept(Continuation *ep, bool transparent_ SET_HANDLER(&SSLNextProtocolAccept::mainEvent); } +SSLNextProtocolSet * +SSLNextProtocolAccept::getProtoSet() +{ + return &this->protoset; +} + +SSLNextProtocolSet * +SSLNextProtocolAccept::cloneProtoSet() +{ + return this->protoset.clone(); +} + SSLNextProtocolAccept::~SSLNextProtocolAccept() { free_MIOBuffer(this->buffer); diff --git a/iocore/net/SSLNextProtocolSet.cc b/iocore/net/SSLNextProtocolSet.cc index 68663490e18..38267a001dd 100644 --- a/iocore/net/SSLNextProtocolSet.cc +++ b/iocore/net/SSLNextProtocolSet.cc @@ -62,7 +62,7 @@ create_npn_advertisement(const SSLNextProtocolSet::NextProtocolEndpoint::list_ty } for (ep = endpoints.head; ep != nullptr; ep = endpoints.next(ep)) { - Debug("ssl", "advertising protocol %s", ep->protocol); + Debug("ssl", "advertising protocol %s, %p", ep->protocol, ep->endpoint); advertised = append_protocol(ep->protocol, advertised); } @@ -75,6 +75,19 @@ create_npn_advertisement(const SSLNextProtocolSet::NextProtocolEndpoint::list_ty 
return false; } +// copies th eprotocols but not the endpoints + +SSLNextProtocolSet * +SSLNextProtocolSet::clone() const +{ + const SSLNextProtocolSet::NextProtocolEndpoint *ep; + SSLNextProtocolSet *newProtoSet = new SSLNextProtocolSet(); + for (ep = this->endpoints.head; ep != nullptr; ep = this->endpoints.next(ep)) { + newProtoSet->registerEndpoint(ep->protocol, ep->endpoint); + } + return newProtoSet; +} + bool SSLNextProtocolSet::advertiseProtocols(const unsigned char **out, unsigned *len) const { @@ -118,7 +131,7 @@ bool SSLNextProtocolSet::unregisterEndpoint(const char *proto, Continuation *ep) { for (NextProtocolEndpoint *e = this->endpoints.head; e; e = this->endpoints.next(e)) { - if (strcmp(proto, e->protocol) == 0 && e->endpoint == ep) { + if (strcmp(proto, e->protocol) == 0 && (ep == nullptr || e->endpoint == ep)) { // Protocol must be registered only once; no need to remove // any more entries. this->endpoints.remove(e); diff --git a/iocore/net/SSLUtils.cc b/iocore/net/SSLUtils.cc index 99d98fc8972..3b2c2a08701 100644 --- a/iocore/net/SSLUtils.cc +++ b/iocore/net/SSLUtils.cc @@ -67,7 +67,6 @@ #define SSL_ACTION_TAG "action" #define SSL_ACTION_TUNNEL_TAG "tunnel" #define SSL_SESSION_TICKET_ENABLED "ssl_ticket_enabled" -#define SSL_SESSION_TICKET_KEY_FILE_TAG "ticket_key_name" #define SSL_KEY_DIALOG "ssl_key_dialog" #define SSL_CERT_SEPARATE_DELIM ',' @@ -84,12 +83,6 @@ #endif #endif -#if (OPENSSL_VERSION_NUMBER >= 0x10000000L) // openssl returns a const SSL_METHOD -typedef const SSL_METHOD *ink_ssl_method_t; -#else -typedef SSL_METHOD *ink_ssl_method_t; -#endif - /* * struct ssl_user_config: gather user provided settings from ssl_multicert.config in to this single struct * ssl_ticket_enabled - session ticket enabled @@ -106,8 +99,6 @@ struct ssl_user_config { ssl_user_config() : opt(SSLCertContext::OPT_NONE) { REC_ReadConfigInt32(session_ticket_enabled, "proxy.config.ssl.server.session_ticket.enable"); - REC_ReadConfigStringAlloc(ticket_key_filename, 
"proxy.config.ssl.server.ticket_key.filename"); - Debug("ssl", "ticket key filename %s", (const char *)ticket_key_filename); } int session_ticket_enabled; @@ -116,7 +107,6 @@ struct ssl_user_config { ats_scoped_str first_cert; ats_scoped_str ca; ats_scoped_str key; - ats_scoped_str ticket_key_filename; ats_scoped_str dialog; SSLCertContext::Option opt; }; @@ -149,15 +139,11 @@ static InkHashTable *ssl_cipher_name_table = nullptr; * may use pthreads and openssl without confusing us here. (TS-2271). */ -// Only define this function if the version of openssl really has a -// CRYPTO_THREADID_set_callback function. openssl 1.1.0 defines it to 0 -#if OPENSSL_VERSION_NUMBER < 0x10100000L static void SSL_pthreads_thread_id(CRYPTO_THREADID *id) { CRYPTO_THREADID_set_numeric(id, (unsigned long)pthread_self()); } -#endif // The locking callback goes away with openssl 1.1 and CRYPTO_LOCK is on longer defined #ifdef CRYPTO_LOCK @@ -228,10 +214,10 @@ ssl_session_timed_out(SSL_SESSION *session) static void ssl_rm_cached_session(SSL_CTX *ctx, SSL_SESSION *sess); static SSL_SESSION * -#if OPENSSL_VERSION_NUMBER > 0x10100000L -ssl_get_cached_session(SSL *ssl, const unsigned char *id, int len, int *copy) -#else +#if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) ssl_get_cached_session(SSL *ssl, unsigned char *id, int len, int *copy) +#else +ssl_get_cached_session(SSL *ssl, const unsigned char *id, int len, int *copy) #endif { SSLSessionID sid(id, len); @@ -291,8 +277,6 @@ ssl_new_cached_session(SSL *ssl, SSL_SESSION *sess) static void ssl_rm_cached_session(SSL_CTX *ctx, SSL_SESSION *sess) { - SSL_CTX_remove_session(ctx, sess); - unsigned int len = 0; const unsigned char *id = SSL_SESSION_get_id(sess, &len); SSLSessionID sid(id, len); @@ -414,9 +398,21 @@ ssl_cert_callback(SSL *ssl, void * /*arg*/) // Return 1 for success, 0 for error, or -1 to pause return retval; } + +/* + * Cannot stop this callback. 
Always reeneabled + */ +static int +ssl_servername_only_callback(SSL *ssl, int * /* ad */, void * /*arg*/) +{ + SSLNetVConnection *netvc = SSLNetVCAccess(ssl); + netvc->callHooks(TS_EVENT_SSL_SERVERNAME); + return SSL_TLSEXT_ERR_OK; +} + #else static int -ssl_servername_callback(SSL *ssl, int * /* ad */, void * /*arg*/) +ssl_servername_and_cert_callback(SSL *ssl, int * /* ad */, void * /*arg*/) { SSLNetVConnection *netvc = SSLNetVCAccess(ssl); bool reenabled; @@ -578,16 +574,13 @@ ssl_context_enable_tickets(SSL_CTX *ctx, const char *ticket_key_path) // so that we don't leave a ticket_key pointer attached if it fails. if (SSL_CTX_set_tlsext_ticket_key_cb(ctx, ssl_callback_session_ticket) == 0) { Error("failed to set session ticket callback"); - goto fail; + ticket_block_free(keyblock); + return nullptr; } SSL_CTX_clear_options(ctx, SSL_OP_NO_TICKET); return keyblock; -fail: - ticket_block_free(keyblock); - return nullptr; - #else /* !HAVE_OPENSSL_SESSION_TICKETS */ (void)ticket_key_path; return nullptr; @@ -794,68 +787,47 @@ SSLRecRawStatSyncCount(const char *name, RecDataT data_type, RecData *data, RecR return RecRawStatSyncCount(name, data_type, data, rsb, id); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L +#if OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) +#define ssl_malloc(size, file, line) ssl_malloc(size) +#define ssl_realloc(ptr, size, file, line) ssl_realloc(ptr, size) +#define ssl_free(ptr, file, line) ssl_free(ptr) +#define ssl_track_malloc(size, file, line) ssl_track_malloc(size) +#define ssl_track_realloc(ptr, size, file, line) ssl_track_realloc(ptr, size) +#define ssl_track_free(ptr, file, line) ssl_track_free(ptr) +#endif + void * ssl_malloc(size_t size, const char * /*filename */, int /*lineno*/) -#else -void * -ssl_malloc(size_t size) -#endif { return ats_malloc(size); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L void * ssl_realloc(void *ptr, size_t size, const char * /*filename*/, int /*lineno*/) -#else -void * 
-ssl_realloc(void *ptr, size_t size) -#endif { return ats_realloc(ptr, size); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L void ssl_free(void *ptr, const char * /*filename*/, int /*lineno*/) -#else -void -ssl_free(void *ptr) -#endif { ats_free(ptr); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L void * ssl_track_malloc(size_t size, const char * /*filename*/, int /*lineno*/) -#else -void * -ssl_track_malloc(size_t size) -#endif { return ats_track_malloc(size, &ssl_memory_allocated); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L void * ssl_track_realloc(void *ptr, size_t size, const char * /*filename*/, int /*lineno*/) -#else -void * -ssl_track_realloc(void *ptr, size_t size) -#endif { return ats_track_realloc(ptr, size, &ssl_memory_allocated, &ssl_memory_freed); } -#if OPENSSL_VERSION_NUMBER > 0x10100000L void ssl_track_free(void *ptr, const char * /*filename*/, int /*lineno*/) -#else -void -ssl_track_free(void *ptr) -#endif { ats_track_free(ptr, &ssl_memory_freed); } @@ -865,7 +837,7 @@ SSLInitializeLibrary() { if (!open_ssl_initialized) { // BoringSSL does not have the memory functions -#ifndef OPENSSL_IS_BORINGSSL +#ifdef HAVE_CRYPTO_SET_MEM_FUNCTIONS if (res_track_memory >= 2) { CRYPTO_set_mem_functions(ssl_track_malloc, ssl_track_realloc, ssl_track_free); } else { @@ -1221,7 +1193,7 @@ SSLDiagnostic(const SourceLocation &loc, bool debug, SSLNetVConnection *vc, cons while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) != 0) { if (debug) { if (unlikely(diags->on())) { - diags->log("ssl", DL_Debug, &loc, "SSL::%lu:%s:%s:%d%s%s%s%s", es, ERR_error_string(l, buf), file, line, + diags->log("ssl-diag", DL_Debug, &loc, "SSL::%lu:%s:%s:%d%s%s%s%s", es, ERR_error_string(l, buf), file, line, (flags & ERR_TXT_STRING) ? ":" : "", (flags & ERR_TXT_STRING) ? data : "", vc ? 
": peer address is " : "", ip_buf); } @@ -1245,7 +1217,7 @@ SSLDiagnostic(const SourceLocation &loc, bool debug, SSLNetVConnection *vc, cons va_start(ap, fmt); if (debug) { - diags->log_va("ssl", DL_Debug, &loc, fmt, ap); + diags->log_va("ssl-diag", DL_Debug, &loc, fmt, ap); } else { diags->error_va(DL_Error, &loc, fmt, ap); } @@ -1283,10 +1255,7 @@ SSLDebugBufferPrint(const char *tag, const char *buffer, unsigned buflen, const SSL_CTX * SSLDefaultServerContext() { - ink_ssl_method_t meth = nullptr; - - meth = SSLv23_server_method(); - return SSL_CTX_new(meth); + return SSL_CTX_new(SSLv23_server_method()); } static bool @@ -1489,8 +1458,9 @@ ssl_set_handshake_callbacks(SSL_CTX *ctx) // Make sure the callbacks are set #if TS_USE_CERT_CB SSL_CTX_set_cert_cb(ctx, ssl_cert_callback, nullptr); + SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_only_callback); #else - SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_callback); + SSL_CTX_set_tlsext_servername_callback(ctx, ssl_servername_and_cert_callback); #endif #endif } @@ -1501,7 +1471,7 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu int server_verify_client; ats_scoped_str completeServerCertPath; SSL_CTX *ctx = SSLDefaultServerContext(); - EVP_MD_CTX *digest = EVP_MD_CTX_create(); + EVP_MD_CTX *digest = EVP_MD_CTX_new(); STACK_OF(X509_NAME) *ca_list = nullptr; unsigned char hash_buf[EVP_MAX_MD_SIZE]; unsigned int hash_len = 0; @@ -1712,7 +1682,6 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu SSL_CTX_set_client_CA_list(ctx, ca_list); } } - EVP_MD_CTX_init(digest); if (EVP_DigestInit_ex(digest, evp_md_func, nullptr) == 0) { SSLError("EVP_DigestInit_ex failed"); @@ -1744,6 +1713,8 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu SSLError("EVP_DigestFinal_ex failed"); goto fail; } + EVP_MD_CTX_free(digest); + digest = NULL; if (SSL_CTX_set_session_id_context(ctx, hash_buf, hash_len) == 0) { 
SSLError("SSL_CTX_set_session_id_context failed"); @@ -1798,8 +1769,8 @@ SSLInitServerContext(const SSLConfigParams *params, const ssl_user_config *sslMu return ctx; fail: - // EVP_MD_CTX_destroy calls EVP_MD_CTX_cleanup too - EVP_MD_CTX_destroy(digest); + if (digest) + EVP_MD_CTX_free(digest); SSL_CLEAR_PW_REFERENCES(ctx) SSLReleaseContext(ctx); for (unsigned int i = 0; i < certList.length(); i++) { @@ -1841,11 +1812,8 @@ ssl_store_ssl_context(const SSLConfigParams *params, SSLCertLookup *lookup, cons } } - // Load the session ticket key if session tickets are not disabled and we have key name. - if (sslMultCertSettings->session_ticket_enabled != 0 && sslMultCertSettings->ticket_key_filename) { - ats_scoped_str ticket_key_path(Layout::relative_to(params->serverCertPathOnly, sslMultCertSettings->ticket_key_filename)); - keyblock = ssl_context_enable_tickets(ctx, ticket_key_path); - } else if (sslMultCertSettings->session_ticket_enabled != 0) { + // Load the session ticket key if session tickets are not disabled + if (sslMultCertSettings->session_ticket_enabled != 0) { keyblock = ssl_context_enable_tickets(ctx, nullptr); } @@ -1967,10 +1935,6 @@ ssl_extract_certificate(const matcher_line *line_info, ssl_user_config &sslMultC sslMultCertSettings.session_ticket_enabled = atoi(value); } - if (strcasecmp(label, SSL_SESSION_TICKET_KEY_FILE_TAG) == 0) { - sslMultCertSettings.ticket_key_filename = ats_strdup(value); - } - if (strcasecmp(label, SSL_KEY_DIALOG) == 0) { sslMultCertSettings.dialog = ats_strdup(value); } @@ -2089,7 +2053,7 @@ ssl_callback_session_ticket(SSL *ssl, unsigned char *keyname, unsigned char *iv, int enc) { SSLCertificateConfig::scoped_config lookup; - SSLConfig::scoped_config params; + SSLTicketKeyConfig::scoped_config params; SSLNetVConnection *netvc = SSLNetVCAccess(ssl); // Get the IP address to look up the keyblock diff --git a/iocore/net/Socks.cc b/iocore/net/Socks.cc index 30e385510e3..331914a101a 100644 --- a/iocore/net/Socks.cc +++ 
b/iocore/net/Socks.cc @@ -84,22 +84,25 @@ void SocksEntry::findServer() { nattempts++; + unsigned int fail_threshold = server_params->policy.FailThreshold; + unsigned int retry_time = server_params->policy.ParentRetryTime; #ifdef SOCKS_WITH_TS if (nattempts == 1) { ink_assert(server_result.result == PARENT_UNDEFINED); - server_params->findParent(&req_data, &server_result); + server_params->findParent(&req_data, &server_result, fail_threshold, retry_time); } else { socks_conf_struct *conf = netProcessor.socks_conf_stuff; if ((nattempts - 1) % conf->per_server_connection_attempts) return; // attempt again - server_params->markParentDown(&server_result); + server_params->markParentDown(&server_result, fail_threshold, retry_time); - if (nattempts > conf->connection_attempts) + if (nattempts > conf->connection_attempts) { server_result.result = PARENT_FAIL; - else - server_params->nextParent(&req_data, &server_result); + } else { + server_params->nextParent(&req_data, &server_result, fail_threshold, retry_time); + } } switch (server_result.result) { @@ -288,7 +291,7 @@ SocksEntry::mainEvent(int event, void *data) timeout = this_ethread()->schedule_in(this, HRTIME_SECONDS(netProcessor.socks_conf_stuff->socks_timeout)); } - netVConnection->do_io_write(this, n_bytes, reader, 0); + netVConnection->do_io_write(this, n_bytes, reader, false); // Debug("Socks", "Sent the request to the SOCKS server"); ret = EVENT_CONT; diff --git a/iocore/net/UnixConnection.cc b/iocore/net/UnixConnection.cc index 742f150de18..783bf510622 100644 --- a/iocore/net/UnixConnection.cc +++ b/iocore/net/UnixConnection.cc @@ -177,8 +177,8 @@ namespace @endcode */ template struct cleaner { - T *obj; ///< Object instance. - typedef void (T::*method)(); ///< Method signature. + T *obj; ///< Object instance. + using method = void (T::*)(); ///< Method signature. 
method m; cleaner(T *_obj, method _method) : obj(_obj), m(_method) {} diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc index 85d7527ec9e..7b50136be7f 100644 --- a/iocore/net/UnixNet.cc +++ b/iocore/net/UnixNet.cc @@ -243,7 +243,7 @@ initialize_thread_for_net(EThread *thread) thread->ep = (EventIO *)ats_malloc(sizeof(EventIO)); thread->ep->type = EVENTIO_ASYNC_SIGNAL; #if HAVE_EVENTFD - thread->ep->start(pd, thread->evfd, 0, EVENTIO_READ); + thread->ep->start(pd, thread->evfd, nullptr, EVENTIO_READ); #else thread->ep->start(pd, thread->evpipe[0], nullptr, EVENTIO_READ); #endif @@ -450,13 +450,8 @@ NetHandler::mainNetEvent(int event, Event *e) if (cop_list.in(vc)) { cop_list.remove(vc); } - if (get_ev_events(pd, x) & EVENTIO_READ) { + if (get_ev_events(pd, x) & (EVENTIO_READ | EVENTIO_ERROR)) { vc->read.triggered = 1; - if (get_ev_events(pd, x) & EVENTIO_ERROR) { - vc->read.error = 1; - } else { - vc->read.error = 0; - } if (!read_ready_list.in(vc)) { read_ready_list.enqueue(vc); } else if (get_ev_events(pd, x) & EVENTIO_ERROR) { @@ -466,13 +461,8 @@ NetHandler::mainNetEvent(int event, Event *e) } } vc = epd->data.vc; - if (get_ev_events(pd, x) & EVENTIO_WRITE) { + if (get_ev_events(pd, x) & (EVENTIO_WRITE | EVENTIO_ERROR)) { vc->write.triggered = 1; - if (get_ev_events(pd, x) & EVENTIO_ERROR) { - vc->write.error = 1; - } else { - vc->write.error = 0; - } if (!write_ready_list.in(vc)) { write_ready_list.enqueue(vc); } else if (get_ev_events(pd, x) & EVENTIO_ERROR) { @@ -505,7 +495,7 @@ NetHandler::mainNetEvent(int event, Event *e) set_cont_flags(vc->control_flags); if (vc->closed) close_UnixNetVConnection(vc, trigger_event->ethread); - else if ((vc->read.enabled || vc->read.error) && vc->read.triggered) + else if (vc->read.enabled && vc->read.triggered) vc->net_read_io(this, trigger_event->ethread); else if (!vc->read.enabled) { read_ready_list.remove(vc); @@ -522,7 +512,7 @@ NetHandler::mainNetEvent(int event, Event *e) set_cont_flags(vc->control_flags); 
if (vc->closed) close_UnixNetVConnection(vc, trigger_event->ethread); - else if ((vc->write.enabled || vc->write.error) && vc->write.triggered) + else if (vc->write.enabled && vc->write.triggered) write_to_net(this, vc, trigger_event->ethread); else if (!vc->write.enabled) { write_ready_list.remove(vc); @@ -540,7 +530,7 @@ NetHandler::mainNetEvent(int event, Event *e) diags->set_override(vc->control.debug_override); if (vc->closed) close_UnixNetVConnection(vc, trigger_event->ethread); - else if ((vc->read.enabled || vc->read.error) && vc->read.triggered) + else if (vc->read.enabled && vc->read.triggered) vc->net_read_io(this, trigger_event->ethread); else if (!vc->read.enabled) vc->ep.modify(-EVENTIO_READ); @@ -549,7 +539,7 @@ NetHandler::mainNetEvent(int event, Event *e) diags->set_override(vc->control.debug_override); if (vc->closed) close_UnixNetVConnection(vc, trigger_event->ethread); - else if ((vc->write.enabled || vc->write.error) && vc->write.triggered) + else if (vc->write.enabled && vc->write.triggered) write_to_net(this, vc, trigger_event->ethread); else if (!vc->write.enabled) vc->ep.modify(-EVENTIO_WRITE); diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc index d30ebe55e90..f0bc3c86ded 100644 --- a/iocore/net/UnixNetAccept.cc +++ b/iocore/net/UnixNetAccept.cc @@ -28,10 +28,11 @@ #endif #define ROUNDUP(x, y) ((((x) + ((y)-1)) / (y)) * (y)) -typedef int (NetAccept::*NetAcceptHandler)(int, void *); +using NetAcceptHandler = int (NetAccept::*)(int, void *); volatile int dummy_volatile = 0; int accept_till_done = 1; +std::vector naVec; static void safe_delay(int msec) { @@ -102,6 +103,12 @@ net_accept(NetAccept *na, void *ep, bool blockable) return count; } +NetAccept * +getNetAccept(int ID) +{ + return naVec.at(ID); +} + // // Initialize the NetAccept for execution in its own thread. // This should be done for low latency, high connection rate sockets. 
@@ -270,8 +277,10 @@ NetAccept::do_blocking_accept(EThread *t) vc->set_is_transparent(opt.f_inbound_transparent); vc->options.packet_mark = opt.packet_mark; vc->options.packet_tos = opt.packet_tos; + vc->options.ip_family = opt.ip_family; vc->apply_options(); vc->set_context(NET_VCONNECTION_IN); + vc->accept_object = this; SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::acceptEvent); // eventProcessor.schedule_imm(vc, getEtype()); eventProcessor.schedule_imm_signal(vc, opt.etype); @@ -413,6 +422,7 @@ NetAccept::acceptFastEvent(int event, void *ep) vc->set_is_transparent(opt.f_inbound_transparent); vc->options.packet_mark = opt.packet_mark; vc->options.packet_tos = opt.packet_tos; + vc->options.ip_family = opt.ip_family; vc->apply_options(); vc->set_context(NET_VCONNECTION_IN); SET_CONTINUATION_HANDLER(vc, (NetVConnHandler)&UnixNetVConnection::mainEvent); @@ -481,8 +491,7 @@ NetAccept::acceptLoopEvent(int event, Event *e) // // -NetAccept::NetAccept(const NetProcessor::AcceptOptions &_opt) - : Continuation(nullptr), period(0), accept_fn(nullptr), ifd(NO_FD), opt(_opt) +NetAccept::NetAccept(const NetProcessor::AcceptOptions &_opt) : Continuation(nullptr), opt(_opt) { } diff --git a/iocore/net/UnixNetPages.cc b/iocore/net/UnixNetPages.cc index a20e1a8b5d7..06a7a61d0aa 100644 --- a/iocore/net/UnixNetPages.cc +++ b/iocore/net/UnixNetPages.cc @@ -27,7 +27,7 @@ #include "I_Tasks.h" struct ShowNet; -typedef int (ShowNet::*ShowNetEventHandler)(int event, Event *data); +using ShowNetEventHandler = int (ShowNet::*)(int, Event *); struct ShowNet : public ShowCont { int ithread; IpEndpoint addr; diff --git a/iocore/net/UnixNetProcessor.cc b/iocore/net/UnixNetProcessor.cc index 490d92e963a..905d268cef0 100644 --- a/iocore/net/UnixNetProcessor.cc +++ b/iocore/net/UnixNetProcessor.cc @@ -24,10 +24,13 @@ #include "P_Net.h" #include "ts/InkErrno.h" #include "ts/ink_sock.h" +#include "P_SSLNextProtocolAccept.h" // For Stat Pages #include "StatPages.h" +volatile int 
net_accept_number = 0; +extern std::vector naVec; NetProcessor::AcceptOptions const NetProcessor::DEFAULT_ACCEPT_OPTIONS; NetProcessor::AcceptOptions & @@ -91,6 +94,8 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons char thr_name[MAX_THREAD_NAME_LENGTH]; NetAccept *na = createNetAccept(opt); + na->id = ink_atomic_increment(&net_accept_number, 1); + Debug("iocore_net_accept", "creating new net accept number %d", na->id); // Fill in accept thread from configuration if necessary. if (opt.accept_threads < 0) { @@ -125,6 +130,10 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons if (should_filter_int > 0 && opt.etype == ET_NET) na->server.http_accept_filter = true; + SessionAccept *sa = dynamic_cast(cont); + na->proxyPort = sa ? sa->proxyPort : nullptr; + na->snpa = dynamic_cast(cont); + na->action_ = new NetAcceptAction(); *na->action_ = cont; na->action_->server = &na->server; @@ -138,7 +147,6 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons if (0 == na->do_listen(BLOCKING)) { for (int i = 1; i < accept_threads; ++i) { NetAccept *a = na->clone(); - snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[ACCEPT %d:%d]", i - 1, ats_ip_port_host_order(&accept_ip)); a->init_accept_loop(thr_name); Debug("iocore_net_accept", "Created accept thread #%d for port %d", i, ats_ip_port_host_order(&accept_ip)); @@ -164,7 +172,7 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons } else { na->init_accept(nullptr); } - + naVec.push_back(na); #ifdef TCP_DEFER_ACCEPT // set tcp defer accept timeout if it is configured, this will not trigger an accept until there is // data on the socket ready to be read @@ -258,7 +266,12 @@ UnixNetProcessor::connect_re_internal(Continuation *cont, sockaddr const *target } } } - t->schedule_imm(vc); + // Try to stay on the current thread if it is the right type + if (t->is_event_type(opt->etype)) { + t->schedule_imm(vc); + } else 
{ // Otherwise, pass along to another thread of the right type + eventProcessor.schedule_imm(vc, opt->etype); + } if (using_socks) { return &socksEntry->action_; } else @@ -366,7 +379,7 @@ struct CheckConnect : public Continuation { reader = buf->alloc_reader(); } - ~CheckConnect() + ~CheckConnect() override { buf->dealloc_all_readers(); buf->clear(); diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc index f96dadc21e4..73f162bf0b8 100644 --- a/iocore/net/UnixNetVConnection.cc +++ b/iocore/net/UnixNetVConnection.cc @@ -263,19 +263,6 @@ read_from_net(NetHandler *nh, UnixNetVConnection *vc, EThread *thread) return; } - if (!s->enabled && vc->read.error) { - int err = 0, errlen = sizeof(int); - if (getsockopt(vc->con.fd, SOL_SOCKET, SO_ERROR, &err, (socklen_t *)&errlen) == -1) { - err = errno; - } - - // if it is a non-temporary error, we should die appropriately - if (err && err != EAGAIN && err != EINTR) { - read_signal_error(nh, vc, err); - return; - } - } - // if it is not enabled. if (!s->enabled || s->vio.op != VIO::READ) { read_disable(nh, vc); @@ -443,18 +430,6 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread) return; } - if (!s->enabled && vc->write.error) { - int err = 0, errlen = sizeof(int); - if (getsockopt(vc->con.fd, SOL_SOCKET, SO_ERROR, &err, (socklen_t *)&errlen) == -1) { - err = errno; - } - - if (err && err != EAGAIN && err != EINTR) { - write_signal_error(nh, vc, err); - return; - } - } - // This function will always return true unless // vc is an SSLNetVConnection. if (!vc->getSSLHandShakeComplete()) { @@ -548,6 +523,7 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread) if (total_written > 0) { NET_SUM_DYN_STAT(net_write_bytes_stat, total_written); s->vio.ndone += total_written; + net_activity(vc, thread); } // A write of 0 makes no sense since we tried to write more than 0. 
@@ -583,8 +559,6 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread) vc->write_buffer_empty_event = 0; } - net_activity(vc, thread); - // If there are no more bytes to write, signal write complete, ink_assert(ntodo >= 0); if (s->vio.ntodo() <= 0) { @@ -656,7 +630,7 @@ UnixNetVConnection::get_data(int id, void *data) } } -const int64_t +int64_t UnixNetVConnection::outstanding() { int n; @@ -755,6 +729,7 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) read.enabled = 0; read.vio.buffer.clear(); read.vio.nbytes = 0; + read.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_READ; break; case IO_SHUTDOWN_WRITE: @@ -762,6 +737,7 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) write.enabled = 0; write.vio.buffer.clear(); write.vio.nbytes = 0; + write.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_WRITE; break; case IO_SHUTDOWN_READWRITE: @@ -772,6 +748,8 @@ UnixNetVConnection::do_io_shutdown(ShutdownHowTo_t howto) read.vio.nbytes = 0; write.vio.buffer.clear(); write.vio.nbytes = 0; + read.vio._cont = nullptr; + write.vio._cont = nullptr; f.shutdown = NET_VC_SHUTDOWN_READ | NET_VC_SHUTDOWN_WRITE; break; default: @@ -949,6 +927,7 @@ UnixNetVConnection::UnixNetVConnection() submit_time(0), oob_ptr(nullptr), from_accept_thread(false), + accept_object(nullptr), origin_trace(false), origin_trace_addr(nullptr), origin_trace_port(0) @@ -1102,12 +1081,6 @@ UnixNetVConnection::readSignalError(NetHandler *nh, int err) read_signal_error(nh, this, err); } -void -UnixNetVConnection::writeSignalError(NetHandler *nh, int err) -{ - write_signal_error(nh, this, err); -} - int UnixNetVConnection::readSignalDone(int event, NetHandler *nh) { @@ -1261,6 +1234,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) // ink_assert(next_inactivity_timeout_at < Thread::get_hrtime()); if (!inactivity_timeout_in || next_inactivity_timeout_at > Thread::get_hrtime()) return EVENT_CONT; + Warning("next_inactivity %" PRId64 " current time %" PRId64, 
next_inactivity_timeout_at, Thread::get_hrtime()); signal_event = VC_EVENT_INACTIVITY_TIMEOUT; signal_timeout_at = &next_inactivity_timeout_at; } else { @@ -1424,8 +1398,8 @@ UnixNetVConnection::free(EThread *t) // clear variables for reuse this->mutex.clear(); action_.mutex.clear(); - got_remote_addr = 0; - got_local_addr = 0; + got_remote_addr = false; + got_local_addr = false; attributes = 0; read.vio.mutex.clear(); write.vio.mutex.clear(); @@ -1495,9 +1469,10 @@ UnixNetVConnection::set_inactivity_timeout(ink_hrtime timeout_in) #else if (timeout_in == 0) { // set default inactivity timeout - inactivity_timeout_in = timeout_in = HRTIME_SECONDS(nh->default_inactivity_timeout); + timeout_in = HRTIME_SECONDS(nh->default_inactivity_timeout); } - next_inactivity_timeout_at = Thread::get_hrtime() + timeout_in; + inactivity_timeout_in = timeout_in; + next_inactivity_timeout_at = Thread::get_hrtime() + inactivity_timeout_in; #endif } diff --git a/iocore/net/UnixUDPNet.cc b/iocore/net/UnixUDPNet.cc index b05023a7159..da5a8780bb6 100644 --- a/iocore/net/UnixUDPNet.cc +++ b/iocore/net/UnixUDPNet.cc @@ -32,7 +32,7 @@ #include "P_Net.h" #include "P_UDPNet.h" -typedef int (UDPNetHandler::*UDPNetContHandler)(int, void *); +using UDPNetContHandler = int (UDPNetHandler::*)(int, void *); inkcoreapi ClassAllocator udpPacketAllocator("udpPacketAllocator"); EventType ET_UDP; @@ -176,8 +176,8 @@ class UDPReadContinuation : public Continuation public: UDPReadContinuation(Event *completionToken); UDPReadContinuation(); - ~UDPReadContinuation(); - inline void free(void); + ~UDPReadContinuation() override; + inline void free(); inline void init_token(Event *completionToken); inline void init_read(int fd, IOBufferBlock *buf, int len, struct sockaddr *fromaddr, socklen_t *fromaddrlen); @@ -249,7 +249,7 @@ UDPReadContinuation::UDPReadContinuation() } inline void -UDPReadContinuation::free(void) +UDPReadContinuation::free() { ink_assert(event != nullptr); completionUtil::destroy(event); @@ 
-776,7 +776,7 @@ UDPQueue::SendUDPPacket(UDPPacketInternal *p, int32_t /* pktLen ATS_UNUSED */) msg.msg_iovlen = iov_len; count = 0; - while (1) { + while (true) { // stupid Linux problem: sendmsg can return EAGAIN n = ::sendmsg(p->conn->getFd(), &msg, 0); if ((n >= 0) || ((n < 0) && (errno != EAGAIN))) diff --git a/iocore/utils/OneWayTunnel.cc b/iocore/utils/OneWayTunnel.cc index d35c45d7586..ff6c0113708 100644 --- a/iocore/utils/OneWayTunnel.cc +++ b/iocore/utils/OneWayTunnel.cc @@ -70,10 +70,10 @@ OneWayTunnel::OneWayTunnel() manipulate_fn(nullptr), n_connections(0), lerrno(0), - single_buffer(0), - close_source(0), - close_target(0), - tunnel_till_done(0), + single_buffer(false), + close_source(false), + close_target(false), + tunnel_till_done(false), tunnel_peer(nullptr), free_vcs(true) { @@ -159,7 +159,7 @@ OneWayTunnel::init(VConnection *vcSource, VConnection *vcTarget, Continuation *a SCOPED_MUTEX_LOCK(lock, mutex, this_ethread()); vioSource = vcSource->do_io_read(this, nbytes, buf1); - vioTarget = vcTarget->do_io_write(this, nbytes, buf2->alloc_reader(), 0); + vioTarget = vcTarget->do_io_write(this, nbytes, buf2->alloc_reader(), false); ink_assert(vioSource && vioTarget); return; @@ -189,7 +189,7 @@ OneWayTunnel::init(VConnection *vcSource, VConnection *vcTarget, Continuation *a SCOPED_MUTEX_LOCK(lock, mutex, this_ethread()); vioSource = SourceVio; - vioTarget = vcTarget->do_io_write(this, TUNNEL_TILL_DONE, reader, 0); + vioTarget = vcTarget->do_io_write(this, TUNNEL_TILL_DONE, reader, false); ink_assert(vioSource && vioTarget); } diff --git a/lib/bindings/bindings.cc b/lib/bindings/bindings.cc index 1bc25473dbc..a50e9512e5b 100644 --- a/lib/bindings/bindings.cc +++ b/lib/bindings/bindings.cc @@ -227,7 +227,19 @@ BindingInstance::construct() luaL_openlibs(this->lua); // Push a pointer to ourself into the well-known registry key. 
- lua_pushlightuserdata(this->lua, this); + + // We do not use lightuserdata here because BindingInstance variables + // are often declared on stack which would make "this" a stack variable. + // While this might seem fine and actually work on many platforms, those + // 64bit platforms with split VA space where heap and stack may live in + // a separate 47bit VA will violate internal assumptions that luajit + // places on lightuserdata. Plain userdata will provide luajit-happy + // address in which we have the full 64bits to store our pointer to this. + // see: https://www.circonus.com/2016/07/luajit-illumos-vm/ + + BindingInstance **lua_surrogate; + lua_surrogate = (BindingInstance **)lua_newuserdata(this->lua, sizeof(BindingInstance *)); + *lua_surrogate = this; lua_setfield(this->lua, LUA_REGISTRYINDEX, selfkey); ink_release_assert(BindingInstance::self(this->lua) == this); @@ -253,7 +265,7 @@ BindingInstance::require(const char *path) bool BindingInstance::eval(const char *chunk) { - ink_release_assert(this->lua != NULL); + ink_release_assert(this->lua != nullptr); if (luaL_dostring(this->lua, chunk) != 0) { const char *w = lua_tostring(this->lua, -1); @@ -268,16 +280,17 @@ BindingInstance::eval(const char *chunk) BindingInstance * BindingInstance::self(lua_State *lua) { - BindingInstance *binding; + BindingInstance **binding; lua_getfield(lua, LUA_REGISTRYINDEX, selfkey); - binding = (BindingInstance *)lua_touserdata(lua, -1); + binding = (BindingInstance **)lua_touserdata(lua, -1); ink_release_assert(binding != nullptr); - ink_release_assert(binding->lua == lua); + ink_release_assert(*binding != nullptr); + ink_release_assert((*binding)->lua == lua); lua_pop(lua, 1); - return binding; + return *binding; } void diff --git a/lib/bindings/metrics.cc b/lib/bindings/metrics.cc index 7d5df843442..ae2a3a5194f 100644 --- a/lib/bindings/metrics.cc +++ b/lib/bindings/metrics.cc @@ -169,8 +169,8 @@ metrics_gc(lua_State *L) metrics_binding *m = metrics_binding::check(L, 
1); // Clean up any references we stashed. - for (metrics_binding::ref_map::iterator ptr = m->refs.begin(); ptr != m->refs.end(); ++ptr) { - luaL_unref(L, LUA_REGISTRYINDEX, ptr->second); + for (auto &ref : m->refs) { + luaL_unref(L, LUA_REGISTRYINDEX, ref.second); } m->~metrics_binding(); diff --git a/lib/bindings/repl.cc b/lib/bindings/repl.cc index c6b73b58461..8175a74b2be 100644 --- a/lib/bindings/repl.cc +++ b/lib/bindings/repl.cc @@ -23,7 +23,7 @@ #include "ink_autoconf.h" #include "bindings.h" -#include +#include #if HAVE_READLINE_H #include diff --git a/lib/cppapi/AsyncHttpFetch.cc b/lib/cppapi/AsyncHttpFetch.cc index 6985a68c517..82f9ffa1f2b 100644 --- a/lib/cppapi/AsyncHttpFetch.cc +++ b/lib/cppapi/AsyncHttpFetch.cc @@ -30,6 +30,7 @@ #include #include +#include using namespace atscppapi; using std::string; @@ -54,7 +55,7 @@ struct atscppapi::AsyncHttpFetchState : noncopyable { AsyncHttpFetchState(const string &url_str, HttpMethod http_method, string request_body, AsyncHttpFetch::StreamingFlag streaming_flag) - : request_body_(request_body), + : request_body_(std::move(request_body)), result_(AsyncHttpFetch::RESULT_FAILURE), body_(nullptr), body_size_(0), diff --git a/lib/cppapi/Headers.cc b/lib/cppapi/Headers.cc index 805d218440e..c1e5b74657c 100644 --- a/lib/cppapi/Headers.cc +++ b/lib/cppapi/Headers.cc @@ -261,7 +261,7 @@ HeaderField::values(const char join) } std::string -Headers::value(const std::string key, size_type index /* = 0 */) +Headers::value(const std::string &key, size_type index /* = 0 */) { header_field_iterator iter = find(key); if (iter == end()) { diff --git a/lib/cppapi/Plugin.cc b/lib/cppapi/Plugin.cc index 118862272e2..3f7f9da26b4 100644 --- a/lib/cppapi/Plugin.cc +++ b/lib/cppapi/Plugin.cc @@ -29,7 +29,7 @@ const std::string atscppapi::HOOK_TYPE_STRINGS[] = { std::string("HOOK_CACHE_LOOKUP_COMPLETE"), std::string("HOOK_SELECT_ALT")}; void -atscppapi::RegisterGlobalPlugin(std::string name, std::string vendor, std::string email) 
+atscppapi::RegisterGlobalPlugin(const std::string &name, const std::string &vendor, const std::string &email) { TSPluginRegistrationInfo info; info.plugin_name = const_cast(name.c_str()); diff --git a/lib/cppapi/RemapPlugin.cc b/lib/cppapi/RemapPlugin.cc index ce582004809..4c1e1f12511 100644 --- a/lib/cppapi/RemapPlugin.cc +++ b/lib/cppapi/RemapPlugin.cc @@ -22,7 +22,7 @@ #include "atscppapi/RemapPlugin.h" #include "logging_internal.h" #include "utils_internal.h" -#include +#include #include using namespace atscppapi; diff --git a/lib/cppapi/Stat.cc b/lib/cppapi/Stat.cc index ab36c44fdc9..e3980306ceb 100644 --- a/lib/cppapi/Stat.cc +++ b/lib/cppapi/Stat.cc @@ -21,7 +21,7 @@ #include "atscppapi/Stat.h" #include -#include +#include #include #include "logging_internal.h" @@ -40,7 +40,7 @@ Stat::~Stat() } bool -Stat::init(string name, Stat::SyncType type, bool persistent) +Stat::init(const string &name, Stat::SyncType type, bool persistent) { if (TSStatFindName(name.c_str(), &stat_id_) == TS_SUCCESS) { LOG_DEBUG("Attached to stat '%s' with stat_id = %d", name.c_str(), stat_id_); diff --git a/lib/cppapi/Transaction.cc b/lib/cppapi/Transaction.cc index 66bc8172977..b7a2703a902 100644 --- a/lib/cppapi/Transaction.cc +++ b/lib/cppapi/Transaction.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include "atscppapi/Transaction.h" #include "ts/ink_memory.h" @@ -241,7 +242,7 @@ Transaction::getContextValue(const std::string &key) void Transaction::setContextValue(const std::string &key, std::shared_ptr value) { - state_->context_values_[key] = value; + state_->context_values_[key] = std::move(value); } ClientRequest & @@ -414,7 +415,7 @@ namespace class initializeHandles { public: - typedef TSReturnCode (*GetterFunction)(TSHttpTxn, TSMBuffer *, TSMLoc *); + using GetterFunction = TSReturnCode (*)(TSHttpTxn, TSMBuffer *, TSMLoc *); initializeHandles(GetterFunction getter) : getter_(getter) {} bool operator()(TSHttpTxn txn, TSMBuffer &hdr_buf, TSMLoc &hdr_loc, const 
char *handles_name) diff --git a/lib/cppapi/include/atscppapi/Headers.h b/lib/cppapi/include/atscppapi/Headers.h index 0090335dee0..af1e12d9636 100644 --- a/lib/cppapi/include/atscppapi/Headers.h +++ b/lib/cppapi/include/atscppapi/Headers.h @@ -536,7 +536,7 @@ class Headers : noncopyable * @param position of value * @return value */ - std::string value(const std::string key, size_type index = 0); + std::string value(const std::string &key, size_type index = 0); /** * Returns an iterator to the first HeaderField with the name key. diff --git a/lib/cppapi/include/atscppapi/Plugin.h b/lib/cppapi/include/atscppapi/Plugin.h index bf35f7aab6e..5624f13f94c 100644 --- a/lib/cppapi/include/atscppapi/Plugin.h +++ b/lib/cppapi/include/atscppapi/Plugin.h @@ -166,7 +166,7 @@ class Plugin : noncopyable /**< Human readable strings for each HookType, you can access them as HOOK_TYPE_STRINGS[HOOK_OS_DNS] for example. */ extern const std::string HOOK_TYPE_STRINGS[]; -void RegisterGlobalPlugin(std::string name, std::string vendor, std::string email); +void RegisterGlobalPlugin(const std::string &name, const std::string &vendor, const std::string &email); } /* atscppapi */ diff --git a/lib/cppapi/include/atscppapi/Stat.h b/lib/cppapi/include/atscppapi/Stat.h index 9f308ebf9b0..46246aa8e48 100644 --- a/lib/cppapi/include/atscppapi/Stat.h +++ b/lib/cppapi/include/atscppapi/Stat.h @@ -73,7 +73,7 @@ class Stat : noncopyable * * @see SyncType */ - bool init(std::string name, Stat::SyncType type = SYNC_COUNT, bool persistent = false); + bool init(const std::string &name, Stat::SyncType type = SYNC_COUNT, bool persistent = false); /** * This method allows you to increment a stat by a certain amount. 
diff --git a/lib/cppapi/utils_internal.cc b/lib/cppapi/utils_internal.cc index ed3740055c3..f930303d285 100644 --- a/lib/cppapi/utils_internal.cc +++ b/lib/cppapi/utils_internal.cc @@ -76,12 +76,12 @@ handleTransactionEvents(TSCont cont, TSEvent event, void *edata) case TS_EVENT_HTTP_TXN_CLOSE: { // opening scope to declare plugins variable below resetTransactionHandles(transaction, event); const std::list &plugins = utils::internal::getTransactionPlugins(transaction); - for (std::list::const_iterator iter = plugins.begin(), end = plugins.end(); iter != end; ++iter) { - std::shared_ptr trans_mutex = utils::internal::getTransactionPluginMutex(**iter); - LOG_DEBUG("Locking TransacitonPlugin mutex to delete transaction plugin at %p", *iter); + for (auto plugin : plugins) { + std::shared_ptr trans_mutex = utils::internal::getTransactionPluginMutex(*plugin); + LOG_DEBUG("Locking TransacitonPlugin mutex to delete transaction plugin at %p", plugin); trans_mutex->lock(); - LOG_DEBUG("Locked Mutex...Deleting transaction plugin at %p", *iter); - delete *iter; + LOG_DEBUG("Locked Mutex...Deleting transaction plugin at %p", plugin); + delete plugin; trans_mutex->unlock(); } delete &transaction; diff --git a/lib/perl/lib/Apache/TS/AdminClient.pm b/lib/perl/lib/Apache/TS/AdminClient.pm index aa48d9605ed..b236b8038b6 100644 --- a/lib/perl/lib/Apache/TS/AdminClient.pm +++ b/lib/perl/lib/Apache/TS/AdminClient.pm @@ -184,6 +184,16 @@ sub _do_read { while ($self->{_select}->can_read($timeout)) { my $rc = $self->{_socket}->sysread($res, 1024, length($res)); + + # If the server dies we get into a infinite loop because + # IO::Select::can_read keeps returning true + # In this condition sysread returns 0 or undef + # Also, we want to return an undef rather than a partial response + # to avoid unmarshalling errors in the callers + if (!defined($rc) || ($rc == 0)) { + $res = undef; + last; + } } return $res || undef; diff --git a/lib/records/P_RecCore.cc b/lib/records/P_RecCore.cc index 
d796d70614e..4edea1baff5 100644 --- a/lib/records/P_RecCore.cc +++ b/lib/records/P_RecCore.cc @@ -735,7 +735,7 @@ RecSyncConfigToTB(textBuffer *tb, bool *inc_version) if (r->data.rec_string) { tb->copyFrom(r->data.rec_string, strlen(r->data.rec_string)); } else { - tb->copyFrom("nullptr", strlen("NULL")); + tb->copyFrom("NULL", strlen("NULL")); } break; case RECD_COUNTER: @@ -994,7 +994,7 @@ RecWriteConfigFile(textBuffer *tb) } result = REC_ERR_OKAY; - } while (0); + } while (false); if (h_file != REC_HANDLE_INVALID) { RecFileClose(h_file); diff --git a/lib/records/RecConfigParse.cc b/lib/records/RecConfigParse.cc index 1c321183fb8..800ec95c57a 100644 --- a/lib/records/RecConfigParse.cc +++ b/lib/records/RecConfigParse.cc @@ -44,7 +44,7 @@ ink_mutex g_rec_config_lock; // RecConfigFileInit //------------------------------------------------------------------------- void -RecConfigFileInit(void) +RecConfigFileInit() { ink_mutex_init(&g_rec_config_lock, nullptr); g_rec_config_contents_llq = create_queue(); diff --git a/lib/records/RecCore.cc b/lib/records/RecCore.cc index 5dbf5188079..71e5fda16fd 100644 --- a/lib/records/RecCore.cc +++ b/lib/records/RecCore.cc @@ -996,7 +996,7 @@ debug_record_callback(RecT /* rec_type */, void * /* edata */, int registered, c RecDebug(DL_Note, " ([%d] '%s', '%f')", registered, name, datum->rec_float); break; case RECD_STRING: - RecDebug(DL_Note, " ([%d] '%s', '%s')", registered, name, datum->rec_string ? datum->rec_string : "nullptr"); + RecDebug(DL_Note, " ([%d] '%s', '%s')", registered, name, datum->rec_string ? 
datum->rec_string : "NULL"); break; case RECD_COUNTER: RecDebug(DL_Note, " ([%d] '%s', '%" PRId64 "')", registered, name, datum->rec_counter); diff --git a/lib/records/RecHttp.cc b/lib/records/RecHttp.cc index 2076f5ac9d1..29ea1fa480b 100644 --- a/lib/records/RecHttp.cc +++ b/lib/records/RecHttp.cc @@ -26,7 +26,9 @@ #include #include #include +#include #include +#include SessionProtocolNameRegistry globalSessionProtocolNameRegistry; @@ -34,10 +36,10 @@ SessionProtocolNameRegistry globalSessionProtocolNameRegistry; These are also used for NPN setup. */ -const char *const TS_ALPN_PROTOCOL_HTTP_0_9 = "http/0.9"; -const char *const TS_ALPN_PROTOCOL_HTTP_1_0 = "http/1.0"; -const char *const TS_ALPN_PROTOCOL_HTTP_1_1 = "http/1.1"; -const char *const TS_ALPN_PROTOCOL_HTTP_2_0 = "h2"; // HTTP/2 over TLS +const char *const TS_ALPN_PROTOCOL_HTTP_0_9 = IP_PROTO_TAG_HTTP_0_9.ptr(); +const char *const TS_ALPN_PROTOCOL_HTTP_1_0 = IP_PROTO_TAG_HTTP_1_0.ptr(); +const char *const TS_ALPN_PROTOCOL_HTTP_1_1 = IP_PROTO_TAG_HTTP_1_1.ptr(); +const char *const TS_ALPN_PROTOCOL_HTTP_2_0 = IP_PROTO_TAG_HTTP_2_0.ptr(); const char *const TS_ALPN_PROTOCOL_GROUP_HTTP = "http"; const char *const TS_ALPN_PROTOCOL_GROUP_HTTP2 = "http2"; @@ -45,14 +47,14 @@ const char *const TS_ALPN_PROTOCOL_GROUP_HTTP2 = "http2"; const char *const TS_PROTO_TAG_HTTP_1_0 = TS_ALPN_PROTOCOL_HTTP_1_0; const char *const TS_PROTO_TAG_HTTP_1_1 = TS_ALPN_PROTOCOL_HTTP_1_1; const char *const TS_PROTO_TAG_HTTP_2_0 = TS_ALPN_PROTOCOL_HTTP_2_0; -const char *const TS_PROTO_TAG_TLS_1_3 = "tls/1.3"; -const char *const TS_PROTO_TAG_TLS_1_2 = "tls/1.2"; -const char *const TS_PROTO_TAG_TLS_1_1 = "tls/1.1"; -const char *const TS_PROTO_TAG_TLS_1_0 = "tls/1.0"; -const char *const TS_PROTO_TAG_TCP = "tcp"; -const char *const TS_PROTO_TAG_UDP = "udp"; -const char *const TS_PROTO_TAG_IPV4 = "ipv4"; -const char *const TS_PROTO_TAG_IPV6 = "ipv6"; +const char *const TS_PROTO_TAG_TLS_1_3 = IP_PROTO_TAG_TLS_1_3.ptr(); +const char *const 
TS_PROTO_TAG_TLS_1_2 = IP_PROTO_TAG_TLS_1_2.ptr(); +const char *const TS_PROTO_TAG_TLS_1_1 = IP_PROTO_TAG_TLS_1_1.ptr(); +const char *const TS_PROTO_TAG_TLS_1_0 = IP_PROTO_TAG_TLS_1_0.ptr(); +const char *const TS_PROTO_TAG_TCP = IP_PROTO_TAG_TCP.ptr(); +const char *const TS_PROTO_TAG_UDP = IP_PROTO_TAG_UDP.ptr(); +const char *const TS_PROTO_TAG_IPV4 = IP_PROTO_TAG_IPV4.ptr(); +const char *const TS_PROTO_TAG_IPV6 = IP_PROTO_TAG_IPV6.ptr(); InkHashTable *TSProtoTags; @@ -386,7 +388,7 @@ HttpProxyPort::processOptions(const char *opts) if (in_ip_set_p && m_family != m_inbound_ip.family()) { Warning( "Invalid port descriptor '%s' - the inbound adddress family [%s] is not the same type as the explicit family value [%s]", - opts, ats_ip_family_name(m_inbound_ip.family()), ats_ip_family_name(m_family)); + opts, ats_ip_family_name(m_inbound_ip.family()).ptr(), ats_ip_family_name(m_family).ptr()); zret = false; } } else if (in_ip_set_p) { diff --git a/lib/records/RecLocal.cc b/lib/records/RecLocal.cc index 842ad6458af..abb80449799 100644 --- a/lib/records/RecLocal.cc +++ b/lib/records/RecLocal.cc @@ -65,7 +65,7 @@ sync_thr(void *data) textBuffer *tb = new textBuffer(65536); FileManager *configFiles = (FileManager *)data; - while (1) { + while (true) { bool inc_version; RecBool disabled = false; RecBool check = true; diff --git a/lib/records/RecProcess.cc b/lib/records/RecProcess.cc index 55899093562..c644739d706 100644 --- a/lib/records/RecProcess.cc +++ b/lib/records/RecProcess.cc @@ -179,7 +179,7 @@ struct sync_cont : public Continuation { m_tb = new textBuffer(65536); } - ~sync_cont() + ~sync_cont() override { if (m_tb != nullptr) { delete m_tb; @@ -289,7 +289,7 @@ RecProcessInitMessage(RecModeT mode_type) // RecProcessStart //------------------------------------------------------------------------- int -RecProcessStart(void) +RecProcessStart() { if (g_started) { return REC_ERR_OKAY; diff --git a/lib/records/RecUtils.cc b/lib/records/RecUtils.cc index 
b2807203999..77be0c6af66 100644 --- a/lib/records/RecUtils.cc +++ b/lib/records/RecUtils.cc @@ -419,7 +419,7 @@ RecDataSetFromString(RecDataT data_type, RecData *data_dst, const char *data_str data_src.rec_float = atof(data_string); break; case RECD_STRING: - if (data_string && strcmp((data_string), "nullptr") == 0) { + if (data_string && (strlen(data_string) == 4) && strncmp((data_string), "NULL", 4) == 0) { data_src.rec_string = nullptr; } else { // It's OK to cast away the const here, because RecDataSet will copy the string. diff --git a/lib/ts/Arena.cc b/lib/ts/Arena.cc index 8b03300b66b..daa52b4f61f 100644 --- a/lib/ts/Arena.cc +++ b/lib/ts/Arena.cc @@ -25,8 +25,8 @@ #include "ts/ink_memory.h" #include "ts/Allocator.h" #include "ts/Arena.h" -#include -#include +#include +#include #define DEFAULT_ALLOC_SIZE 1024 #define DEFAULT_BLOCK_SIZE (DEFAULT_ALLOC_SIZE - (sizeof(ArenaBlock) - 8)) diff --git a/lib/ts/BaseLogFile.cc b/lib/ts/BaseLogFile.cc index 062eb63c35e..bd4d2d61c0a 100644 --- a/lib/ts/BaseLogFile.cc +++ b/lib/ts/BaseLogFile.cc @@ -238,7 +238,7 @@ BaseLogFile::roll(long interval_start, long interval_end) int BaseLogFile::roll() { - long start; + time_t start; time_t now = time(nullptr); if (!m_meta_info || !m_meta_info->get_creation_time(&start)) diff --git a/lib/ts/CompileParseRules.cc b/lib/ts/CompileParseRules.cc index 52c12c8eebe..1a1a5c06b8e 100644 --- a/lib/ts/CompileParseRules.cc +++ b/lib/ts/CompileParseRules.cc @@ -33,8 +33,8 @@ unsigned int tparseRulesCType[256]; char tparseRulesCTypeToUpper[256]; char tparseRulesCTypeToLower[256]; -#include -#include +#include +#include #include "ts/ink_string.h" static char * diff --git a/lib/ts/Diags.cc b/lib/ts/Diags.cc index da8c5f2ebff..18d12360777 100644 --- a/lib/ts/Diags.cc +++ b/lib/ts/Diags.cc @@ -527,9 +527,9 @@ Diags::dump(FILE *fp) const fprintf(fp, "Diags:\n"); fprintf(fp, " debug.enabled: %d\n", config.enabled[DiagsTagType_Debug]); - fprintf(fp, " debug default tags: '%s'\n", (base_debug_tags ? 
base_debug_tags : "nullptr")); + fprintf(fp, " debug default tags: '%s'\n", (base_debug_tags ? base_debug_tags : "NULL")); fprintf(fp, " action.enabled: %d\n", config.enabled[DiagsTagType_Action]); - fprintf(fp, " action default tags: '%s'\n", (base_action_tags ? base_action_tags : "nullptr")); + fprintf(fp, " action default tags: '%s'\n", (base_action_tags ? base_action_tags : "NULL")); fprintf(fp, " outputs:\n"); for (i = 0; i < DiagsLevel_Count; i++) { fprintf(fp, " %10s [stdout=%d, stderr=%d, syslog=%d, diagslog=%d]\n", level_name((DiagsLevel)i), config.outputs[i].to_stdout, diff --git a/lib/ts/EventNotify.cc b/lib/ts/EventNotify.cc index df034dd9567..c26c4651eea 100644 --- a/lib/ts/EventNotify.cc +++ b/lib/ts/EventNotify.cc @@ -61,7 +61,7 @@ EventNotify::EventNotify() } void -EventNotify::signal(void) +EventNotify::signal() { #ifdef HAVE_EVENTFD uint64_t value = 1; @@ -77,7 +77,7 @@ EventNotify::signal(void) } int -EventNotify::wait(void) +EventNotify::wait() { #ifdef HAVE_EVENTFD ssize_t nr, nr_fd; @@ -140,7 +140,7 @@ int EventNotify::timedwait(int timeout) // milliseconds } void -EventNotify::lock(void) +EventNotify::lock() { #ifdef HAVE_EVENTFD // do nothing @@ -150,7 +150,7 @@ EventNotify::lock(void) } bool -EventNotify::trylock(void) +EventNotify::trylock() { #ifdef HAVE_EVENTFD return true; @@ -160,7 +160,7 @@ EventNotify::trylock(void) } void -EventNotify::unlock(void) +EventNotify::unlock() { #ifdef HAVE_EVENTFD // do nothing diff --git a/lib/ts/HashFNV.cc b/lib/ts/HashFNV.cc index 6fca238eb35..4ddd346b634 100644 --- a/lib/ts/HashFNV.cc +++ b/lib/ts/HashFNV.cc @@ -13,46 +13,46 @@ static const uint32_t FNV_INIT_32 = 0x811c9dc5u; static const uint64_t FNV_INIT_64 = 0xcbf29ce484222325ull; // FNV-1a 64bit -ATSHash32FNV1a::ATSHash32FNV1a(void) +ATSHash32FNV1a::ATSHash32FNV1a() { this->clear(); } void -ATSHash32FNV1a::final(void) +ATSHash32FNV1a::final() { } uint32_t -ATSHash32FNV1a::get(void) const +ATSHash32FNV1a::get() const { return hval; } void 
-ATSHash32FNV1a::clear(void) +ATSHash32FNV1a::clear() { hval = FNV_INIT_32; } // FNV-1a 64bit -ATSHash64FNV1a::ATSHash64FNV1a(void) +ATSHash64FNV1a::ATSHash64FNV1a() { this->clear(); } void -ATSHash64FNV1a::final(void) +ATSHash64FNV1a::final() { } uint64_t -ATSHash64FNV1a::get(void) const +ATSHash64FNV1a::get() const { return hval; } void -ATSHash64FNV1a::clear(void) +ATSHash64FNV1a::clear() { hval = FNV_INIT_64; } diff --git a/lib/ts/HashMD5.cc b/lib/ts/HashMD5.cc index 1ebd950144c..8c658f7dab0 100644 --- a/lib/ts/HashMD5.cc +++ b/lib/ts/HashMD5.cc @@ -22,9 +22,9 @@ #include "ts/HashMD5.h" #include "ts/ink_assert.h" -ATSHashMD5::ATSHashMD5(void) : md_len(0), finalized(false) +ATSHashMD5::ATSHashMD5() : md_len(0), finalized(false) { - ctx = EVP_MD_CTX_create(); + ctx = EVP_MD_CTX_new(); int ret = EVP_DigestInit_ex(ctx, EVP_md5(), nullptr); ink_assert(ret == 1); } @@ -39,7 +39,7 @@ ATSHashMD5::update(const void *data, size_t len) } void -ATSHashMD5::final(void) +ATSHashMD5::final() { if (!finalized) { int ret = EVP_DigestFinal_ex(ctx, md_value, &md_len); @@ -49,7 +49,7 @@ ATSHashMD5::final(void) } const void * -ATSHashMD5::get(void) const +ATSHashMD5::get() const { if (finalized) { return (void *)md_value; @@ -59,17 +59,14 @@ ATSHashMD5::get(void) const } size_t -ATSHashMD5::size(void) const +ATSHashMD5::size() const { return EVP_MD_CTX_size(ctx); } void -ATSHashMD5::clear(void) +ATSHashMD5::clear() { -#if OPENSSL_VERSION_NUMBER < 0x10100000L -#define EVP_MD_CTX_reset(ctx) EVP_MD_CTX_cleanup((ctx)) -#endif int ret = EVP_MD_CTX_reset(ctx); ink_assert(ret == 1); ret = EVP_DigestInit_ex(ctx, EVP_md5(), nullptr); @@ -80,5 +77,5 @@ ATSHashMD5::clear(void) ATSHashMD5::~ATSHashMD5() { - EVP_MD_CTX_destroy(ctx); + EVP_MD_CTX_free(ctx); } diff --git a/lib/ts/HashSip.cc b/lib/ts/HashSip.cc index 714304fd59c..f2d36ecc729 100644 --- a/lib/ts/HashSip.cc +++ b/lib/ts/HashSip.cc @@ -33,7 +33,7 @@ Based off of implementation: x3 ^= x0; \ x2 = ROTL64(x2, 32); 
-ATSHash64Sip24::ATSHash64Sip24(void) +ATSHash64Sip24::ATSHash64Sip24() { k0 = 0; k1 = 0; @@ -96,7 +96,7 @@ ATSHash64Sip24::update(const void *data, size_t len) } void -ATSHash64Sip24::final(void) +ATSHash64Sip24::final() { uint64_t last7; int i; @@ -123,7 +123,7 @@ ATSHash64Sip24::final(void) } uint64_t -ATSHash64Sip24::get(void) const +ATSHash64Sip24::get() const { if (finalized) { return hfinal; @@ -133,7 +133,7 @@ ATSHash64Sip24::get(void) const } void -ATSHash64Sip24::clear(void) +ATSHash64Sip24::clear() { v0 = k0 ^ 0x736f6d6570736575ull; v1 = k1 ^ 0x646f72616e646f6dull; diff --git a/lib/ts/HostLookup.cc b/lib/ts/HostLookup.cc index 08e6d029030..69cf99ae313 100644 --- a/lib/ts/HostLookup.cc +++ b/lib/ts/HostLookup.cc @@ -155,7 +155,7 @@ hostcmp(const char *c1, const char *c2) } c1++; c2++; - } while (1); + } while (true); return 0; } @@ -366,7 +366,7 @@ charIndex::Insert(const char *match_data, HostBranch *toInsert) return; } - while (1) { + while (true) { index = asciiToTable[(unsigned char)(*match_data)]; // Check to see if our index into table is for an @@ -421,7 +421,7 @@ charIndex::Lookup(const char *match_data) return nullptr; } - while (1) { + while (true) { index = asciiToTable[(unsigned char)(*match_data)]; // Check to see if our index into table is for an @@ -498,7 +498,7 @@ charIndex::iter_next(charIndexIterState *s) index = s->cur_index; } - while (1) { + while (true) { // Check to see if we need to go back up a level if (index >= numLegalChars) { if (level <= 0) { @@ -554,7 +554,7 @@ charIndex::iter_next(charIndexIterState *s) // Since the only iter state is an index into the // array typedef it -typedef int hostArrayIterState; +using hostArrayIterState = int; class hostArray { @@ -1210,7 +1210,7 @@ HostLookup::MatchNext(HostLookupState *s, void **opaque_ptr) s->host_copy_next--; - while (1) { + while (true) { if (s->host_copy_next <= s->host_copy) { s->host_copy_next = s->host_copy; break; diff --git a/lib/ts/InkErrno.cc b/lib/ts/InkErrno.cc 
index ab788afa7b1..33301ceca93 100644 --- a/lib/ts/InkErrno.cc +++ b/lib/ts/InkErrno.cc @@ -23,7 +23,7 @@ #include "InkErrno.h" #include "ink_assert.h" -#include +#include const char * InkStrerror(int ink_errno) diff --git a/lib/ts/IpMap.cc b/lib/ts/IpMap.cc index da2b73b56c9..b6433e2db84 100644 --- a/lib/ts/IpMap.cc +++ b/lib/ts/IpMap.cc @@ -134,9 +134,9 @@ namespace detail struct IpMapBase { friend class ::IpMap; - typedef IpMapBase self; ///< Self reference type. - typedef typename N::ArgType ArgType; ///< Import type. - typedef typename N::Metric Metric; ///< Import type.g482 + using self = IpMapBase; ///< Self reference type. + using ArgType = typename N::ArgType; ///< Import type. + using Metric = typename N::Metric; ///< Import type.g482 IpMapBase() : _root(nullptr) {} ~IpMapBase() { this->clear(); } @@ -735,7 +735,7 @@ namespace detail friend struct IpMapBase; public: - typedef Ip4Node self; ///< Self reference type. + using self = ts::detail::Ip4Node; ///< Self reference type. /// Construct with values. Ip4Node(ArgType min, ///< Minimum address (host order). @@ -748,21 +748,21 @@ namespace detail ats_ip4_set(ats_ip_sa_cast(&_sa._max), htonl(max)); } /// @return The minimum value of the interval. - virtual sockaddr const * - min() const + sockaddr const * + min() const override { return ats_ip_sa_cast(&_sa._min); } /// @return The maximum value of the interval. - virtual sockaddr const * - max() const + sockaddr const * + max() const override { return ats_ip_sa_cast(&_sa._max); } /// Set the client data. self & setData(void *data ///< Client data. - ) + ) override { _data = data; return *this; @@ -871,7 +871,7 @@ namespace detail }; //---------------------------------------------------------------------------- - typedef Interval Ip6Span; + using Ip6Span = Interval; /** Node for IPv6 map. */ @@ -880,10 +880,10 @@ namespace detail friend struct IpMapBase; public: - typedef Ip6Node self; ///< Self reference type. 
+ using self = ts::detail::Ip6Node; ///< Self reference type. /// Override @c ArgType from @c Interval because the convention /// is to use a pointer, not a reference. - typedef Metric const *ArgType; + using ArgType = const ts::detail::Interval::Metric *; /// Construct from pointers. Ip6Node(ArgType min, ///< Minimum address (network order). @@ -902,21 +902,21 @@ namespace detail { } /// @return The minimum value of the interval. - virtual sockaddr const * - min() const + sockaddr const * + min() const override { return ats_ip_sa_cast(&_min); } /// @return The maximum value of the interval. - virtual sockaddr const * - max() const + sockaddr const * + max() const override { return ats_ip_sa_cast(&_max); } /// Set the client data. self & setData(void *data ///< Client data. - ) + ) override { _data = data; return *this; diff --git a/lib/ts/MMH.cc b/lib/ts/MMH.cc index 0612fc62c52..31d9bd36fda 100644 --- a/lib/ts/MMH.cc +++ b/lib/ts/MMH.cc @@ -21,8 +21,8 @@ limitations under the License. 
*/ -#include -#include +#include +#include #include "ts/ink_assert.h" #include "ts/ink_platform.h" #include "ts/MMH.h" diff --git a/lib/ts/Makefile.am b/lib/ts/Makefile.am index 039db6e4b0b..8b3c9689ec9 100644 --- a/lib/ts/Makefile.am +++ b/lib/ts/Makefile.am @@ -23,7 +23,7 @@ library_includedir=$(includedir)/ts library_include_HEADERS = apidefs.h noinst_PROGRAMS = mkdfa CompileParseRules -check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator +check_PROGRAMS = test_tsutil test_arena test_atomic test_freelist test_geometry test_List test_Map test_Vec test_X509HostnameValidator test_MemView TESTS_ENVIRONMENT = LSAN_OPTIONS=suppressions=suppression.txt @@ -85,6 +85,7 @@ libtsutil_la_SOURCES = \ IpMapConf.h \ Layout.cc \ List.h \ + MemView.h MemView.cc \ MMH.cc \ MMH.h \ Map.h \ @@ -235,6 +236,9 @@ test_X509HostnameValidator_SOURCES = test_X509HostnameValidator.cc test_X509HostnameValidator_LDADD = libtsutil.la @LIBTCL@ @LIBPCRE@ @OPENSSL_LIBS@ test_X509HostnameValidator_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ +test_MemView_SOURCES = test_MemView.cc +test_MemView_LDADD = libtsutil.la + test_tsutil_LDADD = libtsutil.la @LIBTCL@ @LIBPCRE@ test_tsutil_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ test_tsutil_SOURCES = \ @@ -250,4 +254,3 @@ clean-local: tidy-local: $(DIST_SOURCES) $(CXX_Clang_Tidy) - diff --git a/lib/ts/MemView.cc b/lib/ts/MemView.cc new file mode 100644 index 00000000000..9699bc1ac80 --- /dev/null +++ b/lib/ts/MemView.cc @@ -0,0 +1,158 @@ +/** @file + + Class for handling "views" of a buffer. Views presume the memory for the buffer is managed + elsewhere and allow efficient access to segments of the buffer without copies. Views are read + only as the view doesn't own the memory. Along with generic buffer methods are specialized + methods to support better string parsing, particularly token based parsing. 
+ + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +#include +#include +#include +#include + +namespace ts +{ +StringView::StringView(const char *s) : _ptr(s), _size(s ? strlen(s) : 0) +{ +} + +int +memcmp(MemView const &lhs, MemView const &rhs) +{ + int zret; + size_t n; + + // Seems a bit ugly but size comparisons must be done anyway to get the memcmp args. + if (lhs.size() < rhs.size()) { + zret = 1, n = lhs.size(); + } else { + n = rhs.size(); + zret = rhs.size() < lhs.size() ? -1 : 0; + } + + int r = ::memcmp(lhs.ptr(), rhs.ptr(), n); + if (0 != r) { // If we got a not-equal, override the size based result. + zret = r; + } + + return zret; +} + +int +strcasecmp(StringView lhs, StringView rhs) +{ + while (lhs && rhs) { + char l = tolower(*lhs); + char r = tolower(*rhs); + if (l < r) { + return -1; + } else if (r < l) { + return 1; + } + ++lhs, ++rhs; + } + return lhs ? 1 : rhs ? 
-1 : 0; +} + +intmax_t +svtoi(StringView src, StringView *out, int base) +{ + static const int8_t convert[256] = { + /* [can't do this nicely because clang format won't allow exdented comments] + 0 1 2 3 4 5 6 7 8 9 A B C D E F + */ + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 00 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 10 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 20 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // 30 + -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 40 + 25, 26, 27, 28, 20, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1, // 50 + -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 60 + 25, 26, 27, 28, 20, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, -1, // 70 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 80 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 90 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // A0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // B0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // C0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // D0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // E0 + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, // F0 + }; + + intmax_t zret = 0; + + if (out) { + out->clear(); + } + if (!(1 < base && base <= 36)) { + return 0; + } + if (src.ltrim(&isspace)) { + const char *start = src.ptr(); + int8_t v; + bool neg = false; + if ('-' == *src) { + ++src; + neg = true; + } + while (src.size() && (-1 != (v = convert[static_cast(*src)]))) { + zret = zret * base + v; + ++src; + } + if (out && (src.ptr() > (neg ? start + 1 : start))) { + out->setView(start, src.ptr()); + } + + if (neg) { + zret = -zret; + } + } + return zret; +} + +// Do the template instantions. 
+template void detail::stream_fill(std::ostream &, std::size_t); +template std::ostream &StringView::stream_write(std::ostream &, const StringView &) const; +} + +namespace std +{ +ostream & +operator<<(ostream &os, const ts::MemView &b) +{ + if (os.good()) { + ostringstream out; + out << b.size() << '@' << hex << b.ptr(); + os << out.str(); + } + return os; +} + +ostream & +operator<<(ostream &os, const ts::StringView &b) +{ + if (os.good()) { + b.stream_write(os, b); + os.width(0); + } + return os; +} +} diff --git a/lib/ts/MemView.h b/lib/ts/MemView.h new file mode 100644 index 00000000000..1925bc7fe18 --- /dev/null +++ b/lib/ts/MemView.h @@ -0,0 +1,1392 @@ +#if !defined TS_MEM_VIEW +#define TS_MEM_VIEW + +/** @file + + Class for handling "views" of a buffer. Views presume the memory for the buffer is managed + elsewhere and allow efficient access to segments of the buffer without copies. Views are read + only as the view doesn't own the memory. Along with generic buffer methods are specialized + methods to support better string parsing, particularly token based parsing. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include + +/// Apache Traffic Server commons. +namespace ts +{ +class MemView; +class StringView; + +/// Compare the memory in two views. +/// Return based on the first different byte. If one argument is a prefix of the other, the prefix +/// is considered the "smaller" value. +/// @return +/// - -1 if @a lhs byte is less than @a rhs byte. +/// - 1 if @a lhs byte is greater than @a rhs byte. +/// - 0 if the views contain identical memory. +int memcmp(MemView const &lhs, MemView const &rhs); +using ::memcmp; // Make this an overload, not an override. +/// Compare the strings in two views. +/// Return based on the first different character. If one argument is a prefix of the other, the prefix +/// is considered the "smaller" value. +/// @return +/// - -1 if @a lhs char is less than @a rhs char. +/// - 1 if @a lhs char is greater than @a rhs char. +/// - 0 if the views contain identical strings. +int strcmp(StringView const &lhs, StringView const &rhs); +using ::strcmp; // Make this an overload, not an override. +/// Compare the strings in two views. +/// Return based on the first different character. If one argument is a prefix of the other, the prefix +/// is considered the "smaller" value. The values are compared ignoring case. +/// @return +/// - -1 if @a lhs char is less than @a rhs char. +/// - 1 if @a lhs char is greater than @a rhs char. +/// - 0 if the views contain identical strings. +/// +/// @internal Why not ? Because the implementation would make copies anyway, might as well save +/// the cost of passing the pointers. +int strcasecmp(StringView lhs, StringView rhs); +using ::strcasecmp; // Make this an overload, not an override. + +/** Convert the text in @c StringView @a src to a numeric value. + + If @a parsed is non-null then the part of the string actually parsed is placed there. + @a base sets the conversion base. 
This defaults to 10 with two special cases: + + - If the number starts with a literal '0' then it is treated as base 8. + - If the number starts with the literal characters '0x' or '0X' then it is treated as base 16. +*/ +intmax_t svtoi(StringView src, StringView *parsed = nullptr, int base = 10); + +/** A read only view of contiguous piece of memory. + + A @c MemView does not own the memory to which it refers, it is simply a view of part of some + (presumably) larger memory object. The purpose is to allow working in a read only way a specific + part of the memory. This can avoid copying or allocation by allocating all needed memory at once + and then working with it via instances of this class. + + MemView is based on an earlier class ConstBuffer and influenced by Boost.string_ref. Neither + of these were adequate for how use of @c ConstBuffer evolved and so @c MemView is @c + ConstBuffer with some additional stylistic changes based on Boost.string_ref. + + This class is closely integrated with @c StringView. These classes have the same underlying + implementation and are differentiated only because of the return types and a few string oriented + methods. + */ +class MemView +{ + typedef MemView self; ///< Self reference type. + +protected: + const void *_ptr = nullptr; ///< Pointer to base of memory chunk. + size_t _size = 0; ///< Size of memory chunk. + +public: + /// Default constructor (empty buffer). + constexpr MemView(); + + /** Construct explicitly with a pointer and size. + */ + constexpr MemView(const void *ptr, ///< Pointer to buffer. + size_t n ///< Size of buffer. + ); + + /** Construct from a half open range of two pointers. + @note The instance at @start is in the view but the instance at @a end is not. + */ + template + constexpr MemView(T const *start, ///< First byte in the view. + T const *end ///< First byte not in the view. + ); + + /** Construct from a half open range of two pointers. 
+ @note The instance at @start is in the view but the instance at @a end is not. + */ + MemView(void const *start, ///< First byte in the view. + void const *end ///< First byte not in the view. + ); + + /** Construct from nullptr. + This implicitly makes the length 0. + */ + constexpr MemView(std::nullptr_t); + + /// Convert from StringView. + constexpr MemView(StringView const &that); + + /** Equality. + + This is effectively a pointer comparison, buffer contents are not compared. + + @return @c true if @a that refers to the same view as @a this, + @c false otherwise. + */ + bool operator==(self const &that) const; + + /** Inequality. + @return @c true if @a that does not refer to the same view as @a this, + @c false otherwise. + */ + bool operator!=(self const &that) const; + + /// Assignment - the view is copied, not the content. + self &operator=(self const &that); + + /** Shift the view to discard the first byte. + @return @a this. + */ + self &operator++(); + + /** Shift the view to discard the leading @a n bytes. + @return @a this + */ + self &operator+=(size_t n); + + /// Check for empty view. + /// @return @c true if the view has a zero pointer @b or size. + bool operator!() const; + + /// Check for non-empty view. + /// @return @c true if the view refers to a non-empty range of bytes. + explicit operator bool() const; + + /// Check for empty view (no content). + /// @see operator bool + bool isEmpty() const; + + /// @name Accessors. + //@{ + /// Pointer to the first byte in the view. + const void *begin() const; + /// Pointer to first byte not in the view. + const void *end() const; + /// Number of bytes in the view. + constexpr size_t size() const; + /// Memory pointer. + /// @note This is equivalent to @c begin currently but it's probably good to have separation. + constexpr const void *ptr() const; + /// @return the @a V value at index @a n. + template V at(ssize_t n) const; + /// @return a pointer to the @a V value at index @a n. 
+ template V const *at_ptr(ssize_t n) const; + //@} + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const void *ptr, ///< Buffer address. + size_t n = 0 ///< Buffer size. + ); + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const void *start, ///< First valid character. + const void *end ///< First invalid character. + ); + + /// Clear the view (become an empty view). + self &clear(); + + /// @return @c true if the byte at @a *p is in the view. + bool contains(const void *p) const; + + /** Find a value. + The memory is searched as if it were an array of the value type @a T. + + @return A pointer to the first occurrence of @a v in @a this + or @c nullptr if @a v is not found. + */ + template const V *find(V v) const; + + /** Find a value. + The memory is searched as if it were an array of the value type @a V. + + @return A pointer to the first value for which @a pred is @c true otherwise + @c nullptr. + */ + template const V *find(std::function const &pred); + + /** Get the initial segment of the view before @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view + is returned. + + @return A buffer that contains all data before @a p. + */ + self prefix(const void *p) const; + + /** Split the view at @a p. + + The view is split in to two parts at @a p and the prefix is returned. The view is updated to + contain the bytes not returned in the prefix. The prefix will not contain @a p. + + @note If @a *p refers to a byte that is not in @a this then @a this is not changed and an empty + buffer is returned. Therefore this method can be safely called with the return value of + calling @c find. + + @return A buffer containing data up to but not including @a p. 
+ + @see extractPrefix + */ + self splitPrefix(const void *p); + + /** Extract a prefix delimited by @a p. + + A prefix of @a this is removed from the view and returned. If @a p is not in the view then the + entire view is extracted and returned. + + If @a p points at a byte in the view this is identical to @c splitPrefix. If not then the + entire view in @a this will be returned and @a this will become an empty view. + + @return The prefix bounded at @a p or the entire view if @a p is not a byte in the view. + + @see splitPrefix + */ + self extractPrefix(const void *p); + + /** Get the trailing segment of the view after @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view is returned. + + @return A buffer that contains all data after @a p. + */ + self suffix(const void *p) const; + + /** Split the view at @a p. + + The view is split in to two parts and the suffix is returned. The view is updated to contain + the bytes not returned in the suffix. The suffix will not contain @a p. + + @note If @a p does not refer to a byte in the view, an empty view is returned and @a this is + unchanged. + + @return @a this. + */ + self splitSuffix(const void *p); +}; + +/** A read only view of contiguous piece of memory. + + A @c StringView does not own the memory to which it refers, it is simply a view of part of some + (presumably) larger memory object. The purpose is to allow working in a read only way a specific + part of the memory. A classic example for ATS is working with HTTP header fields and values + which need to be accessed independently but preferably without copying. A @c StringView supports this style. + + MemView is based on an earlier class ConstBuffer and influenced by Boost.string_ref. Neither + of these were adequate for how use of @c ConstBuffer evolved and so @c MemView is @c + ConstBuffer with some additional stylistic changes based on Boost.string_ref. 
+ + In particular @c MemView is designed both to support passing via API (to replace the need to + pass two parameters for one real argument) and to aid in parsing input without copying. + + */ +class StringView +{ + typedef StringView self; ///< Self reference type. + +protected: + const char *_ptr = nullptr; ///< Pointer to base of memory chunk. + size_t _size = 0; ///< Size of memory chunk. + + struct literal_t { + }; + struct array_t { + }; + +public: + /// Default constructor (empty buffer). + constexpr StringView(); + + /** Construct explicitly with a pointer and size. + */ + constexpr StringView(const char *ptr, ///< Pointer to buffer. + size_t n ///< Size of buffer. + ); + + /** Construct explicitly with a pointer and size. + If @a n is negative it is treated as 0. + @internal Overload for convience, otherwise get "narrow conversion" errors. + */ + constexpr StringView(const char *ptr, ///< Pointer to buffer. + int n ///< Size of buffer. + ); + + /** Construct from a half open range of two pointers. + @note The byte at @start is in the view but the byte at @a end is not. + */ + constexpr StringView(const char *start, ///< First byte in the view. + const char *end ///< First byte not in the view. + ); + + /** Constructor from literal string. + + Construct directly from a literal string. This avoids a call to :c strlen and therefore is + faster and can be @c constexpr. The terminal nul character is excluded. Internal nul + characters are included. + + @code + StringView a("literal", StringView::literal); + @endcode + */ + template constexpr StringView(const char (&s)[N], literal_t); + + /** Constructor from character array. + + Construct directly from an array of characters. All elements of the array are + included in the view. + + @code + char buff[SIZE]; + StringView a(buff, StringView::array); + @endcode + + @note If this is used on a literal string, the terminal nul character is included. 
+ */ + template constexpr StringView(const char (&s)[N], array_t); + + /** Construct from nullptr. + This implicitly makes the length 0. + */ + constexpr StringView(std::nullptr_t); + + /// Construct from @c MemView to reference the same view. + /// @internal Can't be @c constexpr because @c static_cast of @c is not permitted. + StringView(MemView const &that); + + /** Construct from null terminated string. + @note The terminating null is not included. @c strlen is used to determine the length. + */ + explicit StringView(const char *s); + + /// Construct from @c std::string, referencing the entire string contents. + /// @internal Not all compilers make @c std::string methods called @c constexpr + StringView(std::string const &str); + + /** Equality. + + This is effectively a pointer comparison, buffer contents are not compared. + + @return @c true if @a that refers to the same view as @a this, + @c false otherwise. + */ + bool operator==(self const &that) const; + + /** Inequality. + @return @c true if @a that does not refer to the same view as @a this, + @c false otherwise. + */ + bool operator!=(self const &that) const; + + /** Prefix check. + @return @c true if @a this is a prefix of @a that. + */ + bool isPrefixOf(self const &that) const; + + /** Case ignoring prefix check. + @return @c true if @a this is a prefix of @a that, ignoring case. + */ + bool isNoCasePrefixOf(self const &that) const; + + /// Assignment - the view is copied, not the content. + self &operator=(self const &that); + + /// @return The first byte in the view. + char operator*() const; + + /// @return the byte at offset @a n. + char operator[](size_t n) const; + + /// @return the byte at offset @a n. + char operator[](int n) const; + + /** Shift the view to discard the first byte. + @return @a this. + */ + self &operator++(); + + /** Shift the view to discard the leading @a n bytes. + @return @a this + */ + self &operator+=(size_t n); + + /// Check for empty view. 
+ /// @return @c true if the view has a zero pointer @b or size. + bool operator!() const; + + /// Check for non-empty view. + /// @return @c true if the view refers to a non-empty range of bytes. + explicit operator bool() const; + + /// Check for empty view (no content). + /// @see operator bool + bool isEmpty() const; + + /// @name Accessors. + //@{ + /// Pointer to the first byte in the view. + const char *begin() const; + /// Pointer to first byte not in the view. + const char *end() const; + /// Number of bytes in the view. + constexpr size_t size() const; + /// Memory pointer. + /// @note This is equivalent to @c begin currently but it's probably good to have separation. + constexpr const char *ptr() const; + //@} + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const char *ptr, ///< Buffer address. + size_t n = 0 ///< Buffer size. + ); + + /// Set the view. + /// This is faster but equivalent to constructing a new view with the same + /// arguments and assigning it. + /// @return @c this. + self &setView(const char *start, ///< First valid character. + const char *end ///< First invalid character. + ); + + /// Clear the view (become an empty view). + self &clear(); + + /// @return @c true if the byte at @a *p is in the view. + bool contains(const char *p) const; + + /** Find a byte. + @return A pointer to the first occurrence of @a c in @a this + or @c nullptr if @a c is not found. + */ + const char *find(char c) const; + + /** Find a byte. + @return A pointer to the first occurence of any of @a delimiters in @a + this or @c nullptr if not found. + */ + const char *find(self delimiters) const; + + /** Find a byte. + @return A pointer to the first byte for which @a pred is @c true otherwise + @c nullptr. + */ + const char *find(std::function const &pred) const; + + /** Remove bytes that match @a c from the start of the view. 
+ */ + self <rim(char c); + /** Remove bytes from the start of the view that are in @a delimiters. + */ + self <rim(self delimiters); + /** Remove bytes from the start of the view for which @a pred is @c true. + */ + self <rim(std::function const &pred); + + /** Remove bytes that match @a c from the end of the view. + */ + self &rtrim(char c); + /** Remove bytes from the end of the view that are in @a delimiters. + */ + self &rtrim(self delimiters); + /** Remove bytes from the start and end of the view for which @a pred is @c true. + */ + self &rtrim(std::function const &pred); + + /** Remove bytes that match @a c from the end of the view. + */ + self &trim(char c); + /** Remove bytes from the start and end of the view that are in @a delimiters. + */ + self &trim(self delimiters); + /** Remove bytes from the start and end of the view for which @a pred is @c true. + */ + self &trim(std::function const &pred); + + /** Get the initial segment of the view before @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view + is returned. + + @return A buffer that contains all data before @a p. + */ + self prefix(const char *p) const; + + /// Convenience overload for character. + self prefix(char c); + /// Convenience overload, split on delimiter set. + self prefix(self delimiters) const; + /// Convenience overload, split on predicate. + self prefix(std::function const &pred) const; + + /** Split the view on the character at @a p. + + The view is split in to two parts and the byte at @a p is discarded. @a this retains all data + @b after @a p (equivalent to MemView(p+1, this->end()). A new view containing the + initial bytes up to but not including @a p is returned, (equivalent to + MemView(this->begin(), p)). + + This is convenient when tokenizing and @a p points at a delimiter. + + @note If @a *p refers toa byte that is not in @a this then @a this is not changed and an empty + buffer is returned. 
Therefore this method can be safely called with the return value of + calling @c find. + + @code + void f(MemView& text) { + MemView token = text.splitPrefix(text.find(delimiter)); + if (token) { // ... process token } + @endcode + + @return A buffer containing data up to but not including @a p. + + @see extractPrefix + */ + self splitPrefix(const char *p); + + /// Convenience overload, split on character. + self splitPrefix(char c); + /// Convenience overload, split on delimiter set. + self splitPrefix(self delimiters); + /// Convenience overload, split on predicate. + self splitPrefix(std::function const &pred); + + /** Extract a prefix delimited by @a p. + + A prefix of @a this is removed from the view and returned. If @a p is not in the view then the + entire view is extracted and returned. + + If @a p points at a byte in the view this is identical to @c splitPrefix. If not then the + entire view in @a this will be returned and @a this will become an empty view. This is easier + to use when repeated extracting tokens. The source view will become empty after extracting the + last token. + + @code + MemView text; + while (text) { + MemView token = text.extractPrefix(text.find(delimiter)); + // .. process token which will always be non-empty because text was not empty. + } + @endcode + + @return The prefix bounded at @a p or the entire view if @a p is not a byte in the view. + + @see splitPrefix + */ + self extractPrefix(const char *p); + + /// Convenience overload, extract on delimiter set. + self extractPrefix(char c); + /// Convenience overload, extract on delimiter set. + self extractPrefix(self delimiters); + /// Convenience overload, extract on predicate. + self extractPrefix(std::function const &pred); + + /** Get the trailing segment of the view after @a p. + + The byte at @a p is not included. If @a p is not in the view an empty view is returned. + + @return A buffer that contains all data after @a p. 
+ */ + self suffix(const char *p) const; + + /// Convenience overload for character. + self suffix(char c); + /// Convenience overload for delimiter set. + self suffix(self delimiters); + /// Convenience overload for predicate. + self suffix(std::function const &pred); + + /** Split the view on the character at @a p. + + The view is split in to two parts and the byte at @a p is discarded. @a this retains all data + @b before @a p (equivalent to MemView(this->begin(), p)). A new view containing + the trailing bytes after @a p is returned, (equivalent to MemView(p+1, + this->end())). + + @note If @a p does not refer to a byte in the view, an empty view is returned and @a this is + unchanged. + + @return @a this. + */ + self splitSuffix(const char *p); + + /// Convenience overload for character. + self splitSuffix(char c); + /// Convenience overload for delimiter set. + self splitSuffix(self delimiters); + /// Convenience overload for predicate. + self splitSuffix(std::function const &pred); + + // Functors for using this class in STL containers. + /// Ordering functor, lexicographic comparison. + struct LessThan { + bool + operator()(MemView const &lhs, MemView const &rhs) + { + return -1 == strcmp(lhs, rhs); + } + }; + /// Ordering functor, case ignoring lexicographic comparison. + struct LessThanNoCase { + bool + operator()(MemView const &lhs, MemView const &rhs) + { + return -1 == strcasecmp(lhs, rhs); + } + }; + + /// Specialized stream operator implementation. + /// @note Use the standard stream operator unless there is a specific need for this, which is unlikely. + /// @return The stream @a os. + /// @internal Needed because @c std::ostream::write must be used and + /// so alignment / fill have to be explicitly handled. + template Stream &stream_write(Stream &os, const StringView &b) const; + + static constexpr literal_t literal{}; + static constexpr array_t array{}; + +protected: + /// Initialize a bit mask to mark which characters are in this view. 
+ void initDelimiterSet(std::bitset<256> &set); +}; +// ---------------------------------------------------------- +// Inline implementations. + +inline constexpr MemView::MemView() +{ +} +inline constexpr MemView::MemView(void const *ptr, size_t n) : _ptr(ptr), _size(n) +{ +} +template constexpr MemView::MemView(const T *start, const T *end) : _ptr(start), _size((end - start) * sizeof(T)) +{ +} +// is magic, handle that specially. +// No constexpr because the spec specifically forbids casting from to a typed pointer. +inline MemView::MemView(void const *start, void const *end) + : _ptr(start), _size(static_cast(end) - static_cast(start)) +{ +} +inline constexpr MemView::MemView(std::nullptr_t) : _ptr(nullptr), _size(0) +{ +} +inline constexpr MemView::MemView(StringView const &that) : _ptr(that.ptr()), _size(that.size()) +{ +} + +inline MemView & +MemView::setView(const void *ptr, size_t n) +{ + _ptr = ptr; + _size = n; + return *this; +} + +inline MemView & +MemView::setView(const void *ptr, const void *limit) +{ + _ptr = ptr; + _size = static_cast(limit) - static_cast(ptr); + return *this; +} + +inline MemView & +MemView::clear() +{ + _ptr = 0; + _size = 0; + return *this; +} + +inline bool +MemView::operator==(self const &that) const +{ + return _size == that._size && _ptr == that._ptr; +} + +inline bool +MemView::operator!=(self const &that) const +{ + return !(*this == that); +} + +inline bool MemView::operator!() const +{ + return !(_ptr && _size); +} + +inline MemView::operator bool() const +{ + return _ptr && _size; +} + +inline bool +MemView::isEmpty() const +{ + return !(_ptr && _size); +} + +inline MemView &MemView::operator++() +{ + _ptr = static_cast(_ptr) + 1; + --_size; + return *this; +} + +inline MemView & +MemView::operator+=(size_t n) +{ + if (n > _size) { + _ptr = nullptr; + _size = 0; + } else { + _ptr = static_cast(_ptr) + n; + _size -= n; + } + return *this; +} + +inline const void * +MemView::begin() const +{ + return _ptr; +} +inline 
constexpr const void * +MemView::ptr() const +{ + return _ptr; +} + +inline const void * +MemView::end() const +{ + return static_cast(_ptr) + _size; +} + +inline constexpr size_t +MemView::size() const +{ + return _size; +} + +inline MemView & +MemView::operator=(MemView const &that) +{ + _ptr = that._ptr; + _size = that._size; + return *this; +} + +inline bool +MemView::contains(const void *p) const +{ + return _ptr <= this->begin() && p < this->end(); +} + +inline MemView +MemView::prefix(const void *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(_ptr, p); + return zret; +} + +inline MemView +MemView::splitPrefix(const void *p) +{ + self zret; // default to empty return. + if (this->contains(p)) { + zret.setView(_ptr, p); + this->setView(p, this->end()); + } + return zret; +} + +inline MemView +MemView::extractPrefix(const void *p) +{ + self zret{this->splitPrefix(p)}; + + // For extraction if zret is empty, use up all of @a this + if (!zret) { + zret = *this; + this->clear(); + } + + return zret; +} + +inline MemView +MemView::suffix(const void *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(p, this->end()); + return zret; +} + +inline MemView +MemView::splitSuffix(const void *p) +{ + self zret; + if (this->contains(p)) { + zret.setView(p, this->end()); + this->setView(_ptr, p); + } + return zret; +} + +template +inline V +MemView::at(ssize_t n) const +{ + return static_cast(_ptr)[n]; +} + +template +inline V const * +MemView::at_ptr(ssize_t n) const +{ + return static_cast(_ptr) + n; +} + +template +inline const V * +MemView::find(V v) const +{ + for (const V *spot = static_cast(_ptr), limit = spot + (_size / sizeof(V)); spot < limit; ++spot) + if (v == *spot) + return spot; + return nullptr; +} + +// Specialize char for performance. 
+template <> +inline const char * +MemView::find(char v) const +{ + return static_cast(memchr(_ptr, v, _size)); +} + +template +inline const V * +MemView::find(std::function const &pred) +{ + for (const V *p = static_cast(_ptr), *limit = p + (_size / sizeof(V)); p < limit; ++p) + if (pred(*p)) + return p; + return nullptr; +} + +// === StringView Implementation === +inline constexpr StringView::StringView() +{ +} +inline constexpr StringView::StringView(const char *ptr, size_t n) : _ptr(ptr), _size(n) +{ +} +inline constexpr StringView::StringView(const char *ptr, int n) : _ptr(ptr), _size(n < 0 ? 0 : n) +{ +} +inline constexpr StringView::StringView(const char *start, const char *end) : _ptr(start), _size(end - start) +{ +} +inline constexpr StringView::StringView(std::nullptr_t) : _ptr(nullptr), _size(0) +{ +} +inline StringView::StringView(MemView const &that) : _ptr(static_cast(that.ptr())), _size(that.size()) +{ +} +inline StringView::StringView(std::string const &str) : _ptr(str.data()), _size(str.size()) +{ +} +template constexpr StringView::StringView(const char (&s)[N], literal_t) : _ptr(s), _size(N - 1) +{ +} + +template constexpr StringView::StringView(const char (&s)[N], array_t) : _ptr(s), _size(N) +{ +} + +inline void StringView::initDelimiterSet(std::bitset<256> &set) +{ + set.reset(); + for (char c : *this) + set[static_cast(c)] = true; +} + +inline StringView & +StringView::setView(const char *ptr, size_t n) +{ + _ptr = ptr; + _size = n; + return *this; +} + +inline StringView & +StringView::setView(const char *ptr, const char *limit) +{ + _ptr = ptr; + _size = limit - ptr; + return *this; +} + +inline StringView & +StringView::clear() +{ + _ptr = 0; + _size = 0; + return *this; +} + +inline bool +StringView::operator==(self const &that) const +{ + return _size == that._size && _ptr == that._ptr; +} + +inline bool +StringView::operator!=(self const &that) const +{ + return !(*this == that); +} + +inline bool StringView::operator!() const +{ + 
return !(_ptr && _size); +} + +inline StringView::operator bool() const +{ + return _ptr && _size; +} + +inline bool +StringView::isEmpty() const +{ + return !(_ptr && _size); +} + +inline char StringView::operator*() const +{ + return *_ptr; +} + +inline StringView &StringView::operator++() +{ + ++_ptr; + --_size; + return *this; +} + +inline StringView & +StringView::operator+=(size_t n) +{ + if (n > _size) { + _ptr = nullptr; + _size = 0; + } else { + _ptr += n; + _size -= n; + } + return *this; +} + +inline const char * +StringView::begin() const +{ + return _ptr; +} +inline constexpr const char * +StringView::ptr() const +{ + return _ptr; +} + +inline const char * +StringView::end() const +{ + return _ptr + _size; +} + +inline constexpr size_t +StringView::size() const +{ + return _size; +} + +inline StringView & +StringView::operator=(StringView const &that) +{ + _ptr = that._ptr; + _size = that._size; + return *this; +} + +inline char StringView::operator[](size_t n) const +{ + return _ptr[n]; +} + +inline char StringView::operator[](int n) const +{ + return _ptr[n]; +} + +inline bool +StringView::contains(const char *p) const +{ + return _ptr <= p && p < _ptr + _size; +} + +inline StringView +StringView::prefix(const char *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(_ptr, p); + return zret; +} + +inline StringView +StringView::prefix(char c) +{ + return this->prefix(this->find(c)); +} + +inline StringView +StringView::prefix(self delimiters) const +{ + return this->prefix(this->find(delimiters)); +} + +inline StringView +StringView::prefix(std::function const &pred) const +{ + return this->prefix(this->find(pred)); +} + +inline StringView +StringView::splitPrefix(const char *p) +{ + self zret; // default to empty return. 
+ if (this->contains(p)) { + zret.setView(_ptr, p); + this->setView(p + 1, this->end()); + } + return zret; +} + +inline StringView +StringView::splitPrefix(char c) +{ + return this->splitPrefix(this->find(c)); +} + +inline StringView +StringView::splitPrefix(self delimiters) +{ + return this->splitPrefix(this->find(delimiters)); +} + +inline StringView +StringView::splitPrefix(std::function const &pred) +{ + return this->splitPrefix(this->find(pred)); +} + +inline StringView +StringView::extractPrefix(const char *p) +{ + self zret{this->splitPrefix(p)}; + + // For extraction if zret is empty, use up all of @a this + if (!zret) { + zret = *this; + this->clear(); + } + + return zret; +} + +inline StringView +StringView::extractPrefix(char c) +{ + return this->extractPrefix(this->find(c)); +} + +inline StringView +StringView::extractPrefix(self delimiters) +{ + return this->extractPrefix(this->find(delimiters)); +} + +inline StringView +StringView::extractPrefix(std::function const &pred) +{ + return this->extractPrefix(this->find(pred)); +} + +inline StringView +StringView::suffix(const char *p) const +{ + self zret; + if (this->contains(p)) + zret.setView(p + 1, _ptr + _size); + return zret; +} + +inline StringView +StringView::suffix(char c) +{ + return this->suffix(this->find(c)); +} + +inline StringView +StringView::suffix(self delimiters) +{ + return this->suffix(this->find(delimiters)); +} + +inline StringView +StringView::suffix(std::function const &pred) +{ + return this->suffix(this->find(pred)); +} + +inline StringView +StringView::splitSuffix(const char *p) +{ + self zret; + if (this->contains(p)) { + zret.setView(p + 1, this->end()); + this->setView(_ptr, p); + } + return zret; +} + +inline StringView +StringView::splitSuffix(char c) +{ + return this->splitSuffix(this->find(c)); +} + +inline StringView +StringView::splitSuffix(self delimiters) +{ + return this->splitSuffix(this->find(delimiters)); +} + +inline StringView 
+StringView::splitSuffix(std::function const &pred) +{ + return this->splitSuffix(this->find(pred)); +} + +inline const char * +StringView::find(char c) const +{ + return static_cast(memchr(_ptr, c, _size)); +} + +inline const char * +StringView::find(self delimiters) const +{ + std::bitset<256> valid; + delimiters.initDelimiterSet(valid); + + for (const char *p = this->begin(), *limit = this->end(); p < limit; ++p) + if (valid[static_cast(*p)]) + return p; + + return nullptr; +} + +inline const char * +StringView::find(std::function const &pred) const +{ + const char *p = std::find_if(this->begin(), this->end(), pred); + return p == this->end() ? nullptr : p; +} + +inline StringView & +StringView::ltrim(char c) +{ + while (_size && *_ptr == c) + ++*this; + return *this; +} + +inline StringView & +StringView::rtrim(char c) +{ + while (_size && _ptr[_size - 1] == c) + --_size; + return *this; +} +inline StringView & +StringView::trim(char c) +{ + this->ltrim(c); + return this->rtrim(c); +} + +inline StringView & +StringView::ltrim(self delimiters) +{ + std::bitset<256> valid; + delimiters.initDelimiterSet(valid); + + while (_size && valid[static_cast(*_ptr)]) + ++*this; + + return *this; +} + +inline StringView & +StringView::rtrim(self delimiters) +{ + std::bitset<256> valid; + delimiters.initDelimiterSet(valid); + + while (_size && valid[static_cast(_ptr[_size - 1])]) + --_size; + + return *this; +} + +inline StringView & +StringView::trim(self delimiters) +{ + std::bitset<256> valid; + delimiters.initDelimiterSet(valid); + // Do this explicitly, so we don't have to initialize the character set twice. 
+ while (_size && valid[static_cast(_ptr[_size - 1])]) + --_size; + while (_size && valid[static_cast(_ptr[0])]) + ++*this; + return *this; +} + +inline StringView & +StringView::ltrim(std::function const &pred) +{ + while (_size && pred(_ptr[0])) + ++*this; + return *this; +} + +inline StringView & +StringView::rtrim(std::function const &pred) +{ + while (_size && pred(_ptr[_size - 1])) + --_size; + return *this; +} + +inline StringView & +StringView::trim(std::function const &pred) +{ + this->ltrim(pred); + return this->rtrim(pred); +} + +inline bool +StringView::isPrefixOf(self const &that) const +{ + return _size <= that._size && 0 == memcmp(_ptr, that._ptr, _size); +} + +inline bool +StringView::isNoCasePrefixOf(self const &that) const +{ + return _size <= that._size && 0 == strncasecmp(_ptr, that._ptr, _size); +} + +inline int +strcmp(StringView const &lhs, StringView const &rhs) +{ + return ts::memcmp(lhs, rhs); +} + +namespace detail +{ + /// Write padding to the stream, using the current stream fill character. 
+ template + void + stream_fill(Stream &os, std::size_t n) + { + static constexpr size_t pad_size = 8; + typename Stream::char_type padding[pad_size]; + + std::fill_n(padding, pad_size, os.fill()); + for (; n >= pad_size && os.good(); n -= pad_size) + os.write(padding, pad_size); + if (n > 0 && os.good()) + os.write(padding, n); + } + + extern template void stream_fill(std::ostream &, std::size_t); +} // detail + +template +Stream & +StringView::stream_write(Stream &os, const StringView &b) const +{ + const std::size_t w = os.width(); + if (w <= b.size()) { + os.write(b.ptr(), b.size()); + } else { + const std::size_t pad_size = w - b.size(); + const bool align_left = (os.flags() & Stream::adjustfield) == Stream::left; + if (!align_left && os.good()) + detail::stream_fill(os, pad_size); + if (os.good()) + os.write(b.ptr(), b.size()); + if (align_left && os.good()) + detail::stream_fill(os, pad_size); + } + return os; +} + +// Provide an instantiation for @c std::ostream as it's likely this is the only one ever used. 
+extern template std::ostream &StringView::stream_write(std::ostream &, const StringView &) const; + +} // end namespace ApacheTrafficServer + +namespace std +{ +ostream &operator<<(ostream &os, const ts::MemView &b); +ostream &operator<<(ostream &os, const ts::StringView &b); +} + +#endif // TS_BUFFER_HEADER diff --git a/lib/ts/PriorityQueue.h b/lib/ts/PriorityQueue.h index afae433d7cb..c0d2d11a011 100644 --- a/lib/ts/PriorityQueue.h +++ b/lib/ts/PriorityQueue.h @@ -47,7 +47,7 @@ template > class PriorityQueue public: PriorityQueue() {} ~PriorityQueue() {} - const bool empty(); + bool empty(); PriorityQueueEntry *top(); void pop(); void push(PriorityQueueEntry *); @@ -72,7 +72,7 @@ PriorityQueue::dump() const } template -const bool +bool PriorityQueue::empty() { return _v.length() == 0; diff --git a/lib/ts/SourceLocation.cc b/lib/ts/SourceLocation.cc index 2bcd0dea76a..8621e9ea15e 100644 --- a/lib/ts/SourceLocation.cc +++ b/lib/ts/SourceLocation.cc @@ -23,8 +23,8 @@ #include "SourceLocation.h" #include "ink_defs.h" -#include -#include +#include +#include // This method takes a SourceLocation source location data structure and // converts it to a human-readable representation, in the buffer diff --git a/lib/ts/TextBuffer.cc b/lib/ts/TextBuffer.cc index f35f49bb91c..7dc2344b278 100644 --- a/lib/ts/TextBuffer.cc +++ b/lib/ts/TextBuffer.cc @@ -21,7 +21,7 @@ limitations under the License. 
*/ -#include +#include #include "ts/ink_platform.h" #include "ts/ink_memory.h" #include "ts/TextBuffer.h" diff --git a/lib/ts/Tokenizer.cc b/lib/ts/Tokenizer.cc index 46a45744857..74f3738e39f 100644 --- a/lib/ts/Tokenizer.cc +++ b/lib/ts/Tokenizer.cc @@ -71,8 +71,8 @@ Tokenizer::~Tokenizer() while (cur != nullptr) { if (options & COPY_TOKS) { - for (int i = 0; i < TOK_NODE_ELEMENTS; i++) - ats_free(cur->el[i]); + for (auto &i : cur->el) + ats_free(i); } next = cur->next; @@ -356,8 +356,8 @@ Tokenizer::ReUse() while (cur_node != nullptr) { if (options & COPY_TOKS) { - for (int i = 0; i < TOK_NODE_ELEMENTS; i++) - ats_free(cur_node->el[i]); + for (auto &i : cur_node->el) + ats_free(i); } memset(cur_node->el, 0, sizeof(char *) * TOK_NODE_ELEMENTS); cur_node = cur_node->next; diff --git a/lib/ts/Vec.cc b/lib/ts/Vec.cc index 8773b2fd91a..43a1b6016dd 100644 --- a/lib/ts/Vec.cc +++ b/lib/ts/Vec.cc @@ -22,7 +22,7 @@ /* UnionFind after Tarjan */ -#include +#include #include "ts/Vec.h" const uintptr_t prime2[] = {1, 3, 7, 13, 31, 61, 127, 251, 509, 1021, diff --git a/lib/ts/X509HostnameValidator.cc b/lib/ts/X509HostnameValidator.cc index c46bcc93042..3f271afcb82 100644 --- a/lib/ts/X509HostnameValidator.cc +++ b/lib/ts/X509HostnameValidator.cc @@ -29,7 +29,7 @@ #include "ts/ink_memory.h" -typedef bool (*equal_fn)(const unsigned char *prefix, size_t prefix_len, const unsigned char *suffix, size_t suffix_len); +using equal_fn = bool (*)(const unsigned char *, size_t, const unsigned char *, size_t); /* Return a ptr to a valid wildcard or NULL if not found * @@ -107,7 +107,7 @@ static bool equal_nocase(const unsigned char *pattern, size_t pattern_len, const unsigned char *subject, size_t subject_len) { if (pattern_len != subject_len) - return 0; + return false; return (strncasecmp((char *)pattern, (char *)subject, pattern_len) == 0); } @@ -116,7 +116,7 @@ static bool equal_case(const unsigned char *pattern, size_t pattern_len, const unsigned char *subject, size_t subject_len) { 
if (pattern_len != subject_len) - return 0; + return false; return (memcmp(pattern, subject, pattern_len) == 0); } diff --git a/lib/ts/apidefs.h.in b/lib/ts/apidefs.h.in index 3b6f58b45c4..d835fe55f51 100644 --- a/lib/ts/apidefs.h.in +++ b/lib/ts/apidefs.h.in @@ -288,7 +288,8 @@ typedef enum { TS_VCONN_PRE_ACCEPT_HOOK = TS_SSL_FIRST_HOOK, TS_SSL_SNI_HOOK, TS_SSL_CERT_HOOK = TS_SSL_SNI_HOOK, - TS_SSL_LAST_HOOK = TS_SSL_CERT_HOOK, + TS_SSL_SERVERNAME_HOOK, + TS_SSL_LAST_HOOK = TS_SSL_SERVERNAME_HOOK, TS_HTTP_LAST_HOOK } TSHttpHookID; @@ -448,7 +449,8 @@ typedef enum { TS_EVENT_INTERNAL_60200 = 60200, TS_EVENT_INTERNAL_60201 = 60201, TS_EVENT_INTERNAL_60202 = 60202, - TS_EVENT_SSL_CERT = 60203 + TS_EVENT_SSL_CERT = 60203, + TS_EVENT_SSL_SERVERNAME = 60204 } TSEvent; #define TS_EVENT_HTTP_READ_REQUEST_PRE_REMAP TS_EVENT_HTTP_PRE_REMAP /* backwards compat */ @@ -734,6 +736,7 @@ typedef enum { TS_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES, TS_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY, TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT, + TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE, TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE, TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT, TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT, @@ -744,6 +747,11 @@ typedef enum { TS_CONFIG_HTTP_FORWARD_CONNECT_METHOD, TS_CONFIG_SSL_CERT_FILENAME, TS_CONFIG_SSL_CERT_FILEPATH, + TS_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB, + TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD, + TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME, + TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS, + TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT, TS_CONFIG_LAST_ENTRY } TSOverridableConfigKey; @@ -838,6 +846,8 @@ typedef struct tsapi_bufferblock *TSIOBufferBlock; typedef struct tsapi_bufferreader *TSIOBufferReader; typedef struct tsapi_hostlookupresult *TSHostLookupResult; typedef struct tsapi_aiocallback *TSAIOCallback; +typedef struct tsapi_net_accept *TSAcceptor; +typedef struct tsapi_protocol_set *TSNextProtocolSet; typedef void *(*TSThreadFunc)(void *data); typedef int 
(*TSEventFunc)(TSCont contp, TSEvent event, void *edata); diff --git a/lib/ts/hugepages.cc b/lib/ts/hugepages.cc index 8e74bd9944e..8c746ae959e 100644 --- a/lib/ts/hugepages.cc +++ b/lib/ts/hugepages.cc @@ -37,7 +37,7 @@ static bool hugepage_enabled; #endif size_t -ats_hugepage_size(void) +ats_hugepage_size() { #ifdef MAP_HUGETLB return hugepage_size; @@ -48,7 +48,7 @@ ats_hugepage_size(void) } bool -ats_hugepage_enabled(void) +ats_hugepage_enabled() { #ifdef MAP_HUGETLB return hugepage_enabled; diff --git a/lib/ts/ink_cap.cc b/lib/ts/ink_cap.cc index 6ffe4826606..977fee2bb53 100644 --- a/lib/ts/ink_cap.cc +++ b/lib/ts/ink_cap.cc @@ -396,6 +396,7 @@ ElevateAccess::releasePrivilege() if (cap_set_proc(static_cast(cap_state)) != 0) { Fatal("failed to restore privileged capabilities: %s", strerror(errno)); } + cap_free(this->cap_state); cap_state = nullptr; } } @@ -407,7 +408,7 @@ ElevateAccess::ElevateAccess(unsigned lvl) level(lvl) #if TS_USE_POSIX_CAP , - cap_state(0) + cap_state(nullptr) #endif { elevate(level); diff --git a/lib/ts/ink_code.cc b/lib/ts/ink_code.cc index 1510005ebbb..bfd672497d4 100644 --- a/lib/ts/ink_code.cc +++ b/lib/ts/ink_code.cc @@ -21,8 +21,8 @@ limitations under the License. 
*/ -#include -#include +#include +#include #include "ts/ink_code.h" #include "ts/INK_MD5.h" #include "ts/ink_assert.h" diff --git a/lib/ts/ink_error.cc b/lib/ts/ink_error.cc index d38bafb2f71..e0419fcb0d9 100644 --- a/lib/ts/ink_error.cc +++ b/lib/ts/ink_error.cc @@ -27,7 +27,7 @@ #include "ts/ink_stack_trace.h" #include -#include /* MAGIC_EDITING_TAG */ +#include /* MAGIC_EDITING_TAG */ /** This routine prints/logs an error message given the printf format diff --git a/lib/ts/ink_file.cc b/lib/ts/ink_file.cc index 9f72c211d16..dcd46dbae72 100644 --- a/lib/ts/ink_file.cc +++ b/lib/ts/ink_file.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include "ts/ink_platform.h" #include "ts/ink_file.h" #include "ts/ink_string.h" diff --git a/lib/ts/ink_inet.cc b/lib/ts/ink_inet.cc index b5791b3ad64..01c2127bf47 100644 --- a/lib/ts/ink_inet.cc +++ b/lib/ts/ink_inet.cc @@ -40,6 +40,19 @@ struct hostent *gethostbyaddr_r(const char *name, size_t size, int type, struct IpAddr const IpAddr::INVALID; +const ts::StringView IP_PROTO_TAG_IPV4("ipv4", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_IPV6("ipv6", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_UDP("udp", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_TCP("tcp", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_TLS_1_0("tls/1.0", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_TLS_1_1("tls/1.1", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_TLS_1_2("tls/1.2", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_TLS_1_3("tls/1.3", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_HTTP_0_9("http/0.9", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_HTTP_1_0("http/1.0", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_HTTP_1_1("http/1.1", ts::StringView::literal); +const ts::StringView IP_PROTO_TAG_HTTP_2_0("h2", ts::StringView::literal); // HTTP/2 over TLS + struct hostent * ink_gethostbyname_r(char 
*hostname, ink_gethostbyname_r_data *data) { @@ -172,10 +185,11 @@ ats_ip_ntop(const struct sockaddr *addr, char *dst, size_t size) return zret; } -const char * +ts::StringView ats_ip_family_name(int family) { - return AF_INET == family ? "IPv4" : AF_INET6 == family ? "IPv6" : "Unspec"; + static const ts::StringView UNSPEC("Unspec", ts::StringView::literal); + return AF_INET == family ? IP_PROTO_TAG_IPV4 : AF_INET6 == family ? IP_PROTO_TAG_IPV6 : UNSPEC; } const char * @@ -666,7 +680,8 @@ ats_tcp_somaxconn() /* Darwin version ... */ #if HAVE_SYSCTLBYNAME - if (sysctlbyname("kern.ipc.somaxconn", nullptr, nullptr, &value, sizeof(value)) == 0) { + size_t value_size = sizeof(value); + if (sysctlbyname("kern.ipc.somaxconn", &value, &value_size, nullptr, 0) == 0) { return value; } #endif diff --git a/lib/ts/ink_inet.h b/lib/ts/ink_inet.h index 7264e50136f..3977eab4df5 100644 --- a/lib/ts/ink_inet.h +++ b/lib/ts/ink_inet.h @@ -31,6 +31,7 @@ #include "ts/ink_memory.h" #include "ts/ink_apidefs.h" #include "ts/TsBuffer.h" +#include #define INK_GETHOSTBYNAME_R_DATA_SIZE 1024 #define INK_GETHOSTBYADDR_R_DATA_SIZE 1024 @@ -47,6 +48,20 @@ IN6_IS_ADDR_UNSPECIFIED(in6_addr const *addr) } #endif +// IP protocol stack tags. +extern const ts::StringView IP_PROTO_TAG_IPV4; +extern const ts::StringView IP_PROTO_TAG_IPV6; +extern const ts::StringView IP_PROTO_TAG_UDP; +extern const ts::StringView IP_PROTO_TAG_TCP; +extern const ts::StringView IP_PROTO_TAG_TLS_1_0; +extern const ts::StringView IP_PROTO_TAG_TLS_1_1; +extern const ts::StringView IP_PROTO_TAG_TLS_1_2; +extern const ts::StringView IP_PROTO_TAG_TLS_1_3; +extern const ts::StringView IP_PROTO_TAG_HTTP_0_9; +extern const ts::StringView IP_PROTO_TAG_HTTP_1_0; +extern const ts::StringView IP_PROTO_TAG_HTTP_1_1; +extern const ts::StringView IP_PROTO_TAG_HTTP_2_0; + struct IpAddr; // forward declare. /** A union to hold the standard IP address structures. 
@@ -213,7 +228,7 @@ ats_ip_invalidate(IpEndpoint *ip) /** Get a string name for an IP address family. @return The string name (never @c nullptr). */ -const char *ats_ip_family_name(int family); +ts::StringView ats_ip_family_name(int family); /// Test for IP protocol. /// @return @c true if the address is IP, @c false otherwise. diff --git a/lib/ts/ink_memory.cc b/lib/ts/ink_memory.cc index e17819e368c..dbce7615715 100644 --- a/lib/ts/ink_memory.cc +++ b/lib/ts/ink_memory.cc @@ -31,15 +31,15 @@ #include // for malloc_usable_size #endif -#include +#include #if defined(linux) // XXX: SHouldn't that be part of CPPFLAGS? #ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif #endif -#include -#include +#include +#include void * ats_malloc(size_t size) diff --git a/lib/ts/ink_mutex.cc b/lib/ts/ink_mutex.cc index 34fe28d4ced..a0881890cc7 100644 --- a/lib/ts/ink_mutex.cc +++ b/lib/ts/ink_mutex.cc @@ -23,8 +23,8 @@ #include "ts/ink_error.h" #include "ts/ink_defs.h" -#include -#include "stdio.h" +#include +#include #include "ts/ink_mutex.h" // Define the _g_mattr first to avoid static initialization order fiasco. 
diff --git a/lib/ts/ink_platform.h b/lib/ts/ink_platform.h index db38cb79ef9..95050d66c85 100644 --- a/lib/ts/ink_platform.h +++ b/lib/ts/ink_platform.h @@ -186,9 +186,7 @@ typedef unsigned int in_addr_t; #ifdef HAVE_DLFCN_H #include #endif -#ifdef HAVE_MATH_H -#include -#endif + #ifdef HAVE_FLOAT_H #include #endif diff --git a/lib/ts/ink_queue.cc b/lib/ts/ink_queue.cc index e9f11587d9b..14a1220c674 100644 --- a/lib/ts/ink_queue.cc +++ b/lib/ts/ink_queue.cc @@ -37,9 +37,9 @@ ****************************************************************************/ #include "ts/ink_config.h" -#include +#include #include -#include +#include #include #include #include diff --git a/lib/ts/ink_queue_utils.cc b/lib/ts/ink_queue_utils.cc index 2fc547b2581..b37c0ab8ebd 100644 --- a/lib/ts/ink_queue_utils.cc +++ b/lib/ts/ink_queue_utils.cc @@ -22,7 +22,7 @@ */ #include "ts/ink_config.h" -#include +#include #include "ts/ink_atomic.h" #include "ts/ink_queue.h" diff --git a/lib/ts/ink_res_init.cc b/lib/ts/ink_res_init.cc index 71f1597be26..1cafd414946 100644 --- a/lib/ts/ink_res_init.cc +++ b/lib/ts/ink_res_init.cc @@ -77,12 +77,12 @@ #ifdef HAVE_ARPA_NAMESER_COMPAT_H #include #endif -#include -#include +#include +#include #include #include -#include -#include +#include +#include #include "ts/ink_string.h" #include "ts/ink_resolver.h" @@ -256,7 +256,7 @@ ink_res_setoptions(ink_res_state statp, const char *options, const char *source } static unsigned -ink_res_randomid(void) +ink_res_randomid() { struct timeval now; @@ -614,14 +614,14 @@ ts_host_res_order_to_string(HostResPreferenceOrder const &order, char *out, int { int zret = 0; bool first = true; - for (int i = 0; i < N_HOST_RES_PREFERENCE_ORDER; ++i) { + for (auto i : order) { /* Note we use a semi-colon here because this must be compatible * with the -httpport command line option which uses comma to * separate port descriptors so we cannot use that to separate * resolution key words. 
*/ - zret += snprintf(out + zret, size - zret, "%s%s", !first ? ";" : "", HOST_RES_PREFERENCE_STRING[order[i]]); - if (HOST_RES_PREFER_NONE == order[i]) + zret += snprintf(out + zret, size - zret, "%s%s", !first ? ";" : "", HOST_RES_PREFERENCE_STRING[i]); + if (HOST_RES_PREFER_NONE == i) break; first = false; } diff --git a/lib/ts/ink_res_mkquery.cc b/lib/ts/ink_res_mkquery.cc index f1e6f2cd713..63e012adbe1 100644 --- a/lib/ts/ink_res_mkquery.cc +++ b/lib/ts/ink_res_mkquery.cc @@ -76,8 +76,8 @@ #endif #include #include -#include -#include +#include +#include #include "ts/ink_error.h" #include "ts/ink_resolver.h" diff --git a/lib/ts/ink_stack_trace.cc b/lib/ts/ink_stack_trace.cc index e5d157a2ee2..88cdd35f381 100644 --- a/lib/ts/ink_stack_trace.cc +++ b/lib/ts/ink_stack_trace.cc @@ -26,8 +26,8 @@ #include "ts/ink_args.h" #include -#include -#include +#include +#include #include #ifndef STDERR_FILENO @@ -37,7 +37,7 @@ #if TS_HAS_BACKTRACE #include /* for backtrace_symbols, etc. */ -#include +#include void ink_stack_trace_dump() diff --git a/lib/ts/ink_string.cc b/lib/ts/ink_string.cc index a386c16dcaa..6ee87ebfd0a 100644 --- a/lib/ts/ink_string.cc +++ b/lib/ts/ink_string.cc @@ -24,10 +24,10 @@ #include "ts/ink_platform.h" #include "ts/ink_assert.h" -#include -#include -#include -#include +#include +#include +#include +#include #define INK_MAX_STRING_ARRAY_SIZE 128 @@ -62,7 +62,7 @@ ink_string_concatenate_strings(char *dest, ...) d = dest; - while (1) { + while (true) { s = va_arg(ap, char *); if (s == nullptr) break; diff --git a/lib/ts/ink_sys_control.cc b/lib/ts/ink_sys_control.cc index 0e43228c947..8cdc824dcee 100644 --- a/lib/ts/ink_sys_control.cc +++ b/lib/ts/ink_sys_control.cc @@ -41,7 +41,7 @@ ink_max_out_rlimit(int which, bool max_it, bool unlim_it) if (rl.rlim_cur != rl.rlim_max) { #if defined(darwin) if (which == RLIMIT_NOFILE) - rl.rlim_cur = fmin(OPEN_MAX, rl.rlim_max); + rl.rlim_cur = (OPEN_MAX < rl.rlim_max) ? 
OPEN_MAX : rl.rlim_max; else rl.rlim_cur = rl.rlim_max; #else diff --git a/lib/ts/ink_time.cc b/lib/ts/ink_time.cc index 97da41987f2..8741baa9e85 100644 --- a/lib/ts/ink_time.cc +++ b/lib/ts/ink_time.cc @@ -34,7 +34,7 @@ #include "ts/ink_assert.h" #include "ts/ink_string.h" -#include +#include #include /*===========================================================================* diff --git a/lib/ts/llqueue.cc b/lib/ts/llqueue.cc index 7eda019c9c8..e5b04fa9a58 100644 --- a/lib/ts/llqueue.cc +++ b/lib/ts/llqueue.cc @@ -24,13 +24,13 @@ #include "ts/ink_config.h" #include "ts/ink_memory.h" -#include -#include -#include -#include +#include +#include +#include +#include #include "ts/ink_llqueue.h" -#include "errno.h" +#include #define RECORD_CHUNK 1024 diff --git a/lib/ts/lockfile.cc b/lib/ts/lockfile.cc index 11d2cf6528b..51871399f72 100644 --- a/lib/ts/lockfile.cc +++ b/lib/ts/lockfile.cc @@ -166,7 +166,7 @@ Lockfile::Get(pid_t *holding_pid) } void -Lockfile::Close(void) +Lockfile::Close() { if (fd != -1) { close(fd); diff --git a/lib/ts/signals.cc b/lib/ts/signals.cc index 1c196186a3a..3980d275a25 100644 --- a/lib/ts/signals.cc +++ b/lib/ts/signals.cc @@ -46,7 +46,7 @@ signal_check_handler(int signal, signal_handler_t handler) sigact = (void *)oact.sa_sigaction; } - if (sigact != handler) { + if (sigact != (void *)handler) { Warning("handler for signal %d was %p, not %p as expected", signal, sigact, handler); return false; } @@ -162,13 +162,7 @@ signal_format_siginfo(int signo, siginfo_t *info, const char *msg) (void)info; (void)signo; -#if HAVE_PSIGINFO - psiginfo(info, const_cast(msg)); -#elif HAVE_PSIGNAL - psignal(signo, msg); -#else char buf[64]; - size_t len; #if HAVE_STRSIGNAL snprintf(buf, sizeof(buf), "%s: received signal %d (%s)\n", msg, signo, strsignal(signo)); @@ -176,8 +170,8 @@ signal_format_siginfo(int signo, siginfo_t *info, const char *msg) snprintf(buf, sizeof(buf), "%s: received signal %d\n", msg, signo); #endif - write(STDERR_FILENO, buf, 
strlen(buf)); -#endif + ssize_t ignored = write(STDERR_FILENO, buf, strlen(buf)); + (void)ignored; // because gcc and glibc are stupid, "(void)write(...)" doesn't suffice. } void diff --git a/lib/ts/test_Map.cc b/lib/ts/test_Map.cc index 4ec0713ae48..8ef7d24b8e6 100644 --- a/lib/ts/test_Map.cc +++ b/lib/ts/test_Map.cc @@ -20,19 +20,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include "ts/Map.h" #include -typedef const char cchar; +using cchar = const char; struct Item { LINK(Item, m_link); struct Hash { - typedef uint32_t ID; - typedef uint32_t Key; - typedef Item Value; - typedef DList(Item, m_link) ListHead; + using ID = uint32_t; + using Key = uint32_t; + using Value = Item; + using ListHead = DLL; static ID hash(Key key) @@ -61,7 +61,7 @@ Item::Hash::equal(Key lhs, Key rhs) return lhs == rhs; } -typedef TSHashTable Table; +using Table = TSHashTable; void test_TSHashTable() diff --git a/lib/ts/test_MemView.cc b/lib/ts/test_MemView.cc new file mode 100644 index 00000000000..efe7b550e6c --- /dev/null +++ b/lib/ts/test_MemView.cc @@ -0,0 +1,96 @@ +/** @file + + MemView testing. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include + +using namespace ts; + +template +bool +CheckEqual(T const &lhs, S const &rhs, std::string const &prefix) +{ + bool zret = lhs == rhs; + if (!zret) { + std::cout << "FAIL: " << prefix << ": Expected " << lhs << " to be " << rhs << std::endl; + } + return zret; +} + +bool +Test_1() +{ + std::string text = "01234567"; + StringView a(text); + + std::cout << "Text = |" << a << '|' << std::endl; + std::cout << " = |" << std::setw(5) << a << '|' << std::endl; + std::cout << " = |" << std::setw(12) << a << '|' << std::endl; + std::cout << " = |" << std::setw(12) << std::right << a << '|' << std::endl; + std::cout << " = |" << std::setw(12) << std::left << a << '|' << std::endl; + std::cout << " = |" << std::setw(12) << std::right << std::setfill('_') << a << '|' << std::endl; + std::cout << " = |" << std::setw(12) << std::left << std::setfill('_') << a << '|' << std::endl; + return true; +} + +bool +Test_2() +{ + bool zret = true; + StringView sva("litt\0ral"); + StringView svb("litt\0ral", StringView::literal); + StringView svc("litt\0ral", StringView::array); + + zret = zret && CheckEqual(sva.size(), 4U, "strlen constructor"); + zret = zret && CheckEqual(svb.size(), 8U, "literal constructor"); + zret = zret && CheckEqual(svc.size(), 9U, "array constructor"); + + return zret; +} + +// These tests are purely compile time. +void +Test_Compile() +{ + int i[12]; + char c[29]; + void *x = i, *y = i + 12; + MemView mvi(i, i + 12); + MemView mci(c, c + 29); + MemView mcv(x, y); +} + +int +main(int, char *argv[]) +{ + bool zret = true; + + zret = zret && Test_1(); + zret = zret && Test_2(); + + return zret ? 
0 : 1; +} diff --git a/lib/ts/test_PriorityQueue.cc b/lib/ts/test_PriorityQueue.cc index 21f1b21fc27..73b20f8ced6 100644 --- a/lib/ts/test_PriorityQueue.cc +++ b/lib/ts/test_PriorityQueue.cc @@ -22,7 +22,8 @@ */ #include -#include +#include +#include #include @@ -33,7 +34,7 @@ using namespace std; class N { public: - N(uint32_t w, string c) : weight(w), content(c) {} + N(uint32_t w, string c) : weight(w), content(std::move(c)) {} bool operator<(const N &n) const { @@ -44,8 +45,8 @@ class N string content; }; -typedef PriorityQueueEntry Entry; -typedef PriorityQueue PQ; +using Entry = PriorityQueueEntry; +using PQ = PriorityQueue; // For debug void diff --git a/lib/ts/test_Ptr.cc b/lib/ts/test_Ptr.cc index eb79036fd77..9392bf2e81e 100644 --- a/lib/ts/test_Ptr.cc +++ b/lib/ts/test_Ptr.cc @@ -24,7 +24,7 @@ struct PtrObject : RefCountObj { PtrObject(unsigned *_c) : count(_c) { ++(*count); } - ~PtrObject() { --(*count); } + ~PtrObject() override { --(*count); } unsigned *count; }; diff --git a/lib/ts/test_Vec.cc b/lib/ts/test_Vec.cc index c1dc5e936a4..8109999fd15 100644 --- a/lib/ts/test_Vec.cc +++ b/lib/ts/test_Vec.cc @@ -22,8 +22,8 @@ /* UnionFind after Tarjan */ -#include -#include +#include +#include #include "ts/ink_assert.h" #include "ts/Vec.h" diff --git a/lib/ts/test_arena.cc b/lib/ts/test_arena.cc index 89abb2cb30c..dbc0fadbbf8 100644 --- a/lib/ts/test_arena.cc +++ b/lib/ts/test_arena.cc @@ -34,7 +34,7 @@ ****************************************************************************/ #include "ts/Arena.h" -#include +#include void fill_test_data(char *ptr, int size, int seed) diff --git a/lib/ts/test_atomic.cc b/lib/ts/test_atomic.cc index a02a91aaa82..90f234722b0 100644 --- a/lib/ts/test_atomic.cc +++ b/lib/ts/test_atomic.cc @@ -22,8 +22,8 @@ */ #include -#include -#include +#include +#include #include #include diff --git a/lib/ts/test_freelist.cc b/lib/ts/test_freelist.cc index 33d8de058d0..19d269b514c 100644 --- a/lib/ts/test_freelist.cc +++ 
b/lib/ts/test_freelist.cc @@ -21,8 +21,8 @@ limitations under the License. */ -#include -#include +#include +#include #include "ts/ink_thread.h" #include "ts/ink_queue.h" diff --git a/lib/tsconfig/TsBuilder.cc b/lib/tsconfig/TsBuilder.cc index f9af31947f2..56a4e966ce8 100644 --- a/lib/tsconfig/TsBuilder.cc +++ b/lib/tsconfig/TsBuilder.cc @@ -25,7 +25,7 @@ # include "TsErrataUtil.h" # include "TsConfigLexer.h" # include "TsConfigGrammar.hpp" -# include +# include // Prefix for text of our messages. # define PRE "Configuration Parser: " diff --git a/lib/tsconfig/TsErrataUtil.cc b/lib/tsconfig/TsErrataUtil.cc index fe24d50769c..d9b3373d937 100644 --- a/lib/tsconfig/TsErrataUtil.cc +++ b/lib/tsconfig/TsErrataUtil.cc @@ -22,11 +22,11 @@ */ # if !defined(_MSC_VER) -# include -# include +# include +# include # endif -# include -# include +# include +# include # include # include "ts/ink_string.h" # include "ts/ink_defs.h" diff --git a/lib/tsconfig/TsValue.cc b/lib/tsconfig/TsValue.cc index b9d9b7c83d2..d8770908711 100644 --- a/lib/tsconfig/TsValue.cc +++ b/lib/tsconfig/TsValue.cc @@ -27,8 +27,8 @@ # include # include -# include -# include +# include +# include # if !defined(_MSC_VER) # define _fileno fileno @@ -55,8 +55,8 @@ unsigned int const detail::Type_Property[N_VALUE_TYPES] = { // --------------------------------------------------------------------------- detail::ValueTableImpl::ValueTableImpl() : _generation(0) { } detail::ValueTableImpl::~ValueTableImpl() { - for ( BufferGroup::iterator spot(_buffers.begin()), limit(_buffers.end()) ; spot != limit ; ++spot) - free(spot->_ptr); + for (auto & _buffer : _buffers) + free(_buffer._ptr); } // --------------------------------------------------------------------------- detail::ValueTable::ImplType* @@ -135,9 +135,9 @@ Value::operator [] (ConstBuffer const& name) const { Value zret; detail::ValueItem const* item = this->item(); if (item) { - for ( detail::ValueItem::ChildGroup::const_iterator spot = 
item->_children.begin(), limit = item->_children.end(); spot != limit; ++spot ) { - if (_config._table[*spot]._name == name) { - zret = Value(_config, *spot); + for (const auto & spot : item->_children) { + if (_config._table[spot]._name == name) { + zret = Value(_config, spot); if (PathValue == zret.getType()) zret = _config.getRoot().find(_config._table[zret._vidx]._path); break; } diff --git a/lib/tsconfig/test-tsconfig.cc b/lib/tsconfig/test-tsconfig.cc index 5995e9f455d..8b507c32ebb 100644 --- a/lib/tsconfig/test-tsconfig.cc +++ b/lib/tsconfig/test-tsconfig.cc @@ -19,7 +19,7 @@ */ # include "tsconfig/TsValue.h" -# include +# include # include using ts::config::Configuration; diff --git a/mgmt/FileManager.cc b/mgmt/FileManager.cc index 656a16f3d94..bdbd23c7f6c 100644 --- a/mgmt/FileManager.cc +++ b/mgmt/FileManager.cc @@ -36,7 +36,7 @@ #define DIR_MODE S_IRWXU #define FILE_MODE S_IRWXU -typedef fileEntry snapshot; +using snapshot = fileEntry; FileManager::FileManager() { @@ -180,7 +180,7 @@ FileManager::fileChanged(const char *fileName, bool incVersion) { callbackListable *cb; char *filenameCopy; - + Debug("lm", "filename changed %s", fileName); ink_mutex_acquire(&cbListLock); for (cb = cblist.head; cb != nullptr; cb = cb->link.next) { @@ -667,6 +667,7 @@ FileManager::rereadConfig() if (found && enabled) { fileChanged("proxy.config.body_factory.template_sets_dir", true); } + fileChanged("proxy.config.ssl.server.ticket_key.filename", true); } bool diff --git a/mgmt/LocalManager.cc b/mgmt/LocalManager.cc index 835c0f4a54b..b27dd7bcf75 100644 --- a/mgmt/LocalManager.cc +++ b/mgmt/LocalManager.cc @@ -244,7 +244,7 @@ LocalManager::LocalManager(bool proxy_on) : BaseManager(), run_proxy(proxy_on), proxy_name = REC_readString("proxy.config.proxy_name", &found); proxy_binary = REC_readString("proxy.config.proxy_binary", &found); env_prep = REC_readString("proxy.config.env_prep", &found); - proxy_options = NULL; + proxy_options = nullptr; // Calculate proxy_binary from 
the absolute bin_path absolute_proxy_binary = Layout::relative_to(bindir, proxy_binary); @@ -389,7 +389,7 @@ LocalManager::pollMgmtProcessServer() struct timeval timeout; fd_set fdlist; - while (1) { + while (true) { #if TS_HAS_WCCP int wccp_fd = wccp_cache.getSocket(); #endif @@ -700,7 +700,7 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh) case MGMT_EVENT_CONFIG_FILE_UPDATE: case MGMT_EVENT_CONFIG_FILE_UPDATE_NO_INC_VERSION: bool found; - char *fname; + char *fname = nullptr; Rollback *rb; char *data_raw; @@ -714,9 +714,11 @@ LocalManager::sendMgmtMsgToProcesses(MgmtMessageHdr *mh) mgmt_log("[LocalManager:sendMgmtMsgToProcesses] Unknown file change: '%s'\n", data_raw); } ink_assert(found); - if (!(configFiles && configFiles->getRollbackObj(fname, &rb)) && + + if (!(fname && configFiles && configFiles->getRollbackObj(fname, &rb)) && (strcmp(data_raw, "proxy.config.cluster.cluster_configuration") != 0) && - (strcmp(data_raw, "proxy.config.body_factory.template_sets_dir") != 0)) { + (strcmp(data_raw, "proxy.config.body_factory.template_sets_dir") != 0) && + (strcmp(data_raw, "proxy.config.ssl.server.ticket_key.filename") != 0)) { mgmt_fatal(0, "[LocalManager::sendMgmtMsgToProcesses] " "Invalid 'data_raw' for MGMT_EVENT_CONFIG_FILE_UPDATE\n"); } @@ -854,7 +856,7 @@ LocalManager::processEventQueue() ink_assert(enqueue(mgmt_event_queue, mh)); return; } - Debug("lm", "[TrafficManager] ==> Sending signal event '%d' payload=%d", mh->msg_id, mh->data_len); + Debug("lm", "[TrafficManager] ==> Sending signal event '%d' %s payload=%d", mh->msg_id, data_raw, mh->data_len); lmgmt->sendMgmtMsgToProcesses(mh); } ats_free(mh); @@ -946,7 +948,7 @@ LocalManager::startProxy(const char *onetime_options) } // Make sure we're starting the proxy in mgmt mode - if (strstr(real_proxy_options, MGMT_OPT) == 0) { + if (strstr(real_proxy_options, MGMT_OPT) == nullptr) { ink_strlcat(real_proxy_options, " ", OPTIONS_SIZE); ink_strlcat(real_proxy_options, MGMT_OPT, OPTIONS_SIZE); } @@ 
-1035,9 +1037,9 @@ LocalManager::listenForProxy() if ((listen(p.m_fd, backlog)) < 0) { mgmt_fatal(errno, "[LocalManager::listenForProxy] Unable to listen on port: %d (%s)\n", p.m_port, - ats_ip_family_name(p.m_family)); + ats_ip_family_name(p.m_family).ptr()); } - mgmt_log("[LocalManager::listenForProxy] Listening on port: %d (%s)\n", p.m_port, ats_ip_family_name(p.m_family)); + mgmt_log("[LocalManager::listenForProxy] Listening on port: %d (%s)\n", p.m_port, ats_ip_family_name(p.m_family).ptr()); } return; } diff --git a/mgmt/ProcessManager.cc b/mgmt/ProcessManager.cc index 78870a1740d..cd7c714cefd 100644 --- a/mgmt/ProcessManager.cc +++ b/mgmt/ProcessManager.cc @@ -260,7 +260,9 @@ ProcessManager::pollLMConnection() // handle EOF if (res == 0) { close_socket(local_manager_sockfd); - mgmt_fatal(0, "[ProcessManager::pollLMConnection] Lost Manager EOF!"); + if (!shutdown_event_system) { + mgmt_fatal(0, "[ProcessManager::pollLMConnection] Lost Manager EOF!"); + } } } else if (num < 0) { /* Error */ mgmt_log("[ProcessManager::pollLMConnection] select failed or was interrupted (%d)\n", errno); diff --git a/mgmt/ProxyConfig.cc b/mgmt/ProxyConfig.cc index 23cf750ae51..22d955cbc00 100644 --- a/mgmt/ProxyConfig.cc +++ b/mgmt/ProxyConfig.cc @@ -254,7 +254,7 @@ struct RegressionConfig : public ConfigInfo { ink_atomic_increment(&nobjects, 1); } - ~RegressionConfig() + ~RegressionConfig() override { TestBox box(this->test, this->pstatus); @@ -281,7 +281,7 @@ volatile int RegressionConfig::nobjects = 0; struct ProxyConfig_Set_Completion { ProxyConfig_Set_Completion(int _id, RegressionConfig *_c) : configid(_id), config(_c) {} void - operator()(void) const + operator()() const { // Push one more RegressionConfig to force the LAST-tagged one to get destroyed. 
rprintf(config->test, "setting LAST config object %p\n", config); @@ -315,7 +315,7 @@ EXCLUSIVE_REGRESSION_TEST(ProxyConfig_Set)(RegressionTest *test, int /* atype AT struct ProxyConfig_Release_Completion { ProxyConfig_Release_Completion(int _id, RegressionConfig *_c) : configid(_id), config(_c) {} void - operator()(void) const + operator()() const { // Release the reference count. Since we were keeping this alive, it should be the last to die. configProcessor.release(configid, config); diff --git a/mgmt/ProxyConfig.h b/mgmt/ProxyConfig.h index 9247b3b9dab..0c9431efdc8 100644 --- a/mgmt/ProxyConfig.h +++ b/mgmt/ProxyConfig.h @@ -35,6 +35,7 @@ #include "ts/ink_memory.h" #include "ProcessManager.h" #include "I_EventSystem.h" +#include "I_Tasks.h" class ProxyMutex; @@ -111,7 +112,7 @@ template int ConfigScheduleUpdate(Ptr &mutex) { - eventProcessor.schedule_imm(new ConfigUpdateContinuation(mutex), ET_CALL); + eventProcessor.schedule_imm(new ConfigUpdateContinuation(mutex), ET_TASK); return 0; } diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc index 3176f13b889..54f28ad4921 100644 --- a/mgmt/RecordsConfig.cc +++ b/mgmt/RecordsConfig.cc @@ -259,7 +259,7 @@ static const RecordElement RecordsConfig[] = , {RECT_CONFIG, "proxy.config.cluster.mcport", RECD_INT, "8089", RECU_DYNAMIC, RR_REQUIRED, RECC_NULL, nullptr, RECA_NULL} , - {RECT_CONFIG, "proxy.config.cluster.mc_group_addr", RECD_STRING, "224.0.1.37", RECU_DYNAMIC, RR_REQUIRED, RECC_IP, "[0-255]\\.[0-255]\\.[0-255]\\.[0-255]", RECA_NULL} + {RECT_CONFIG, "proxy.config.cluster.mc_group_addr", RECD_STRING, "224.0.1.37", RECU_DYNAMIC, RR_REQUIRED, RECC_IP, R"([0-255]\.[0-255]\.[0-255]\.[0-255])", RECA_NULL} , {RECT_CONFIG, "proxy.config.cluster.mc_ttl", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} , @@ -473,6 +473,8 @@ static const RecordElement RecordsConfig[] = , {RECT_CONFIG, "proxy.config.http.attach_server_session_to_client", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", 
RECA_NULL} , + {RECT_CONFIG, "proxy.config.http.safe_requests_retryable", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} + , {RECT_CONFIG, "proxy.config.net.max_connections_in", RECD_INT, "30000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} , {RECT_CONFIG, "proxy.config.net.max_connections_active_in", RECD_INT, "10000", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} @@ -512,6 +514,8 @@ static const RecordElement RecordsConfig[] = , {RECT_CONFIG, "proxy.config.http.parent_proxy.connect_attempts_timeout", RECD_INT, "30", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} , + {RECT_CONFIG, "proxy.config.http.parent_proxy.mark_down_hostdb", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} + , {RECT_CONFIG, "proxy.config.http.forward.proxy_auth_to_parent", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} , @@ -945,9 +949,9 @@ static const RecordElement RecordsConfig[] = // # alternatively: 20971520 (20MB) {RECT_CONFIG, "proxy.config.cache.ram_cache.size", RECD_INT, "-1", RECU_RESTART_TS, RR_NULL, RECC_STR, "^-?[0-9]+$", RECA_NULL} , - {RECT_CONFIG, "proxy.config.cache.ram_cache.algorithm", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} + {RECT_CONFIG, "proxy.config.cache.ram_cache.algorithm", RECD_INT, "1", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} , - {RECT_CONFIG, "proxy.config.cache.ram_cache.use_seen_filter", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} + {RECT_CONFIG, "proxy.config.cache.ram_cache.use_seen_filter", RECD_INT, "1", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_NULL} , {RECT_CONFIG, "proxy.config.cache.ram_cache.compress", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-3]", RECA_NULL} , @@ -1293,7 +1297,7 @@ static const RecordElement RecordsConfig[] = , {RECT_CONFIG, "proxy.config.ssl.wire_trace_enabled", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-2]", RECA_NULL} , - {RECT_CONFIG, 
"proxy.config.ssl.wire_trace_addr", RECD_STRING, nullptr , RECU_DYNAMIC, RR_NULL, RECC_IP, "[0-255]\\.[0-255]\\.[0-255]\\.[0-255]", RECA_NULL} + {RECT_CONFIG, "proxy.config.ssl.wire_trace_addr", RECD_STRING, nullptr , RECU_DYNAMIC, RR_NULL, RECC_IP, R"([0-255]\.[0-255]\.[0-255]\.[0-255])", RECA_NULL} , {RECT_CONFIG, "proxy.config.ssl.wire_trace_percentage", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_INT, "[0-100]", RECA_NULL} , diff --git a/mgmt/WebMgmtUtils.cc b/mgmt/WebMgmtUtils.cc index 3d9120d58ec..b71c9ecc6f5 100644 --- a/mgmt/WebMgmtUtils.cc +++ b/mgmt/WebMgmtUtils.cc @@ -1181,7 +1181,7 @@ recordIPCheck(const char *pattern, const char *value) // int result; bool check; const char *range_pattern = - "\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]\\\\\\.\\[[0-9]+\\-[0-9]+\\]"; + R"(\[[0-9]+\-[0-9]+\]\\\.\[[0-9]+\-[0-9]+\]\\\.\[[0-9]+\-[0-9]+\]\\\.\[[0-9]+\-[0-9]+\])"; const char *ip_pattern = "[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9].[0-9]*[0-9]*[0-9]"; Tokenizer dotTok1("."); diff --git a/mgmt/api/APITestCliRemote.cc b/mgmt/api/APITestCliRemote.cc index 3e8547cd197..7efbeb84d9e 100644 --- a/mgmt/api/APITestCliRemote.cc +++ b/mgmt/api/APITestCliRemote.cc @@ -95,9 +95,9 @@ #include "ts/ink_config.h" #include "ts/ink_defs.h" -#include -#include -#include +#include +#include +#include #include #include "ts/ink_string.h" @@ -939,7 +939,7 @@ reconfigure() * tests if correct action need is returned when requested record is set */ void -test_action_need(void) +test_action_need() { TSActionNeedT action; @@ -1235,7 +1235,7 @@ test_rec_get(char *rec_name) * get list of records */ void -test_record_get_mlt(void) +test_record_get_mlt() { TSRecordEle *rec_ele; TSStringList name_list; @@ -1323,7 +1323,7 @@ test_record_get_mlt(void) * Also checks to make sure correct action_need type is set. 
*/ void -test_record_set_mlt(void) +test_record_set_mlt() { TSList list; TSRecordEle *ele1, *ele2, *ele3, *ele4, *ele5; @@ -2025,7 +2025,7 @@ eventCallbackFn(char *name, char *msg, int /* pri ATS_UNUSED */, void * /* data * that for any event that's signalled, the callback fn will also be called */ void -register_event_callback(void) +register_event_callback() { TSMgmtError err; @@ -2042,7 +2042,7 @@ register_event_callback(void) * event called, the eventCallbackFn will NOT be called */ void -unregister_event_callback(void) +unregister_event_callback() { TSMgmtError err; @@ -2292,7 +2292,7 @@ runInteractive() char buf[512]; // holds request from interactive prompt // process input from command line - while (1) { + while (true) { // Display a prompt printf("api_cli-> "); diff --git a/mgmt/api/CfgContextUtils.cc b/mgmt/api/CfgContextUtils.cc index 1dea013563c..72e74095b43 100644 --- a/mgmt/api/CfgContextUtils.cc +++ b/mgmt/api/CfgContextUtils.cc @@ -998,7 +998,7 @@ pdest_sspec_to_string(TSPrimeDestT pd, char *pd_val, TSSspec *sspec) } } } - } while (0); + } while (false); str = ats_strdup(buf); return str; diff --git a/mgmt/api/CoreAPIRemote.cc b/mgmt/api/CoreAPIRemote.cc index 26f051216ec..32e7ac14d7c 100644 --- a/mgmt/api/CoreAPIRemote.cc +++ b/mgmt/api/CoreAPIRemote.cc @@ -80,7 +80,7 @@ send_and_parse_list(OpType op, LLQ *list) Tokenizer tokens(REMOTE_DELIM_STR); tok_iter_state i_state; - MgmtMarshallInt optype = op; + OpType optype = op; MgmtMarshallInt err; MgmtMarshallData reply = {nullptr, 0}; MgmtMarshallString strval = nullptr; @@ -142,7 +142,7 @@ mgmt_record_set(const char *rec_name, const char *rec_val, TSActionNeedT *action { TSMgmtError ret; - MgmtMarshallInt optype = RECORD_SET; + OpType optype = OpType::RECORD_SET; MgmtMarshallString name = const_cast(rec_name); MgmtMarshallString value = const_cast(rec_val); @@ -157,7 +157,7 @@ mgmt_record_set(const char *rec_name, const char *rec_val, TSActionNeedT *action *action_need = TS_ACTION_UNDEFINED; // 
create and send request - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECORD_SET, &optype, &name, &value); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECORD_SET, &optype, &name, &value); if (ret != TS_ERR_OKAY) { return ret; } @@ -167,7 +167,7 @@ mgmt_record_set(const char *rec_name, const char *rec_val, TSActionNeedT *action return ret; } - ret = recv_mgmt_response(reply.ptr, reply.len, RECORD_SET, &err, &action); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::RECORD_SET, &err, &action); ats_free(reply.ptr); if (ret != TS_ERR_OKAY) { @@ -298,12 +298,12 @@ TSProxyStateT ProxyStateGet() { TSMgmtError ret; - MgmtMarshallInt optype = PROXY_STATE_GET; + OpType optype = OpType::PROXY_STATE_GET; MgmtMarshallData reply = {nullptr, 0}; MgmtMarshallInt err; MgmtMarshallInt state; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, PROXY_STATE_GET, &optype); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::PROXY_STATE_GET, &optype); if (ret != TS_ERR_OKAY) { return TS_PROXY_UNDEFINED; } @@ -313,7 +313,7 @@ ProxyStateGet() return TS_PROXY_UNDEFINED; } - ret = recv_mgmt_response(reply.ptr, reply.len, PROXY_STATE_GET, &err, &state); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::PROXY_STATE_GET, &err, &state); ats_free(reply.ptr); if (ret != TS_ERR_OKAY || err != TS_ERR_OKAY) { @@ -327,12 +327,12 @@ TSMgmtError ProxyStateSet(TSProxyStateT state, TSCacheClearT clear) { TSMgmtError ret; - MgmtMarshallInt optype = PROXY_STATE_SET; + OpType optype = OpType::PROXY_STATE_SET; MgmtMarshallInt pstate = state; MgmtMarshallInt pclear = clear; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, PROXY_STATE_SET, &optype, &pstate, &pclear); - return (ret == TS_ERR_OKAY) ? parse_generic_response(PROXY_STATE_SET, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::PROXY_STATE_SET, &optype, &pstate, &pclear); + return (ret == TS_ERR_OKAY) ? 
parse_generic_response(OpType::PROXY_STATE_SET, main_socket_fd) : ret; } TSMgmtError @@ -340,13 +340,13 @@ ServerBacktrace(unsigned options, char **trace) { ink_release_assert(trace != nullptr); TSMgmtError ret; - MgmtMarshallInt optype = SERVER_BACKTRACE; MgmtMarshallInt err; + OpType optype = OpType::SERVER_BACKTRACE; MgmtMarshallInt flags = options; MgmtMarshallData reply = {nullptr, 0}; MgmtMarshallString strval = nullptr; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, SERVER_BACKTRACE, &optype, &flags); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::SERVER_BACKTRACE, &optype, &flags); if (ret != TS_ERR_OKAY) { goto fail; } @@ -356,7 +356,7 @@ ServerBacktrace(unsigned options, char **trace) goto fail; } - ret = recv_mgmt_response(reply.ptr, reply.len, SERVER_BACKTRACE, &err, &strval); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::SERVER_BACKTRACE, &err, &strval); if (ret != TS_ERR_OKAY) { goto fail; } @@ -380,10 +380,10 @@ TSMgmtError Reconfigure() { TSMgmtError ret; - MgmtMarshallInt optype = RECONFIGURE; + OpType optype = OpType::RECONFIGURE; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECONFIGURE, &optype); - return (ret == TS_ERR_OKAY) ? parse_generic_response(RECONFIGURE, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECONFIGURE, &optype); + return (ret == TS_ERR_OKAY) ? 
parse_generic_response(OpType::RECONFIGURE, main_socket_fd) : ret; } /*------------------------------------------------------------------------- @@ -399,15 +399,15 @@ TSMgmtError Restart(unsigned options) { TSMgmtError ret; - MgmtMarshallInt optype = RESTART; - MgmtMarshallInt oval = options; + OpType optype = OpType::RESTART; + MgmtMarshallInt oval = options; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RESTART, &optype, &oval); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RESTART, &optype, &oval); if (ret != TS_ERR_OKAY) { return ret; } - ret = parse_generic_response(RESTART, main_socket_fd); + ret = parse_generic_response(OpType::RESTART, main_socket_fd); if (ret == TS_ERR_OKAY) { ret = reconnect_loop(MAX_CONN_TRIES); } @@ -424,12 +424,12 @@ TSMgmtError Bounce(unsigned options) { TSMgmtError ret; - MgmtMarshallInt optype = BOUNCE; - MgmtMarshallInt oval = options; + OpType optype = OpType::BOUNCE; + MgmtMarshallInt oval = options; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, BOUNCE, &optype, &oval); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::BOUNCE, &optype, &oval); - return (ret == TS_ERR_OKAY) ? parse_generic_response(BOUNCE, main_socket_fd) : ret; + return (ret == TS_ERR_OKAY) ? parse_generic_response(OpType::BOUNCE, main_socket_fd) : ret; } /*------------------------------------------------------------------------- @@ -441,11 +441,11 @@ TSMgmtError StorageDeviceCmdOffline(const char *dev) { TSMgmtError ret; - MgmtMarshallInt optype = STORAGE_DEVICE_CMD_OFFLINE; + OpType optype = OpType::STORAGE_DEVICE_CMD_OFFLINE; MgmtMarshallString name = const_cast(dev); - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, STORAGE_DEVICE_CMD_OFFLINE, &optype, &name); - return (ret == TS_ERR_OKAY) ? parse_generic_response(STORAGE_DEVICE_CMD_OFFLINE, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::STORAGE_DEVICE_CMD_OFFLINE, &optype, &name); + return (ret == TS_ERR_OKAY) ? 
parse_generic_response(OpType::STORAGE_DEVICE_CMD_OFFLINE, main_socket_fd) : ret; } /*------------------------------------------------------------------------- @@ -457,12 +457,12 @@ TSMgmtError LifecycleMessage(const char *tag, void const *data, size_t data_size) { TSMgmtError ret; - MgmtMarshallInt optype = LIFECYCLE_MESSAGE; + OpType optype = OpType::LIFECYCLE_MESSAGE; MgmtMarshallString mtag = const_cast(tag); MgmtMarshallData mdata = {const_cast(data), data_size}; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, LIFECYCLE_MESSAGE, &optype, &mtag, &mdata); - return (ret == TS_ERR_OKAY) ? parse_generic_response(LIFECYCLE_MESSAGE, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::LIFECYCLE_MESSAGE, &optype, &mtag, &mdata); + return (ret == TS_ERR_OKAY) ? parse_generic_response(OpType::LIFECYCLE_MESSAGE, main_socket_fd) : ret; } /*************************************************************************** @@ -567,8 +567,8 @@ mgmt_record_describe_reply(TSConfigRecordDescription *val) MgmtMarshallInt checktype; MgmtMarshallInt source; - ret = recv_mgmt_response(reply.ptr, reply.len, RECORD_DESCRIBE_CONFIG, &err, &name, &value, &deflt, &rtype, &rclass, &version, - &rsb, &order, &access, &update, &updatetype, &checktype, &source, &expr); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::RECORD_DESCRIBE_CONFIG, &err, &name, &value, &deflt, &rtype, &rclass, + &version, &rsb, &order, &access, &update, &updatetype, &checktype, &source, &expr); ats_free(reply.ptr); @@ -611,7 +611,7 @@ TSMgmtError MgmtRecordGet(const char *rec_name, TSRecordEle *rec_ele) { TSMgmtError ret; - MgmtMarshallInt optype = RECORD_GET; + OpType optype = OpType::RECORD_GET; MgmtMarshallString record = const_cast(rec_name); if (!rec_name || !rec_ele) { @@ -619,20 +619,20 @@ MgmtRecordGet(const char *rec_name, TSRecordEle *rec_ele) } // create and send request - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECORD_GET, &optype, &record); - return (ret == TS_ERR_OKAY) ? 
mgmt_record_get_reply(RECORD_GET, rec_ele) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECORD_GET, &optype, &record); + return (ret == TS_ERR_OKAY) ? mgmt_record_get_reply(OpType::RECORD_GET, rec_ele) : ret; } TSMgmtError MgmtConfigRecordDescribeMatching(const char *rec_name, unsigned options, TSList rec_vals) { TSMgmtError ret; - MgmtMarshallInt optype = RECORD_DESCRIBE_CONFIG; + OpType optype = OpType::RECORD_DESCRIBE_CONFIG; MgmtMarshallInt flags = options | RECORD_DESCRIBE_FLAGS_MATCH; MgmtMarshallString record = const_cast(rec_name); // create and send request - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECORD_DESCRIBE_CONFIG, &optype, &record, &flags); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECORD_DESCRIBE_CONFIG, &optype, &record, &flags); if (ret != TS_ERR_OKAY) { return ret; } @@ -673,12 +673,12 @@ TSMgmtError MgmtConfigRecordDescribe(const char *rec_name, unsigned options, TSConfigRecordDescription *val) { TSMgmtError ret; - MgmtMarshallInt optype = RECORD_DESCRIBE_CONFIG; + OpType optype = OpType::RECORD_DESCRIBE_CONFIG; MgmtMarshallInt flags = options & ~RECORD_DESCRIBE_FLAGS_MATCH; MgmtMarshallString record = const_cast(rec_name); // create and send request - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECORD_DESCRIBE_CONFIG, &optype, &record, &flags); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECORD_DESCRIBE_CONFIG, &optype, &record, &flags); if (ret != TS_ERR_OKAY) { return ret; } @@ -692,14 +692,14 @@ MgmtRecordGetMatching(const char *regex, TSList rec_vals) TSMgmtError ret; TSRecordEle *rec_ele; - MgmtMarshallInt optype = RECORD_MATCH_GET; + OpType optype = OpType::RECORD_MATCH_GET; MgmtMarshallString record = const_cast(regex); if (!regex || !rec_vals) { return TS_ERR_PARAMS; } - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, RECORD_MATCH_GET, &optype, &record); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::RECORD_MATCH_GET, &optype, &record); if (ret != TS_ERR_OKAY) { return ret; } @@ -708,7 +708,7 @@ 
MgmtRecordGetMatching(const char *regex, TSList rec_vals) rec_ele = TSRecordEleCreate(); // parse the reply to get record value and type - ret = mgmt_record_get_reply(RECORD_MATCH_GET, rec_ele); + ret = mgmt_record_get_reply(OpType::RECORD_MATCH_GET, rec_ele); if (ret != TS_ERR_OKAY) { TSRecordEleDestroy(rec_ele); goto fail; @@ -833,8 +833,8 @@ TSMgmtError ReadFile(TSFileNameT file, char **text, int *size, int *version) { TSMgmtError ret; - MgmtMarshallInt optype = FILE_READ; - MgmtMarshallInt fid = file; + OpType optype = OpType::FILE_READ; + MgmtMarshallInt fid = file; MgmtMarshallData reply = {nullptr, 0}; MgmtMarshallInt err; @@ -844,7 +844,7 @@ ReadFile(TSFileNameT file, char **text, int *size, int *version) *text = nullptr; *size = *version = 0; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, FILE_READ, &optype, &fid); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::FILE_READ, &optype, &fid); if (ret != TS_ERR_OKAY) { return ret; } @@ -854,7 +854,7 @@ ReadFile(TSFileNameT file, char **text, int *size, int *version) return ret; } - ret = recv_mgmt_response(reply.ptr, reply.len, FILE_READ, &err, &vers, &data); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::FILE_READ, &err, &vers, &data); ats_free(reply.ptr); if (ret != TS_ERR_OKAY) { @@ -890,13 +890,13 @@ WriteFile(TSFileNameT file, const char *text, int size, int version) { TSMgmtError ret; - MgmtMarshallInt optype = FILE_WRITE; - MgmtMarshallInt fid = file; - MgmtMarshallInt vers = version; - MgmtMarshallData data = {(void *)text, (size_t)size}; + OpType optype = OpType::FILE_WRITE; + MgmtMarshallInt fid = file; + MgmtMarshallInt vers = version; + MgmtMarshallData data = {(void *)text, (size_t)size}; - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, FILE_WRITE, &optype, &fid, &vers, &data); - return (ret == TS_ERR_OKAY) ? 
parse_generic_response(FILE_WRITE, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::FILE_WRITE, &optype, &fid, &vers, &data); + return (ret == TS_ERR_OKAY) ? parse_generic_response(OpType::FILE_WRITE, main_socket_fd) : ret; } /*************************************************************************** @@ -924,15 +924,15 @@ TSMgmtError EventResolve(const char *event_name) { TSMgmtError ret; - MgmtMarshallInt optype = EVENT_RESOLVE; + OpType optype = OpType::EVENT_RESOLVE; MgmtMarshallString name = const_cast(event_name); if (!event_name) { return TS_ERR_PARAMS; } - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, EVENT_RESOLVE, &optype, &name); - return (ret == TS_ERR_OKAY) ? parse_generic_response(EVENT_RESOLVE, main_socket_fd) : ret; + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::EVENT_RESOLVE, &optype, &name); + return (ret == TS_ERR_OKAY) ? parse_generic_response(OpType::EVENT_RESOLVE, main_socket_fd) : ret; } /*------------------------------------------------------------------------- @@ -948,7 +948,7 @@ ActiveEventGetMlt(LLQ *active_events) return TS_ERR_PARAMS; } - return (send_and_parse_list(EVENT_GET_MLT, active_events)); + return (send_and_parse_list(OpType::EVENT_GET_MLT, active_events)); } /*------------------------------------------------------------------------- @@ -960,7 +960,7 @@ TSMgmtError EventIsActive(const char *event_name, bool *is_current) { TSMgmtError ret; - MgmtMarshallInt optype = EVENT_ACTIVE; + OpType optype = OpType::EVENT_ACTIVE; MgmtMarshallString name = const_cast(event_name); MgmtMarshallData reply = {nullptr, 0}; @@ -972,7 +972,7 @@ EventIsActive(const char *event_name, bool *is_current) } // create and send request - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, EVENT_ACTIVE, &optype, &name); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::EVENT_ACTIVE, &optype, &name); if (ret != TS_ERR_OKAY) { return ret; } @@ -982,7 +982,7 @@ EventIsActive(const char *event_name, bool *is_current) return ret; } - ret 
= recv_mgmt_response(reply.ptr, reply.len, EVENT_ACTIVE, &err, &bval); + ret = recv_mgmt_response(reply.ptr, reply.len, OpType::EVENT_ACTIVE, &err, &bval); ats_free(reply.ptr); if (ret != TS_ERR_OKAY) { @@ -1022,10 +1022,10 @@ EventSignalCbRegister(const char *event_name, TSEventSignalFunc func, void *data // if we need to notify traffic manager of the event then send msg if (first_time) { - MgmtMarshallInt optype = EVENT_REG_CALLBACK; + OpType optype = OpType::EVENT_REG_CALLBACK; MgmtMarshallString name = const_cast(event_name); - ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, EVENT_REG_CALLBACK, &optype, &name); + ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, OpType::EVENT_REG_CALLBACK, &optype, &name); if (ret != TS_ERR_OKAY) { return ret; } @@ -1080,7 +1080,7 @@ static TSMgmtError snapshot_message(OpType op, const char *snapshot_name) { TSMgmtError ret; - MgmtMarshallInt optype = op; + OpType optype = op; MgmtMarshallString name = const_cast(snapshot_name); if (!snapshot_name) { @@ -1094,19 +1094,19 @@ snapshot_message(OpType op, const char *snapshot_name) TSMgmtError SnapshotTake(const char *snapshot_name) { - return snapshot_message(SNAPSHOT_TAKE, snapshot_name); + return snapshot_message(OpType::SNAPSHOT_TAKE, snapshot_name); } TSMgmtError SnapshotRestore(const char *snapshot_name) { - return snapshot_message(SNAPSHOT_RESTORE, snapshot_name); + return snapshot_message(OpType::SNAPSHOT_RESTORE, snapshot_name); } TSMgmtError SnapshotRemove(const char *snapshot_name) { - return snapshot_message(SNAPSHOT_REMOVE, snapshot_name); + return snapshot_message(OpType::SNAPSHOT_REMOVE, snapshot_name); } TSMgmtError @@ -1116,15 +1116,15 @@ SnapshotGetMlt(LLQ *snapshots) return TS_ERR_PARAMS; } - return send_and_parse_list(SNAPSHOT_GET_MLT, snapshots); + return send_and_parse_list(OpType::SNAPSHOT_GET_MLT, snapshots); } TSMgmtError StatsReset(bool cluster, const char *stat_name) { TSMgmtError ret; - OpType op = cluster ? 
STATS_RESET_CLUSTER : STATS_RESET_NODE; - MgmtMarshallInt optype = op; + OpType op = cluster ? OpType::STATS_RESET_CLUSTER : OpType::STATS_RESET_NODE; + OpType optype = op; MgmtMarshallString name = const_cast(stat_name); ret = MGMTAPI_SEND_MESSAGE(main_socket_fd, op, &optype, &name); diff --git a/mgmt/api/EventCallback.cc b/mgmt/api/EventCallback.cc index 629dfaed2ad..6dee2652ff3 100644 --- a/mgmt/api/EventCallback.cc +++ b/mgmt/api/EventCallback.cc @@ -83,8 +83,8 @@ create_callback_table(const char *lock_name) { CallbackTable *cb_table = (CallbackTable *)ats_malloc(sizeof(CallbackTable)); - for (int i = 0; i < NUM_EVENTS; i++) { - cb_table->event_callback_l[i] = nullptr; + for (auto &i : cb_table->event_callback_l) { + i = nullptr; } // initialize the mutex @@ -110,15 +110,15 @@ delete_callback_table(CallbackTable *cb_table) ink_mutex_acquire(&cb_table->event_callback_lock); // for each event - for (int i = 0; i < NUM_EVENTS; i++) { - if (cb_table->event_callback_l[i]) { + for (auto &i : cb_table->event_callback_l) { + if (i) { // remove and delete each EventCallbackT for that event - while (!queue_is_empty(cb_table->event_callback_l[i])) { - event_cb = (EventCallbackT *)dequeue(cb_table->event_callback_l[i]); + while (!queue_is_empty(i)) { + event_cb = (EventCallbackT *)dequeue(i); delete_event_callback(event_cb); } - delete_queue(cb_table->event_callback_l[i]); + delete_queue(i); } } @@ -182,7 +182,7 @@ get_events_with_callbacks(CallbackTable *cb_table) TSMgmtError cb_table_register(CallbackTable *cb_table, const char *event_name, TSEventSignalFunc func, void *data, bool *first_cb) { - bool first_time = 0; + bool first_time = false; int id; EventCallbackT *event_cb; // create new EventCallbackT EACH TIME enqueue @@ -196,19 +196,19 @@ cb_table_register(CallbackTable *cb_table, const char *event_name, TSEventSignal // got lock, add it if (event_name == nullptr) { // register for all alarms // printf("[EventSignalCbRegister] Register callback for all alarms\n"); - 
for (int i = 0; i < NUM_EVENTS; i++) { - if (!cb_table->event_callback_l[i]) { - cb_table->event_callback_l[i] = create_queue(); - first_time = 1; + for (auto &i : cb_table->event_callback_l) { + if (!i) { + i = create_queue(); + first_time = true; } - if (!cb_table->event_callback_l[i]) { + if (!i) { ink_mutex_release(&cb_table->event_callback_lock); return TS_ERR_SYS_CALL; } event_cb = create_event_callback(func, data); - enqueue(cb_table->event_callback_l[i], event_cb); + enqueue(i, event_cb); } } else { // register callback for specific alarm // printf("[EventSignalCbRegister] Register callback for %s\n", event_name); @@ -216,7 +216,7 @@ cb_table_register(CallbackTable *cb_table, const char *event_name, TSEventSignal if (id != -1) { if (!cb_table->event_callback_l[id]) { cb_table->event_callback_l[id] = create_queue(); - first_time = 1; + first_time = true; } if (!cb_table->event_callback_l[id]) { @@ -262,27 +262,27 @@ cb_table_unregister(CallbackTable *cb_table, const char *event_name, TSEventSign // got lock, add it if (event_name == nullptr) { // unregister the callback for ALL EVENTS // for each event - for (int i = 0; i < NUM_EVENTS; i++) { - if (!cb_table->event_callback_l[i]) { // this event has no callbacks + for (auto &i : cb_table->event_callback_l) { + if (!i) { // this event has no callbacks continue; } // func == NULL means unregister all functions associated with alarm if (func == nullptr) { - while (!queue_is_empty(cb_table->event_callback_l[i])) { - event_cb = (EventCallbackT *)dequeue(cb_table->event_callback_l[i]); + while (!queue_is_empty(i)) { + event_cb = (EventCallbackT *)dequeue(i); delete_event_callback(event_cb); } // clean up queue and set to NULL - delete_queue(cb_table->event_callback_l[i]); - cb_table->event_callback_l[i] = nullptr; + delete_queue(i); + i = nullptr; } else { // only remove the func passed in int queue_depth; - queue_depth = queue_len(cb_table->event_callback_l[i]); + queue_depth = queue_len(i); // remove this 
function for (int j = 0; j < queue_depth; j++) { - event_cb = (EventCallbackT *)dequeue(cb_table->event_callback_l[i]); + event_cb = (EventCallbackT *)dequeue(i); cb_fun = event_cb->func; // the pointers are the same so don't enqueue the fn back on @@ -291,13 +291,13 @@ cb_table_unregister(CallbackTable *cb_table, const char *event_name, TSEventSign continue; } - enqueue(cb_table->event_callback_l[i], event_cb); + enqueue(i, event_cb); } // is queue empty now? then clean up - if (queue_is_empty(cb_table->event_callback_l[i])) { - delete_queue(cb_table->event_callback_l[i]); - cb_table->event_callback_l[i] = nullptr; + if (queue_is_empty(i)) { + delete_queue(i); + i = nullptr; } } } // end for (int i = 0; i < NUM_EVENTS; i++) { diff --git a/mgmt/api/EventControlMain.cc b/mgmt/api/EventControlMain.cc index 32e81bc3935..5fd9acf5e49 100644 --- a/mgmt/api/EventControlMain.cc +++ b/mgmt/api/EventControlMain.cc @@ -61,8 +61,8 @@ new_event_client() EventClientT *ele = (EventClientT *)ats_malloc(sizeof(EventClientT)); // now set the alarms registered section - for (int i = 0; i < NUM_EVENTS; i++) { - ele->events_registered[i] = 0; + for (bool &i : ele->events_registered) { + i = false; } ele->adr = (struct sockaddr *)ats_malloc(sizeof(struct sockaddr)); @@ -274,7 +274,7 @@ event_callback_main(void *arg) int fds_ready; // return value for select go here struct timeval timeout; - while (1) { + while (true) { // LINUX fix: to prevent hard-spin reset timeout on each loop timeout.tv_sec = 1; timeout.tv_usec = 0; @@ -391,11 +391,11 @@ event_callback_main(void *arg) while (con_entry) { client_entry = (EventClientT *)ink_hash_table_entry_value(accepted_clients, con_entry); if (client_entry->events_registered[event->id]) { - MgmtMarshallInt optype = EVENT_NOTIFY; + OpType optype = OpType::EVENT_NOTIFY; MgmtMarshallString name = event->name; MgmtMarshallString desc = event->description; - ret = send_mgmt_request(client_entry->fd, EVENT_NOTIFY, &optype, &name, &desc); + ret = 
send_mgmt_request(client_entry->fd, OpType::EVENT_NOTIFY, &optype, &name, &desc); if (ret != TS_ERR_OKAY) { Debug("event", "sending even notification to fd [%d] failed.", client_entry->fd); } @@ -452,15 +452,15 @@ handle_event_reg_callback(EventClientT *client, void *req, size_t reqlen) MgmtMarshallString name = nullptr; TSMgmtError ret; - ret = recv_mgmt_request(req, reqlen, EVENT_REG_CALLBACK, &optype, &name); + ret = recv_mgmt_request(req, reqlen, OpType::EVENT_REG_CALLBACK, &optype, &name); if (ret != TS_ERR_OKAY) { goto done; } // mark the specified alarm as "wanting to be notified" in the client's alarm_registered list if (strlen(name) == 0) { // mark all alarms - for (int i = 0; i < NUM_EVENTS; i++) { - client->events_registered[i] = true; + for (bool &i : client->events_registered) { + i = true; } } else { int id = get_event_id(name); @@ -495,15 +495,15 @@ handle_event_unreg_callback(EventClientT *client, void *req, size_t reqlen) MgmtMarshallString name = nullptr; TSMgmtError ret; - ret = recv_mgmt_request(req, reqlen, EVENT_UNREG_CALLBACK, &optype, &name); + ret = recv_mgmt_request(req, reqlen, OpType::EVENT_UNREG_CALLBACK, &optype, &name); if (ret != TS_ERR_OKAY) { goto done; } // mark the specified alarm as "wanting to be notified" in the client's alarm_registered list if (strlen(name) == 0) { // mark all alarms - for (int i = 0; i < NUM_EVENTS; i++) { - client->events_registered[i] = false; + for (bool &i : client->events_registered) { + i = false; } } else { int id = get_event_id(name); @@ -522,7 +522,7 @@ handle_event_unreg_callback(EventClientT *client, void *req, size_t reqlen) return ret; } -typedef TSMgmtError (*event_message_handler)(EventClientT *, void *, size_t); +using event_message_handler = TSMgmtError (*)(EventClientT *, void *, size_t); static const event_message_handler handlers[] = { nullptr, // FILE_READ @@ -557,11 +557,11 @@ handle_event_message(EventClientT *client, void *req, size_t reqlen) { OpType optype = 
extract_mgmt_request_optype(req, reqlen); - if (optype < 0 || static_cast(optype) >= countof(handlers)) { + if (static_cast(optype) >= countof(handlers)) { goto fail; } - if (handlers[optype] == nullptr) { + if (handlers[static_cast(optype)] == nullptr) { goto fail; } @@ -576,7 +576,7 @@ handle_event_message(EventClientT *client, void *req, size_t reqlen) } } - return handlers[optype](client, req, reqlen); + return handlers[static_cast(optype)](client, req, reqlen); fail: mgmt_elog(0, "%s: missing handler for type %d event message\n", __func__, (int)optype); diff --git a/mgmt/api/INKMgmtAPI.cc b/mgmt/api/INKMgmtAPI.cc index f385721dc7e..bc05f64236d 100644 --- a/mgmt/api/INKMgmtAPI.cc +++ b/mgmt/api/INKMgmtAPI.cc @@ -32,7 +32,7 @@ #include "ts/ink_platform.h" #include "ts/ink_code.h" #include "ts/ParseRules.h" -#include +#include #include "ts/I_Layout.h" #include "mgmtapi.h" diff --git a/mgmt/api/NetworkMessage.cc b/mgmt/api/NetworkMessage.cc index 8073759c8fb..7293f6dc1a9 100644 --- a/mgmt/api/NetworkMessage.cc +++ b/mgmt/api/NetworkMessage.cc @@ -110,15 +110,15 @@ static const struct NetCmdOperation responses[] = { /* LIFECYCLE_MESSAGE */ {1, {MGMT_MARSHALL_INT}}, }; -#define GETCMD(ops, optype, cmd) \ - do { \ - if (optype < 0 || static_cast(optype) >= countof(ops)) { \ - return TS_ERR_PARAMS; \ - } \ - if (ops[optype].nfields == 0) { \ - return TS_ERR_PARAMS; \ - } \ - cmd = &ops[optype]; \ +#define GETCMD(ops, optype, cmd) \ + do { \ + if (static_cast(optype) >= countof(ops)) { \ + return TS_ERR_PARAMS; \ + } \ + if (ops[static_cast(optype)].nfields == 0) { \ + return TS_ERR_PARAMS; \ + } \ + cmd = &ops[static_cast(optype)]; \ } while (0); TSMgmtError @@ -203,59 +203,59 @@ send_mgmt_error(int fd, OpType optype, TSMgmtError error) // Switch on operations, grouped by response format. 
switch (optype) { - case BOUNCE: - case EVENT_RESOLVE: - case FILE_WRITE: - case LIFECYCLE_MESSAGE: - case PROXY_STATE_SET: - case RECONFIGURE: - case RESTART: - case SNAPSHOT_REMOVE: - case SNAPSHOT_RESTORE: - case SNAPSHOT_TAKE: - case STATS_RESET_CLUSTER: - case STATS_RESET_NODE: - case STORAGE_DEVICE_CMD_OFFLINE: - ink_release_assert(responses[optype].nfields == 1); + case OpType::BOUNCE: + case OpType::EVENT_RESOLVE: + case OpType::FILE_WRITE: + case OpType::LIFECYCLE_MESSAGE: + case OpType::PROXY_STATE_SET: + case OpType::RECONFIGURE: + case OpType::RESTART: + case OpType::SNAPSHOT_REMOVE: + case OpType::SNAPSHOT_RESTORE: + case OpType::SNAPSHOT_TAKE: + case OpType::STATS_RESET_CLUSTER: + case OpType::STATS_RESET_NODE: + case OpType::STORAGE_DEVICE_CMD_OFFLINE: + ink_release_assert(responses[static_cast(optype)].nfields == 1); return send_mgmt_response(fd, optype, &ecode); - case RECORD_SET: - case PROXY_STATE_GET: - case EVENT_ACTIVE: - ink_release_assert(responses[optype].nfields == 2); + case OpType::RECORD_SET: + case OpType::PROXY_STATE_GET: + case OpType::EVENT_ACTIVE: + ink_release_assert(responses[static_cast(optype)].nfields == 2); return send_mgmt_response(fd, optype, &ecode, &intval); - case EVENT_GET_MLT: - case SNAPSHOT_GET_MLT: - case SERVER_BACKTRACE: - ink_release_assert(responses[optype].nfields == 2); + case OpType::EVENT_GET_MLT: + case OpType::SNAPSHOT_GET_MLT: + case OpType::SERVER_BACKTRACE: + ink_release_assert(responses[static_cast(optype)].nfields == 2); return send_mgmt_response(fd, optype, &ecode, &strval); - case FILE_READ: - ink_release_assert(responses[optype].nfields == 3); + case OpType::FILE_READ: + ink_release_assert(responses[static_cast(optype)].nfields == 3); return send_mgmt_response(fd, optype, &ecode, &intval, &dataval); - case RECORD_GET: - case RECORD_MATCH_GET: - ink_release_assert(responses[optype].nfields == 5); + case OpType::RECORD_GET: + case OpType::RECORD_MATCH_GET: + 
ink_release_assert(responses[static_cast(optype)].nfields == 5); return send_mgmt_response(fd, optype, &ecode, &intval, &intval, &strval, &dataval); - case RECORD_DESCRIBE_CONFIG: - ink_release_assert(responses[optype].nfields == 15); + case OpType::RECORD_DESCRIBE_CONFIG: + ink_release_assert(responses[static_cast(optype)].nfields == 15); return send_mgmt_response(fd, optype, &ecode, &strval /* name */, &dataval /* value */, &dataval /* default */, &intval /* type */, &intval /* class */, &intval /* version */, &intval /* rsb */, &intval /* order */, &intval /* access */, &intval /* update */, &intval /* updatetype */, &intval /* checktype */, &intval /* source */, &strval /* checkexpr */); - case EVENT_REG_CALLBACK: - case EVENT_UNREG_CALLBACK: - case EVENT_NOTIFY: - case API_PING: + case OpType::EVENT_REG_CALLBACK: + case OpType::EVENT_UNREG_CALLBACK: + case OpType::EVENT_NOTIFY: + case OpType::API_PING: /* no response for these */ - ink_release_assert(responses[optype].nfields == 0); + ink_release_assert(responses[static_cast(optype)].nfields == 0); return TS_ERR_OKAY; - case UNDEFINED_OP: + case OpType::UNDEFINED_OP: return TS_ERR_OKAY; } @@ -263,7 +263,7 @@ send_mgmt_error(int fd, OpType optype, TSMgmtError error) // updating the switch statement above. Don't do that; this // code must be able to handle every OpType. 
- ink_fatal("missing generic error support for type %d management message", optype); + ink_fatal("missing generic error support for type %d management message", static_cast(optype)); return TS_ERR_FAIL; } @@ -367,7 +367,7 @@ extract_mgmt_request_optype(void *msg, size_t msglen) MgmtMarshallInt optype; if (mgmt_message_parse(msg, msglen, fields, countof(fields), &optype) == -1) { - return UNDEFINED_OP; + return OpType::UNDEFINED_OP; } return (OpType)optype; diff --git a/mgmt/api/NetworkMessage.h b/mgmt/api/NetworkMessage.h index e32fe6ba379..741c160f2d4 100644 --- a/mgmt/api/NetworkMessage.h +++ b/mgmt/api/NetworkMessage.h @@ -32,7 +32,7 @@ #define MAX_CONN_TRIES 10 // maximum number of attemps to reconnect to TM // the possible operations or msg types sent from remote client to TM -typedef enum { +enum class OpType : MgmtMarshallInt { FILE_READ, FILE_WRITE, RECORD_SET, @@ -61,9 +61,7 @@ typedef enum { RECORD_DESCRIBE_CONFIG, LIFECYCLE_MESSAGE, UNDEFINED_OP /* This must be last */ -} OpType; - -#define MGMT_OPERATION_TYPE_MAX (UNDEFINED_OP) +}; enum { RECORD_DESCRIBE_FLAGS_MATCH = 0x0001, diff --git a/mgmt/api/NetworkUtilsRemote.cc b/mgmt/api/NetworkUtilsRemote.cc index 483d4ccfcf4..740ae4674cf 100644 --- a/mgmt/api/NetworkUtilsRemote.cc +++ b/mgmt/api/NetworkUtilsRemote.cc @@ -79,10 +79,10 @@ set_socket_paths(const char *path) static bool socket_test(int fd) { - MgmtMarshallInt optype = API_PING; - MgmtMarshallInt now = time(nullptr); + OpType optype = OpType::API_PING; + MgmtMarshallInt now = time(nullptr); - if (MGMTAPI_SEND_MESSAGE(fd, API_PING, &optype, &now) == TS_ERR_OKAY) { + if (MGMTAPI_SEND_MESSAGE(fd, OpType::API_PING, &optype, &now) == TS_ERR_OKAY) { return true; // write was successful; connection still open } @@ -403,7 +403,7 @@ void * socket_test_thread(void *) { // loop until client process dies - while (1) { + while (true) { if (main_socket_fd == -1 || !socket_test(main_socket_fd)) { // ASSUMES that in between the time the socket_test is made // and 
this reconnect call is made, the main_socket_fd remains @@ -450,10 +450,10 @@ send_register_all_callbacks(int fd, CallbackTable *cb_table) events_with_cb = get_events_with_callbacks(cb_table); // need to check that the list has all the events registered if (!events_with_cb) { // all events have registered callback - MgmtMarshallInt optype = EVENT_REG_CALLBACK; + OpType optype = OpType::EVENT_REG_CALLBACK; MgmtMarshallString event_name = nullptr; - err = MGMTAPI_SEND_MESSAGE(fd, EVENT_REG_CALLBACK, &optype, &event_name); + err = MGMTAPI_SEND_MESSAGE(fd, OpType::EVENT_REG_CALLBACK, &optype, &event_name); if (err != TS_ERR_OKAY) { return err; } @@ -461,12 +461,12 @@ send_register_all_callbacks(int fd, CallbackTable *cb_table) int num_events = queue_len(events_with_cb); // iterate through the LLQ and send request for each event for (int i = 0; i < num_events; i++) { - MgmtMarshallInt optype = EVENT_REG_CALLBACK; + OpType optype = OpType::EVENT_REG_CALLBACK; MgmtMarshallInt event_id = *(int *)dequeue(events_with_cb); MgmtMarshallString event_name = (char *)get_event_name(event_id); if (event_name) { - err = MGMTAPI_SEND_MESSAGE(fd, EVENT_REG_CALLBACK, &optype, &event_name); + err = MGMTAPI_SEND_MESSAGE(fd, OpType::EVENT_REG_CALLBACK, &optype, &event_name); ats_free(event_name); // free memory if (err != TS_ERR_OKAY) { send_err = err; // save the type of send error @@ -507,8 +507,8 @@ send_unregister_all_callbacks(int fd, CallbackTable *cb_table) bool no_errors = true; // set to false if at least one send fails // init array so that all events don't have any callbacks - for (int i = 0; i < NUM_EVENTS; i++) { - reg_callback[i] = 0; + for (int &i : reg_callback) { + i = 0; } events_with_cb = get_events_with_callbacks(cb_table); @@ -527,10 +527,10 @@ send_unregister_all_callbacks(int fd, CallbackTable *cb_table) // send message to TM to mark unregister for (int k = 0; k < NUM_EVENTS; k++) { if (reg_callback[k] == 0) { // event has no registered callbacks - MgmtMarshallInt 
optype = EVENT_UNREG_CALLBACK; + OpType optype = OpType::EVENT_UNREG_CALLBACK; MgmtMarshallString event_name = get_event_name(k); - err = MGMTAPI_SEND_MESSAGE(fd, EVENT_UNREG_CALLBACK, &optype, &event_name); + err = MGMTAPI_SEND_MESSAGE(fd, OpType::EVENT_UNREG_CALLBACK, &optype, &event_name); ats_free(event_name); if (err != TS_ERR_OKAY) { send_err = err; // save the type of the sending error @@ -602,12 +602,12 @@ event_poll_thread_main(void *arg) sock_fd = *((int *)arg); // should be same as event_socket_fd // the sock_fd is going to be the one we listen for events on - while (1) { + while (true) { TSMgmtError ret; TSMgmtEvent *event = nullptr; MgmtMarshallData reply = {nullptr, 0}; - MgmtMarshallInt optype; + OpType optype; MgmtMarshallString name = nullptr; MgmtMarshallString desc = nullptr; @@ -627,7 +627,7 @@ event_poll_thread_main(void *arg) break; } - ret = recv_mgmt_request(reply.ptr, reply.len, EVENT_NOTIFY, &optype, &name, &desc); + ret = recv_mgmt_request(reply.ptr, reply.len, OpType::EVENT_NOTIFY, &optype, &name, &desc); ats_free(reply.ptr); if (ret != TS_ERR_OKAY) { @@ -636,7 +636,7 @@ event_poll_thread_main(void *arg) break; } - ink_assert(optype == EVENT_NOTIFY); + ink_assert(optype == OpType::EVENT_NOTIFY); // The new event takes ownership of the message strings. 
event = TSEventCreate(); diff --git a/mgmt/api/TSControlMain.cc b/mgmt/api/TSControlMain.cc index b688339546f..6a336af111c 100644 --- a/mgmt/api/TSControlMain.cc +++ b/mgmt/api/TSControlMain.cc @@ -143,7 +143,7 @@ ts_ctrl_main(void *arg) struct timeval timeout; // loops until TM dies; waits for and processes requests from clients - while (1) { + while (true) { // LINUX: to prevent hard-spin of CPU, reset timeout on each loop timeout.tv_sec = TIMEOUT_SECS; timeout.tv_usec = 0; @@ -352,7 +352,7 @@ send_record_get_response(int fd, const RecRecord *rec) break; // skip it } - return send_mgmt_response(fd, RECORD_GET, &err, &rclass, &type, &name, &value); + return send_mgmt_response(fd, OpType::RECORD_GET, &err, &rclass, &type, &name, &value); } /************************************************************************** @@ -381,7 +381,7 @@ handle_record_get(int fd, void *req, size_t reqlen) int fderr = fd; // [in,out] variable for the fd and error - ret = recv_mgmt_request(req, reqlen, RECORD_GET, &optype, &name); + ret = recv_mgmt_request(req, reqlen, OpType::RECORD_GET, &optype, &name); if (ret != TS_ERR_OKAY) { return ret; } @@ -432,7 +432,7 @@ handle_record_match(int fd, void *req, size_t reqlen) MgmtMarshallInt optype; MgmtMarshallString name; - ret = recv_mgmt_request(req, reqlen, RECORD_MATCH_GET, &optype, &name); + ret = recv_mgmt_request(req, reqlen, OpType::RECORD_MATCH_GET, &optype, &name); if (ret != TS_ERR_OKAY) { return ret; } @@ -476,7 +476,7 @@ handle_record_set(int fd, void *req, size_t reqlen) MgmtMarshallString name = nullptr; MgmtMarshallString value = nullptr; - ret = recv_mgmt_request(req, reqlen, RECORD_SET, &optype, &name, &value); + ret = recv_mgmt_request(req, reqlen, OpType::RECORD_SET, &optype, &name, &value); if (ret != TS_ERR_OKAY) { ret = TS_ERR_FAIL; goto fail; @@ -496,7 +496,7 @@ handle_record_set(int fd, void *req, size_t reqlen) MgmtMarshallInt err = ret; MgmtMarshallInt act = action; - return send_mgmt_response(fd, RECORD_SET, &err, 
&act); + return send_mgmt_response(fd, OpType::RECORD_SET, &err, &act); } /************************************************************************** @@ -519,7 +519,7 @@ handle_file_read(int fd, void *req, size_t reqlen) MgmtMarshallInt vers = 0; MgmtMarshallData data = {nullptr, 0}; - err = recv_mgmt_request(req, reqlen, FILE_READ, &optype, &fid); + err = recv_mgmt_request(req, reqlen, OpType::FILE_READ, &optype, &fid); if (err != TS_ERR_OKAY) { return (TSMgmtError)err; } @@ -532,7 +532,7 @@ handle_file_read(int fd, void *req, size_t reqlen) data.len = size; } - err = send_mgmt_response(fd, FILE_READ, &err, &vers, &data); + err = send_mgmt_response(fd, OpType::FILE_READ, &err, &vers, &data); ats_free(text); // free memory allocated by ReadFile return (TSMgmtError)err; @@ -555,7 +555,7 @@ handle_file_write(int fd, void *req, size_t reqlen) MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, FILE_WRITE, &optype, &fid, &vers, &data); + err = recv_mgmt_request(req, reqlen, OpType::FILE_WRITE, &optype, &fid, &vers, &data); if (err != TS_ERR_OKAY) { goto done; } @@ -570,7 +570,7 @@ handle_file_write(int fd, void *req, size_t reqlen) done: ats_free(data.ptr); - return send_mgmt_response(fd, FILE_WRITE, &err); + return send_mgmt_response(fd, OpType::FILE_WRITE, &err); } /************************************************************************** @@ -587,12 +587,12 @@ handle_proxy_state_get(int fd, void *req, size_t reqlen) MgmtMarshallInt err; MgmtMarshallInt state = TS_PROXY_UNDEFINED; - err = recv_mgmt_request(req, reqlen, PROXY_STATE_GET, &optype); + err = recv_mgmt_request(req, reqlen, OpType::PROXY_STATE_GET, &optype); if (err == TS_ERR_OKAY) { state = ProxyStateGet(); } - return send_mgmt_response(fd, PROXY_STATE_GET, &err, &state); + return send_mgmt_response(fd, OpType::PROXY_STATE_GET, &err, &state); } /************************************************************************** @@ -611,13 +611,13 @@ handle_proxy_state_set(int fd, void *req, size_t reqlen) 
MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, PROXY_STATE_SET, &optype, &state, &clear); + err = recv_mgmt_request(req, reqlen, OpType::PROXY_STATE_SET, &optype, &state, &clear); if (err != TS_ERR_OKAY) { - return send_mgmt_response(fd, PROXY_STATE_SET, &err); + return send_mgmt_response(fd, OpType::PROXY_STATE_SET, &err); } err = ProxyStateSet((TSProxyStateT)state, (TSCacheClearT)clear); - return send_mgmt_response(fd, PROXY_STATE_SET, &err); + return send_mgmt_response(fd, OpType::PROXY_STATE_SET, &err); } /************************************************************************** @@ -633,12 +633,12 @@ handle_reconfigure(int fd, void *req, size_t reqlen) MgmtMarshallInt err; MgmtMarshallInt optype; - err = recv_mgmt_request(req, reqlen, RECONFIGURE, &optype); + err = recv_mgmt_request(req, reqlen, OpType::RECONFIGURE, &optype); if (err == TS_ERR_OKAY) { err = Reconfigure(); } - return send_mgmt_response(fd, RECONFIGURE, &err); + return send_mgmt_response(fd, OpType::RECONFIGURE, &err); } /************************************************************************** @@ -651,17 +651,17 @@ handle_reconfigure(int fd, void *req, size_t reqlen) static TSMgmtError handle_restart(int fd, void *req, size_t reqlen) { - MgmtMarshallInt optype; + OpType optype; MgmtMarshallInt options; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, RESTART, &optype, &options); + err = recv_mgmt_request(req, reqlen, OpType::RESTART, &optype, &options); if (err == TS_ERR_OKAY) { switch (optype) { - case BOUNCE: + case OpType::BOUNCE: err = Bounce(options); break; - case RESTART: + case OpType::RESTART: err = Restart(options); break; default: @@ -670,7 +670,7 @@ handle_restart(int fd, void *req, size_t reqlen) } } - return send_mgmt_response(fd, RESTART, &err); + return send_mgmt_response(fd, OpType::RESTART, &err); } /************************************************************************** @@ -687,13 +687,13 @@ handle_storage_device_cmd_offline(int fd, void *req, 
size_t reqlen) MgmtMarshallString name = nullptr; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, STORAGE_DEVICE_CMD_OFFLINE, &optype, &name); + err = recv_mgmt_request(req, reqlen, OpType::STORAGE_DEVICE_CMD_OFFLINE, &optype, &name); if (err == TS_ERR_OKAY) { // forward to server lmgmt->signalEvent(MGMT_EVENT_STORAGE_DEVICE_CMD_OFFLINE, name); } - return send_mgmt_response(fd, STORAGE_DEVICE_CMD_OFFLINE, &err); + return send_mgmt_response(fd, OpType::STORAGE_DEVICE_CMD_OFFLINE, &err); } /************************************************************************** @@ -710,13 +710,13 @@ handle_event_resolve(int fd, void *req, size_t reqlen) MgmtMarshallString name = nullptr; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, EVENT_RESOLVE, &optype, &name); + err = recv_mgmt_request(req, reqlen, OpType::EVENT_RESOLVE, &optype, &name); if (err == TS_ERR_OKAY) { err = EventResolve(name); } ats_free(name); - return send_mgmt_response(fd, EVENT_RESOLVE, &err); + return send_mgmt_response(fd, OpType::EVENT_RESOLVE, &err); } /************************************************************************** @@ -738,7 +738,7 @@ handle_event_get_mlt(int fd, void *req, size_t reqlen) MgmtMarshallInt err; MgmtMarshallString list = nullptr; - err = recv_mgmt_request(req, reqlen, EVENT_GET_MLT, &optype); + err = recv_mgmt_request(req, reqlen, OpType::EVENT_GET_MLT, &optype); if (err != TS_ERR_OKAY) { goto done; } @@ -766,7 +766,7 @@ handle_event_get_mlt(int fd, void *req, size_t reqlen) done: delete_queue(event_list); - return send_mgmt_response(fd, EVENT_GET_MLT, &err, &list); + return send_mgmt_response(fd, OpType::EVENT_GET_MLT, &err, &list); } /************************************************************************** @@ -786,7 +786,7 @@ handle_event_active(int fd, void *req, size_t reqlen) MgmtMarshallInt err; MgmtMarshallInt bval = 0; - err = recv_mgmt_request(req, reqlen, EVENT_ACTIVE, &optype, &name); + err = recv_mgmt_request(req, reqlen, 
OpType::EVENT_ACTIVE, &optype, &name); if (err != TS_ERR_OKAY) { goto done; } @@ -803,7 +803,7 @@ handle_event_active(int fd, void *req, size_t reqlen) done: ats_free(name); - return send_mgmt_response(fd, EVENT_ACTIVE, &err, &bval); + return send_mgmt_response(fd, OpType::EVENT_ACTIVE, &err, &bval); } /************************************************************************** @@ -815,12 +815,12 @@ handle_event_active(int fd, void *req, size_t reqlen) static TSMgmtError handle_snapshot(int fd, void *req, size_t reqlen) { - MgmtMarshallInt optype; + OpType optype; MgmtMarshallString name = nullptr; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, SNAPSHOT_TAKE, &optype, &name); + err = recv_mgmt_request(req, reqlen, OpType::SNAPSHOT_TAKE, &optype, &name); if (err != TS_ERR_OKAY) { goto done; } @@ -832,13 +832,13 @@ handle_snapshot(int fd, void *req, size_t reqlen) // call CoreAPI call on Traffic Manager side switch (optype) { - case SNAPSHOT_TAKE: + case OpType::SNAPSHOT_TAKE: err = SnapshotTake(name); break; - case SNAPSHOT_RESTORE: + case OpType::SNAPSHOT_RESTORE: err = SnapshotRestore(name); break; - case SNAPSHOT_REMOVE: + case OpType::SNAPSHOT_REMOVE: err = SnapshotRemove(name); break; default: @@ -870,7 +870,7 @@ handle_snapshot_get_mlt(int fd, void *req, size_t reqlen) MgmtMarshallInt err; MgmtMarshallString list = nullptr; - err = recv_mgmt_request(req, reqlen, SNAPSHOT_GET_MLT, &optype); + err = recv_mgmt_request(req, reqlen, OpType::SNAPSHOT_GET_MLT, &optype); if (err != TS_ERR_OKAY) { goto done; } @@ -898,7 +898,7 @@ handle_snapshot_get_mlt(int fd, void *req, size_t reqlen) done: delete_queue(snap_list); - return send_mgmt_response(fd, SNAPSHOT_GET_MLT, &err, &list); + return send_mgmt_response(fd, OpType::SNAPSHOT_GET_MLT, &err, &list); } /************************************************************************** @@ -910,13 +910,13 @@ handle_snapshot_get_mlt(int fd, void *req, size_t reqlen) static TSMgmtError handle_stats_reset(int fd, void 
*req, size_t reqlen) { - MgmtMarshallInt optype; + OpType optype; MgmtMarshallString name = nullptr; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, STATS_RESET_NODE, &optype, &name); + err = recv_mgmt_request(req, reqlen, OpType::STATS_RESET_NODE, &optype, &name); if (err == TS_ERR_OKAY) { - err = StatsReset(optype == STATS_RESET_CLUSTER, name); + err = StatsReset(optype == OpType::STATS_RESET_CLUSTER, name); } ats_free(name); @@ -936,7 +936,7 @@ handle_api_ping(int /* fd */, void *req, size_t reqlen) MgmtMarshallInt optype; MgmtMarshallInt stamp; - return recv_mgmt_request(req, reqlen, API_PING, &optype, &stamp); + return recv_mgmt_request(req, reqlen, OpType::API_PING, &optype, &stamp); } static TSMgmtError @@ -947,12 +947,12 @@ handle_server_backtrace(int fd, void *req, size_t reqlen) MgmtMarshallString trace = nullptr; MgmtMarshallInt err; - err = recv_mgmt_request(req, reqlen, SERVER_BACKTRACE, &optype, &options); + err = recv_mgmt_request(req, reqlen, OpType::SERVER_BACKTRACE, &optype, &options); if (err == TS_ERR_OKAY) { err = ServerBacktrace(options, &trace); } - err = send_mgmt_response(fd, SERVER_BACKTRACE, &err, &trace); + err = send_mgmt_response(fd, OpType::SERVER_BACKTRACE, &err, &trace); ats_free(trace); return (TSMgmtError)err; @@ -1032,9 +1032,9 @@ send_record_describe(const RecRecord *rec, void *edata) } } - err = send_mgmt_response(match->fd, RECORD_DESCRIBE_CONFIG, &err, &rec_name, &rec_value, &rec_default, &rec_type, &rec_class, - &rec_version, &rec_rsb, &rec_order, &rec_access, &rec_update, &rec_updatetype, &rec_checktype, - &rec_source, &rec_checkexpr); + err = send_mgmt_response(match->fd, OpType::RECORD_DESCRIBE_CONFIG, &err, &rec_name, &rec_value, &rec_default, &rec_type, + &rec_class, &rec_version, &rec_rsb, &rec_order, &rec_access, &rec_update, &rec_updatetype, + &rec_checktype, &rec_source, &rec_checkexpr); done: match->err = err; @@ -1049,7 +1049,7 @@ handle_record_describe(int fd, void *req, size_t reqlen) MgmtMarshallInt 
options; MgmtMarshallString name; - ret = recv_mgmt_request(req, reqlen, RECORD_DESCRIBE_CONFIG, &optype, &name, &options); + ret = recv_mgmt_request(req, reqlen, OpType::RECORD_DESCRIBE_CONFIG, &optype, &name, &options); if (ret != TS_ERR_OKAY) { return ret; } @@ -1103,12 +1103,12 @@ handle_lifecycle_message(int fd, void *req, size_t reqlen) MgmtMarshallString tag; MgmtMarshallData data; - err = recv_mgmt_request(req, reqlen, LIFECYCLE_MESSAGE, &optype, &tag, &data); + err = recv_mgmt_request(req, reqlen, OpType::LIFECYCLE_MESSAGE, &optype, &tag, &data); if (err == TS_ERR_OKAY) { lmgmt->signalEvent(MGMT_EVENT_LIFECYCLE_MESSAGE, static_cast(req), reqlen); } - return send_mgmt_response(fd, LIFECYCLE_MESSAGE, &err); + return send_mgmt_response(fd, OpType::LIFECYCLE_MESSAGE, &err); } /**************************************************************************/ @@ -1148,8 +1148,8 @@ static const control_message_handler handlers[] = { }; // This should use countof(), but we need a constexpr :-/ -#define NUM_OP_HANDLERS (sizeof(handlers) / sizeof(handlers[0])) -extern char __msg_handler_static_assert[NUM_OP_HANDLERS == MGMT_OPERATION_TYPE_MAX ? 0 : -1]; +static_assert((sizeof(handlers) / sizeof(handlers[0])) == static_cast(OpType::UNDEFINED_OP), + "handlers array is not of correct size"); static TSMgmtError handle_control_message(int fd, void *req, size_t reqlen) @@ -1157,11 +1157,11 @@ handle_control_message(int fd, void *req, size_t reqlen) OpType optype = extract_mgmt_request_optype(req, reqlen); TSMgmtError error; - if (optype < 0 || static_cast(optype) >= countof(handlers)) { + if (static_cast(optype) >= countof(handlers)) { goto fail; } - if (handlers[optype].handler == nullptr) { + if (handlers[static_cast(optype)].handler == nullptr) { goto fail; } @@ -1170,7 +1170,7 @@ handle_control_message(int fd, void *req, size_t reqlen) gid_t egid = -1; // For privileged calls, ensure we have caller credentials and that the caller is privileged. 
- if (handlers[optype].flags & MGMT_API_PRIVILEGED) { + if (handlers[static_cast(optype)].flags & MGMT_API_PRIVILEGED) { if (mgmt_get_peereid(fd, &euid, &egid) == -1 || (euid != 0 && euid != geteuid())) { Debug("ts_main", "denied privileged API access on fd=%d for uid=%d gid=%d", fd, euid, egid); return send_mgmt_error(fd, optype, TS_ERR_PERMISSION_DENIED); @@ -1178,9 +1178,9 @@ handle_control_message(int fd, void *req, size_t reqlen) } } - Debug("ts_main", "handling message type=%d ptr=%p len=%zu on fd=%d", optype, req, reqlen, fd); + Debug("ts_main", "handling message type=%d ptr=%p len=%zu on fd=%d", static_cast(optype), req, reqlen, fd); - error = handlers[optype].handler(fd, req, reqlen); + error = handlers[static_cast(optype)].handler(fd, req, reqlen); if (error != TS_ERR_OKAY) { // NOTE: if the error was produced by the handler sending a response, this could attempt to // send a response again. However, this would only happen if sending the response failed, so diff --git a/mgmt/cluster/ClusterCom.cc b/mgmt/cluster/ClusterCom.cc index 7755927d7d0..496def12086 100644 --- a/mgmt/cluster/ClusterCom.cc +++ b/mgmt/cluster/ClusterCom.cc @@ -641,7 +641,7 @@ ClusterCom::checkPeers(time_t *ticker) } /* End ClusterCom::checkPeers */ void -ClusterCom::generateClusterDelta(void) +ClusterCom::generateClusterDelta() { long highest_delta = 0L; InkHashTableEntry *entry; @@ -1636,7 +1636,7 @@ ClusterCom::establishChannels() * Setup our multicast channel for broadcasting. 
*/ void -ClusterCom::establishBroadcastChannel(void) +ClusterCom::establishBroadcastChannel() { if ((broadcast_fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { mgmt_fatal(errno, "[ClusterCom::establishBroadcastChannel] Unable to open socket.\n"); diff --git a/mgmt/utils/MgmtSocket.cc b/mgmt/utils/MgmtSocket.cc index 5451c23f32c..e5ce6ef402d 100644 --- a/mgmt/utils/MgmtSocket.cc +++ b/mgmt/utils/MgmtSocket.cc @@ -295,7 +295,7 @@ mgmt_read_timeout(int fd, int sec, int usec) } bool -mgmt_has_peereid(void) +mgmt_has_peereid() { #if HAVE_GETPEEREID return true; diff --git a/plugins/authproxy/authproxy.cc b/plugins/authproxy/authproxy.cc index ebeb77f6296..da422dd50d5 100644 --- a/plugins/authproxy/authproxy.cc +++ b/plugins/authproxy/authproxy.cc @@ -44,7 +44,7 @@ using std::strlen; struct AuthRequestContext; -typedef bool (*AuthRequestTransform)(AuthRequestContext *auth); +using AuthRequestTransform = bool (*)(AuthRequestContext *); const static int MAX_HOST_LENGTH = 4096; @@ -73,7 +73,7 @@ static AuthOptions *AuthGlobalOptions; // TS_EVENT_CONTINUE Continue the state machine, returning to the ATS event loop // TS_EVENT_NONE Stop processing (because a nested dispatch occurred) // Anything else Continue the state machine with this event -typedef TSEvent (*StateHandler)(struct AuthRequestContext *, void *edata); +using StateHandler = TSEvent (*)(struct AuthRequestContext *, void *); struct StateTransition { TSEvent event; diff --git a/plugins/background_fetch/background_fetch.cc b/plugins/background_fetch/background_fetch.cc index c80c79cb865..7103c696bfb 100644 --- a/plugins/background_fetch/background_fetch.cc +++ b/plugins/background_fetch/background_fetch.cc @@ -21,10 +21,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include -#include -#include -#include +#include +#include +#include +#include #include #include diff --git a/plugins/background_fetch/headers.cc b/plugins/background_fetch/headers.cc index 1d99017f30d..4043993f3e6 100644 --- a/plugins/background_fetch/headers.cc +++ b/plugins/background_fetch/headers.cc @@ -21,7 +21,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include "configs.h" #include "headers.h" diff --git a/plugins/background_fetch/rules.cc b/plugins/background_fetch/rules.cc index 8fcac32bf75..fe4f7ed4cd2 100644 --- a/plugins/background_fetch/rules.cc +++ b/plugins/background_fetch/rules.cc @@ -22,7 +22,7 @@ limitations under the License. */ -#include +#include #include "configs.h" #include "rules.h" diff --git a/plugins/conf_remap/conf_remap.cc b/plugins/conf_remap/conf_remap.cc index a2d0dbafea5..d5dc7f4646c 100644 --- a/plugins/conf_remap/conf_remap.cc +++ b/plugins/conf_remap/conf_remap.cc @@ -20,10 +20,10 @@ #include "ts/remap.h" #include "ts/ink_defs.h" -#include -#include -#include -#include +#include +#include +#include +#include #include static const char PLUGIN_NAME[] = "conf_remap"; diff --git a/plugins/esi/combo_handler.cc b/plugins/esi/combo_handler.cc index 7ffa3611968..1d3c1ff0add 100644 --- a/plugins/esi/combo_handler.cc +++ b/plugins/esi/combo_handler.cc @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include @@ -66,7 +66,7 @@ static int COMBO_HANDLER_PATH_SIZE; TSDebug(DEBUG_TAG, "[%s:%d] [%s] DEBUG: " fmt, __FILE__, __LINE__, __FUNCTION__, ##args); \ } while (0) -typedef list StringList; +using StringList = list; struct ClientRequest { TSHttpStatus status; @@ -210,6 +210,16 @@ pthread_key_t threadKey = 0; void TSPluginInit(int argc, const char *argv[]) { + TSPluginRegistrationInfo info; + info.plugin_name = "combo_handler"; + info.vendor_name = "Apache Software Foundation"; + info.support_email = 
"dev@trafficserver.apache.org"; + + if (TSPluginRegister(&info) != TS_SUCCESS) { + TSError("[combo_handler][%s] plugin registration failed.", __FUNCTION__); + return; + } + if ((argc > 1) && (strcmp(argv[1], "-") != 0)) { COMBO_HANDLER_PATH = argv[1]; if (COMBO_HANDLER_PATH == "/") { diff --git a/plugins/esi/esi.cc b/plugins/esi/esi.cc index e4cf93bb2e4..91ea082453b 100644 --- a/plugins/esi/esi.cc +++ b/plugins/esi/esi.cc @@ -23,10 +23,10 @@ #include "ts/ink_defs.h" -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include @@ -559,7 +559,7 @@ removeCacheKey(TSHttpTxn txnp) TSCacheRemove(contp, cacheKey); result = true; TSError("[esi][%s] TSCacheRemoved", __FUNCTION__); - } while (0); + } while (false); if (cacheKey != nullptr) { TSCacheKeyDestroy(cacheKey); @@ -1325,7 +1325,7 @@ isTxnTransformable(TSHttpTxn txnp, bool is_cache_txn, bool *intercept_header, bo } retval = true; - } while (0); + } while (false); TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); return retval; diff --git a/plugins/esi/fetcher/HttpDataFetcherImpl.cc b/plugins/esi/fetcher/HttpDataFetcherImpl.cc index 337b9a5ed18..a569547cf9f 100644 --- a/plugins/esi/fetcher/HttpDataFetcherImpl.cc +++ b/plugins/esi/fetcher/HttpDataFetcherImpl.cc @@ -26,7 +26,7 @@ #include "lib/gzip.h" #include -#include +#include using std::string; using namespace EsiLib; diff --git a/plugins/esi/lib/DocNode.cc b/plugins/esi/lib/DocNode.cc index 6adb1a86d42..030304a7822 100644 --- a/plugins/esi/lib/DocNode.cc +++ b/plugins/esi/lib/DocNode.cc @@ -68,9 +68,9 @@ DocNode::pack(string &buffer) const packString(data, data_len, buffer); int32_t n_elements = attr_list.size(); buffer.append(reinterpret_cast(&n_elements), sizeof(n_elements)); - for (AttributeList::const_iterator iter = attr_list.begin(); iter != attr_list.end(); ++iter) { - packString(iter->name, iter->name_len, buffer); - packString(iter->value, iter->value_len, buffer); + for (const auto &iter : attr_list) { + 
packString(iter.name, iter.name_len, buffer); + packString(iter.value, iter.value_len, buffer); } child_nodes.packToBuffer(buffer); *(reinterpret_cast(&buffer[orig_buf_size + 1])) = buffer.size() - orig_buf_size; @@ -126,8 +126,8 @@ DocNodeList::packToBuffer(string &buffer) const { int32_t n_elements = size(); buffer.append(reinterpret_cast(&n_elements), sizeof(n_elements)); - for (DocNodeList::const_iterator iter = begin(); iter != end(); ++iter) { - iter->pack(buffer); + for (const auto &iter : *this) { + iter.pack(buffer); } } diff --git a/plugins/esi/lib/EsiGunzip.cc b/plugins/esi/lib/EsiGunzip.cc index 9ab41b81ebc..a11648c8418 100644 --- a/plugins/esi/lib/EsiGunzip.cc +++ b/plugins/esi/lib/EsiGunzip.cc @@ -23,8 +23,8 @@ #include "EsiGunzip.h" #include "gzip.h" -#include -#include +#include +#include using std::string; using namespace EsiLib; @@ -112,8 +112,8 @@ EsiGunzip::stream_decode(const char *data, int data_len, std::string &udata) _total_data_length += data_len; } - for (BufferList::iterator iter = buf_list.begin(); iter != buf_list.end(); ++iter) { - udata.append(iter->data(), iter->size()); + for (auto &iter : buf_list) { + udata.append(iter.data(), iter.size()); } return true; diff --git a/plugins/esi/lib/EsiGzip.cc b/plugins/esi/lib/EsiGzip.cc index 81408c091d2..35d2ff86ccd 100644 --- a/plugins/esi/lib/EsiGzip.cc +++ b/plugins/esi/lib/EsiGzip.cc @@ -23,8 +23,8 @@ #include "EsiGzip.h" #include "gzip.h" -#include -#include +#include +#include using std::string; using namespace EsiLib; diff --git a/plugins/esi/lib/EsiParser.cc b/plugins/esi/lib/EsiParser.cc index 353c4573c94..7c135ef8b62 100644 --- a/plugins/esi/lib/EsiParser.cc +++ b/plugins/esi/lib/EsiParser.cc @@ -24,7 +24,7 @@ #include "EsiParser.h" #include "Utils.h" -#include +#include using std::string; using namespace EsiLib; diff --git a/plugins/esi/lib/EsiProcessor.cc b/plugins/esi/lib/EsiProcessor.cc index 6cae15601f8..59df2bea857 100644 --- a/plugins/esi/lib/EsiProcessor.cc +++ 
b/plugins/esi/lib/EsiProcessor.cc @@ -24,7 +24,7 @@ #include "EsiProcessor.h" #include "Stats.h" #include "FailureInfo.h" -#include +#include using std::string; using namespace EsiLib; diff --git a/plugins/esi/lib/Utils.cc b/plugins/esi/lib/Utils.cc index 94bdfdb287b..9af3c64267d 100644 --- a/plugins/esi/lib/Utils.cc +++ b/plugins/esi/lib/Utils.cc @@ -115,8 +115,8 @@ Utils::parseKeyValueConfig(const std::list &lines, KeyValueMap &kvMap, H { string key, value; std::istringstream iss; - for (std::list::const_iterator list_iter = lines.begin(); list_iter != lines.end(); ++list_iter) { - const string &conf_line = *list_iter; // handy reference + for (const auto &conf_line : lines) { + // handy reference if (!conf_line.size() || (conf_line[0] == '#')) { continue; } diff --git a/plugins/esi/lib/Variables.cc b/plugins/esi/lib/Variables.cc index 1730cf36ce5..8a8dec91212 100644 --- a/plugins/esi/lib/Variables.cc +++ b/plugins/esi/lib/Variables.cc @@ -25,7 +25,7 @@ #include "Attribute.h" #include "Utils.h" -#include +#include using std::list; using std::pair; @@ -50,9 +50,9 @@ const string Variables::NORM_SPECIAL_HEADERS[] = {string("HTTP_ACCEPT_LANGUAGE") inline string & Variables::_toUpperCase(string &str) const { - for (size_t i = 0; i < str.size(); ++i) { - if ((str[i] >= 'a') && (str[i] <= 'z')) { - str[i] = 'A' + (str[i] - 'a'); + for (char &i : str) { + if ((i >= 'a') && (i <= 'z')) { + i = 'A' + (i - 'a'); } } return str; @@ -167,10 +167,10 @@ Variables::_parseQueryString(const char *query_string, int query_string_len) _insert(_simple_data, string("QUERY_STRING"), string(query_string, query_string_len)); AttributeList attr_list; Utils::parseAttributes(query_string, query_string_len, attr_list, "&"); - for (AttributeList::iterator iter = attr_list.begin(); iter != attr_list.end(); ++iter) { - _debugLog(_debug_tag, "[%s] Inserting query string variable [%.*s] with value [%.*s]", __FUNCTION__, iter->name_len, iter->name, - iter->value_len, iter->value); - 
_insert(_dict_data[QUERY_STRING], string(iter->name, iter->name_len), string(iter->value, iter->value_len)); + for (auto &iter : attr_list) { + _debugLog(_debug_tag, "[%s] Inserting query string variable [%.*s] with value [%.*s]", __FUNCTION__, iter.name_len, iter.name, + iter.value_len, iter.value); + _insert(_dict_data[QUERY_STRING], string(iter.name, iter.name_len), string(iter.value, iter.value_len)); } } @@ -276,10 +276,10 @@ Variables::_parseSubCookies() StringHash &subcookies = _sub_cookies[name]; AttributeList attr_list; Utils::parseAttributes(value.c_str(), value.length(), attr_list, "&"); - for (AttributeList::iterator iter = attr_list.begin(); iter != attr_list.end(); ++iter) { - _debugLog(_debug_tag, "[%s] Inserting query string variable [%.*s] with value [%.*s]", __FUNCTION__, iter->name_len, - iter->name, iter->value_len, iter->value); - _insert(subcookies, string(iter->name, iter->name_len), string(iter->value, iter->value_len)); + for (auto &iter : attr_list) { + _debugLog(_debug_tag, "[%s] Inserting query string variable [%.*s] with value [%.*s]", __FUNCTION__, iter.name_len, iter.name, + iter.value_len, iter.value); + _insert(subcookies, string(iter.name, iter.name_len), string(iter.value, iter.value_len)); } } } @@ -342,8 +342,8 @@ Variables::clear() _dict_data[i].clear(); _cached_special_headers[i].clear(); } - for (int i = 0; i < N_SIMPLE_HEADERS; ++i) { - _cached_simple_headers[i].clear(); + for (auto &_cached_simple_header : _cached_simple_headers) { + _cached_simple_header.clear(); } _query_string.clear(); _headers_parsed = _query_string_parsed = false; @@ -356,25 +356,25 @@ Variables::_parseCookieString(const char *str, int str_len) { AttributeList cookies; Utils::parseAttributes(str, str_len, cookies, ";,"); - for (AttributeList::iterator iter = cookies.begin(); iter != cookies.end(); ++iter) { - std::string cookie = iter->name; - size_t pos = cookie.find("="); + for (auto &iter : cookies) { + std::string cookie = iter.name; + size_t pos = 
cookie.find('='); if (pos != std::string::npos) { cookie = cookie.substr(0, pos); } bool found = false; - for (Utils::HeaderValueList::iterator approved = _whitelistCookies.begin(); approved != _whitelistCookies.end(); ++approved) { - if ((*approved == "*") || (*approved == cookie)) { + for (auto &_whitelistCookie : _whitelistCookies) { + if ((_whitelistCookie == "*") || (_whitelistCookie == cookie)) { found = true; } } if (found == true) { - _insert(_dict_data[HTTP_COOKIE], string(iter->name, iter->name_len), string(iter->value, iter->value_len)); - _debugLog(_debug_tag, "[%s] Inserted cookie with name [%.*s] and value [%.*s]", __FUNCTION__, iter->name_len, iter->name, - iter->value_len, iter->value); + _insert(_dict_data[HTTP_COOKIE], string(iter.name, iter.name_len), string(iter.value, iter.value_len)); + _debugLog(_debug_tag, "[%s] Inserted cookie with name [%.*s] and value [%.*s]", __FUNCTION__, iter.name_len, iter.name, + iter.value_len, iter.value); } } } diff --git a/plugins/esi/lib/gzip.cc b/plugins/esi/lib/gzip.cc index d89633c5fca..d87cb77d631 100644 --- a/plugins/esi/lib/gzip.cc +++ b/plugins/esi/lib/gzip.cc @@ -21,7 +21,7 @@ limitations under the License. 
*/ -#include +#include #include "gzip.h" #include @@ -90,17 +90,17 @@ EsiLib::gzip(const ByteBlockList &blocks, std::string &cdata) uLong crc = crc32(0, Z_NULL, 0); int deflate_result = Z_OK; int in_data_size = 0; - for (ByteBlockList::const_iterator iter = blocks.begin(); iter != blocks.end(); ++iter) { - if (iter->data && (iter->data_len > 0)) { - zstrm.next_in = reinterpret_cast(const_cast(iter->data)); - zstrm.avail_in = iter->data_len; - in_data_size += iter->data_len; + for (auto block : blocks) { + if (block.data && (block.data_len > 0)) { + zstrm.next_in = reinterpret_cast(const_cast(block.data)); + zstrm.avail_in = block.data_len; + in_data_size += block.data_len; deflate_result = runDeflateLoop(zstrm, 0, cdata); if (deflate_result != Z_OK) { break; // break out of the blocks iteration } - crc = crc32(crc, reinterpret_cast(iter->data), iter->data_len); - total_data_len += iter->data_len; + crc = crc32(crc, reinterpret_cast(block.data), block.data_len); + total_data_len += block.data_len; } } if (!in_data_size) { diff --git a/plugins/esi/serverIntercept.cc b/plugins/esi/serverIntercept.cc index bdbe8bc8eaf..2ed42693954 100644 --- a/plugins/esi/serverIntercept.cc +++ b/plugins/esi/serverIntercept.cc @@ -25,9 +25,9 @@ #include "serverIntercept.h" #include -#include +#include #include -#include +#include const char *ECHO_HEADER_PREFIX = "Echo-"; const int ECHO_HEADER_PREFIX_LEN = 5; diff --git a/plugins/esi/test/StubIncludeHandler.cc b/plugins/esi/test/StubIncludeHandler.cc index 06ce32ee556..69687834f2e 100644 --- a/plugins/esi/test/StubIncludeHandler.cc +++ b/plugins/esi/test/StubIncludeHandler.cc @@ -20,7 +20,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include #include "StubIncludeHandler.h" #include "TestHttpDataFetcher.h" diff --git a/plugins/esi/test/docnode_test.cc b/plugins/esi/test/docnode_test.cc index a497c819f32..e0af7b6545b 100644 --- a/plugins/esi/test/docnode_test.cc +++ b/plugins/esi/test/docnode_test.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include #include "EsiParser.h" diff --git a/plugins/esi/test/gzip_test.cc b/plugins/esi/test/gzip_test.cc index 8dcf7ffc25c..a8b9defaf4f 100644 --- a/plugins/esi/test/gzip_test.cc +++ b/plugins/esi/test/gzip_test.cc @@ -22,9 +22,9 @@ */ #include -#include +#include #include -#include +#include #include "print_funcs.h" #include "Utils.h" diff --git a/plugins/esi/test/parser_test.cc b/plugins/esi/test/parser_test.cc index 22040f7cb00..1bd5f7b817a 100644 --- a/plugins/esi/test/parser_test.cc +++ b/plugins/esi/test/parser_test.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include #include "EsiParser.h" diff --git a/plugins/esi/test/print_funcs.cc b/plugins/esi/test/print_funcs.cc index a6e3b534744..851545c776e 100644 --- a/plugins/esi/test/print_funcs.cc +++ b/plugins/esi/test/print_funcs.cc @@ -21,8 +21,8 @@ limitations under the License. 
*/ -#include -#include +#include +#include #include "print_funcs.h" diff --git a/plugins/esi/test/processor_test.cc b/plugins/esi/test/processor_test.cc index b89c2b33497..444dc5ae7ac 100644 --- a/plugins/esi/test/processor_test.cc +++ b/plugins/esi/test/processor_test.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include #include "EsiProcessor.h" diff --git a/plugins/esi/test/utils_test.cc b/plugins/esi/test/utils_test.cc index a35efd57974..481bfbc9a87 100644 --- a/plugins/esi/test/utils_test.cc +++ b/plugins/esi/test/utils_test.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include #include "print_funcs.h" @@ -93,7 +93,7 @@ main() const char *expected_strs7[] = {"key1", "val1", nullptr}; checkAttributes("test7", attr_list, expected_strs7); - const char *escaped_sequence = "{\\\"site-attribute\\\":\\\"content=no_expandable; ajax_cert_expandable\\\"}"; + const char *escaped_sequence = R"({\"site-attribute\":\"content=no_expandable; ajax_cert_expandable\"})"; string str8("pos=\"FPM1\" spaceid=96584352 extra_mime=\""); str8.append(escaped_sequence); str8.append("\" foo=bar a=\"b\""); diff --git a/plugins/esi/test/vars_test.cc b/plugins/esi/test/vars_test.cc index ba0ca8847ed..5da39ee09c0 100644 --- a/plugins/esi/test/vars_test.cc +++ b/plugins/esi/test/vars_test.cc @@ -20,11 +20,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include #include -#include +#include #include -#include +#include #include "print_funcs.h" #include "Variables.h" diff --git a/plugins/experimental/balancer/balancer.cc b/plugins/experimental/balancer/balancer.cc index 8f68ea2d6d8..e9f59810ed1 100644 --- a/plugins/experimental/balancer/balancer.cc +++ b/plugins/experimental/balancer/balancer.cc @@ -23,10 +23,10 @@ #include "balancer.h" #include -#include +#include #include -#include -#include +#include +#include #include // Using ink_inet API is cheating, but I was too lazy to write new IPv6 address parsing routines ;) diff --git a/plugins/experimental/balancer/hash.cc b/plugins/experimental/balancer/hash.cc index 1f9dc138bdb..d08df3ab861 100644 --- a/plugins/experimental/balancer/hash.cc +++ b/plugins/experimental/balancer/hash.cc @@ -22,10 +22,10 @@ */ #include "balancer.h" -#include +#include #include #include -#include +#include #include #include #include @@ -43,6 +43,8 @@ sockaddrlen(const struct sockaddr *sa) default: TSReleaseAssert(0 && "unsupported socket type"); } + + return 0; } struct md5_key { @@ -67,7 +69,7 @@ struct md5_key { unsigned char key[MD5_DIGEST_LENGTH]; }; -typedef void (*HashComponent)(TSHttpTxn txn, TSRemapRequestInfo *, MD5_CTX *); +using HashComponent = void (*)(TSHttpTxn, TSRemapRequestInfo *, MD5_CTX *); // Hash on the source (client) IP address. 
void @@ -144,7 +146,7 @@ HashTxnKey(TSHttpTxn txn, TSRemapRequestInfo *rri, MD5_CTX *ctx) struct HashBalancer : public BalancerInstance { typedef std::map hash_ring_type; - typedef std::vector hash_part_type; + using hash_part_type = std::vector; enum { iterations = 10, @@ -152,7 +154,7 @@ struct HashBalancer : public BalancerInstance { HashBalancer() { this->hash_parts.push_back(HashTxnUrl); } void - push_target(const BalancerTarget &target) + push_target(const BalancerTarget &target) override { for (unsigned i = 0; i < iterations; ++i) { this->hash_ring.insert(std::make_pair(md5_key(target, i), target)); @@ -160,7 +162,7 @@ struct HashBalancer : public BalancerInstance { } const BalancerTarget & - balance(TSHttpTxn txn, TSRemapRequestInfo *rri) + balance(TSHttpTxn txn, TSRemapRequestInfo *rri) override { md5_key key; MD5_CTX ctx; diff --git a/plugins/experimental/balancer/roundrobin.cc b/plugins/experimental/balancer/roundrobin.cc index 652964407e8..16f0ffd3419 100644 --- a/plugins/experimental/balancer/roundrobin.cc +++ b/plugins/experimental/balancer/roundrobin.cc @@ -22,8 +22,8 @@ */ #include "balancer.h" -#include -#include +#include +#include #include #include #include @@ -33,13 +33,13 @@ namespace struct RoundRobinBalancer : public BalancerInstance { RoundRobinBalancer() : targets(), next(0) {} void - push_target(const BalancerTarget &target) + push_target(const BalancerTarget &target) override { this->targets.push_back(target); } const BalancerTarget & - balance(TSHttpTxn, TSRemapRequestInfo *) + balance(TSHttpTxn, TSRemapRequestInfo *) override { return this->targets[++next % this->targets.size()]; } diff --git a/plugins/experimental/buffer_upload/buffer_upload.cc b/plugins/experimental/buffer_upload/buffer_upload.cc index f80977debc4..a4e94acdad5 100644 --- a/plugins/experimental/buffer_upload/buffer_upload.cc +++ b/plugins/experimental/buffer_upload/buffer_upload.cc @@ -27,19 +27,19 @@ * */ -#include -#include -#include -#include +#include +#include 
+#include +#include #include #include #include #include -#include +#include #include #include -#include -#include +#include +#include /* #define DEBUG 1 */ #define DEBUG_TAG "buffer_upload-dbg" @@ -76,7 +76,7 @@ struct upload_config_t { int thread_num; }; -typedef struct upload_config_t upload_config; +using upload_config = struct upload_config_t; enum config_type { TYPE_INT, @@ -135,7 +135,7 @@ struct pvc_state_t { TSHttpTxn http_txnp; }; -typedef struct pvc_state_t pvc_state; +using pvc_state = struct pvc_state_t; // print IOBuffer for test purpose /* @@ -322,7 +322,7 @@ pvc_process_p_read(TSCont contp, TSEvent event, pvc_state *my_state) TSMutexLock(my_state->disk_io_mutex); if (write_buffer_to_disk(my_state->req_hdr_reader, my_state, contp) == TS_ERROR) { LOG_ERROR("write_buffer_to_disk"); - uconfig->use_disk_buffer = 0; + uconfig->use_disk_buffer = false; close(my_state->fd); my_state->fd = -1; } @@ -427,7 +427,7 @@ pvc_process_n_write(TSCont contp, TSEvent event, pvc_state *my_state) /* FALL THROUGH */ case TS_EVENT_VCONN_WRITE_COMPLETE: /* We should have already shutdown read pvc side */ - TSAssert(my_state->p_read_vio == NULL); + TSAssert(my_state->p_read_vio == nullptr); TSVConnShutdown(my_state->net_vc, 0, 1); my_state->req_finished = 1; @@ -512,7 +512,7 @@ pvc_process_p_write(TSCont contp, TSEvent event, pvc_state *my_state) /* FALL THROUGH */ case TS_EVENT_VCONN_WRITE_COMPLETE: /* We should have already shutdown read net side */ - TSAssert(my_state->n_read_vio == NULL); + TSAssert(my_state->n_read_vio == nullptr); TSVConnShutdown(my_state->p_vc, 0, 1); my_state->resp_finished = 1; pvc_check_done(contp, my_state); @@ -906,7 +906,7 @@ attach_pvc_plugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edata) unlink(path); if (my_state->fd < 0) { LOG_ERROR("open"); - uconfig->use_disk_buffer = 0; + uconfig->use_disk_buffer = false; my_state->fd = -1; } else { TSDebug(DEBUG_TAG, "temp filename: %s", path); @@ -1228,7 +1228,7 @@ TSPluginInit(int argc, 
const char *argv[]) if (uconfig->use_disk_buffer && !create_directory()) { TSError("[buffer_upload] Directory creation failed."); - uconfig->use_disk_buffer = 0; + uconfig->use_disk_buffer = false; } if (TSPluginRegister(&info) != TS_SUCCESS) { diff --git a/plugins/experimental/cache_promote/cache_promote.cc b/plugins/experimental/cache_promote/cache_promote.cc index 83c7b217ba1..a5101cbb0af 100644 --- a/plugins/experimental/cache_promote/cache_promote.cc +++ b/plugins/experimental/cache_promote/cache_promote.cc @@ -16,12 +16,12 @@ limitations under the License. */ -#include -#include +#include +#include #include #include -#include -#include +#include +#include #include #include @@ -117,20 +117,20 @@ class PromotionPolicy class ChancePolicy : public PromotionPolicy { public: - bool doPromote(TSHttpTxn /* txnp ATS_UNUSED */) + bool doPromote(TSHttpTxn /* txnp ATS_UNUSED */) override { TSDebug(PLUGIN_NAME, "ChancePolicy::doPromote(%f)", getSample()); return true; } void - usage() const + usage() const override { TSError("[%s] Usage: @plugin=%s.so @pparam=--policy=chance @pparam=--sample=%%", PLUGIN_NAME, PLUGIN_NAME); } const char * - policyName() const + policyName() const override { return "chance"; } @@ -153,7 +153,9 @@ class LRUHash operator=(const LRUHash &h) { TSDebug(PLUGIN_NAME, "copying an LRUHash object"); - memcpy(_hash, h._hash, sizeof(_hash)); + if (this != &h) { + memcpy(_hash, h._hash, sizeof(_hash)); + } return *this; } @@ -186,7 +188,7 @@ struct LRUHashHasher { }; typedef std::pair LRUEntry; -typedef std::list LRUList; +using LRUList = std::list; typedef std::unordered_map LRUMap; static LRUEntry NULL_LRU_ENTRY; // Used to create an "empty" new LRUEntry @@ -194,22 +196,24 @@ static LRUEntry NULL_LRU_ENTRY; // Used to create an "empty" new LRUEntry class LRUPolicy : public PromotionPolicy { public: - LRUPolicy() : PromotionPolicy(), _buckets(1000), _hits(10), _lock(TSMutexCreate()) {} - ~LRUPolicy() + LRUPolicy() : PromotionPolicy(), _buckets(1000), 
_hits(10), _lock(TSMutexCreate()), _list_size(0), _freelist_size(0) {} + ~LRUPolicy() override { TSDebug(PLUGIN_NAME, "deleting LRUPolicy object"); TSMutexLock(_lock); _map.clear(); _list.clear(); + _list_size = 0; _freelist.clear(); + _freelist_size = 0; TSMutexUnlock(_lock); TSMutexDestroy(_lock); } bool - parseOption(int opt, char *optarg) + parseOption(int opt, char *optarg) override { switch (opt) { case 'b': @@ -236,7 +240,7 @@ class LRUPolicy : public PromotionPolicy } bool - doPromote(TSHttpTxn txnp) + doPromote(TSHttpTxn txnp) override { LRUHash hash; LRUMap::iterator map_it; @@ -275,11 +279,13 @@ class LRUPolicy : public PromotionPolicy map_it = _map.find(&hash); if (_map.end() != map_it) { // We have an entry in the LRU - TSAssert(_list.size() > 0); // mismatch in the LRUs hash and list, shouldn't happen + TSAssert(_list_size > 0); // mismatch in the LRUs hash and list, shouldn't happen if (++(map_it->second->second) >= _hits) { // Promoted! Cleanup the LRU, and signal success. Save the promoted entry on the freelist. 
TSDebug(PLUGIN_NAME, "saving the LRUEntry to the freelist"); _freelist.splice(_freelist.begin(), _list, map_it->second); + ++_freelist_size; + --_list_size; _map.erase(map_it->first); ret = true; } else { @@ -289,16 +295,19 @@ class LRUPolicy : public PromotionPolicy } } else { // New LRU entry for the URL, try to repurpose the list entry as much as possible - if (_list.size() >= _buckets) { + if (_list_size >= _buckets) { TSDebug(PLUGIN_NAME, "repurposing last LRUHash entry"); _list.splice(_list.begin(), _list, --_list.end()); _map.erase(&(_list.begin()->first)); - } else if (_freelist.size() > 0) { + } else if (_freelist_size > 0) { TSDebug(PLUGIN_NAME, "reusing LRUEntry from freelist"); _list.splice(_list.begin(), _freelist, _freelist.begin()); + --_freelist_size; + ++_list_size; } else { TSDebug(PLUGIN_NAME, "creating new LRUEntry"); _list.push_front(NULL_LRU_ENTRY); + ++_list_size; } // Update the "new" LRUEntry and add it to the hash _list.begin()->first = hash; @@ -312,14 +321,14 @@ class LRUPolicy : public PromotionPolicy } void - usage() const + usage() const override { TSError("[%s] Usage: @plugin=%s.so @pparam=--policy=lru @pparam=--buckets= --hits= --sample=", PLUGIN_NAME, PLUGIN_NAME); } const char * - policyName() const + policyName() const override { return "LRU"; } @@ -327,10 +336,12 @@ class LRUPolicy : public PromotionPolicy private: unsigned _buckets; unsigned _hits; - // For the LRU + // For the LRU. Note that we keep track of the List sizes, because some versions fo STL have broken + // implementations of size(), making them obsessively slow on calling ::size(). 
TSMutex _lock; LRUMap _map; LRUList _list, _freelist; + size_t _list_size, _freelist_size; }; ////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/plugins/experimental/cache_range_requests/cache_range_requests.cc b/plugins/experimental/cache_range_requests/cache_range_requests.cc index a11003f4917..a50001fe503 100644 --- a/plugins/experimental/cache_range_requests/cache_range_requests.cc +++ b/plugins/experimental/cache_range_requests/cache_range_requests.cc @@ -26,8 +26,8 @@ * requests. */ -#include -#include +#include +#include #include "ts/ts.h" #include "ts/remap.h" diff --git a/plugins/experimental/cachekey/cachekey.cc b/plugins/experimental/cachekey/cachekey.cc index 71af4a16e9f..8a6d62b7e25 100644 --- a/plugins/experimental/cachekey/cachekey.cc +++ b/plugins/experimental/cachekey/cachekey.cc @@ -21,8 +21,8 @@ * @brief Cache key manipulation. */ -#include /* strlen() */ -#include /* istringstream */ +#include /* strlen() */ +#include /* istringstream */ #include "cachekey.h" static void @@ -108,7 +108,7 @@ getKeyQuery(const char *query, int length, const ConfigQuery &config) T container; while (std::getline(istr, token, '&')) { - String::size_type pos(token.find_first_of("=")); + String::size_type pos(token.find_first_of('=')); String param(token.substr(0, pos == String::npos ? 
token.size() : pos)); if (config.toBeAdded(param)) { @@ -284,8 +284,8 @@ CacheKey::appendPrefix(const String &prefix, Pattern &prefixCapture, Pattern &pr StringVector captures; if (prefixCapture.process(hostAndPort, captures)) { - for (StringVector::iterator it = captures.begin(); it != captures.end(); it++) { - append(*it); + for (auto &capture : captures) { + append(capture); } CacheKeyDebug("added host:port capture prefix, key: '%s'", _key.c_str()); } @@ -298,8 +298,8 @@ CacheKey::appendPrefix(const String &prefix, Pattern &prefixCapture, Pattern &pr if (!uri.empty()) { StringVector captures; if (prefixCaptureUri.process(uri, captures)) { - for (StringVector::iterator it = captures.begin(); it != captures.end(); it++) { - append(*it); + for (auto &capture : captures) { + append(capture); } CacheKeyDebug("added URI capture prefix, key: '%s'", _key.c_str()); } @@ -341,8 +341,8 @@ CacheKey::appendPath(Pattern &pathCapture, Pattern &pathCaptureUri) if (!uri.empty()) { StringVector captures; if (pathCaptureUri.process(uri, captures)) { - for (StringVector::iterator it = captures.begin(); it != captures.end(); it++) { - append(*it); + for (auto &capture : captures) { + append(capture); } CacheKeyDebug("added URI capture (path), key: '%s'", _key.c_str()); } @@ -356,8 +356,8 @@ CacheKey::appendPath(Pattern &pathCapture, Pattern &pathCaptureUri) if (!path.empty()) { StringVector captures; if (pathCapture.process(path, captures)) { - for (StringVector::iterator it = captures.begin(); it != captures.end(); it++) { - append(*it); + for (auto &capture : captures) { + append(capture); } CacheKeyDebug("added path capture, key: '%s'", _key.c_str()); } @@ -458,7 +458,7 @@ CacheKey::appendCookies(const ConfigCookies &config) while (std::getline(istr, cookie, ';')) { ::ltrim(cookie); // Trim leading spaces. - String::size_type pos(cookie.find_first_of("=")); + String::size_type pos(cookie.find_first_of('=')); String name(cookie.substr(0, pos == String::npos ? 
cookie.size() : pos)); /* We only add it to the cache key it is in the cookie set. */ @@ -554,8 +554,8 @@ CacheKey::appendUaCaptures(Pattern &config) StringVector captures; if (config.process(val, captures)) { - for (StringVector::iterator it = captures.begin(); it != captures.end(); it++) { - append(*it); + for (auto &capture : captures) { + append(capture); } } } diff --git a/plugins/experimental/cachekey/pattern.cc b/plugins/experimental/cachekey/pattern.cc index abe41dd9168..870dd60eda0 100644 --- a/plugins/experimental/cachekey/pattern.cc +++ b/plugins/experimental/cachekey/pattern.cc @@ -86,7 +86,7 @@ Pattern::init(const String &config) size_t next = 1; do { current = next + 1; - next = config.find_first_of("/", current); + next = config.find_first_of('/', current); } while (next != String::npos && '\\' == config[next - 1]); if (next != String::npos) { @@ -100,7 +100,7 @@ Pattern::init(const String &config) start = next + 1; do { current = next + 1; - next = config.find_first_of("/", current); + next = config.find_first_of('/', current); } while (next != String::npos && '\\' == config[next - 1]); if (next != String::npos) { @@ -395,8 +395,8 @@ Pattern::compile() */ MultiPattern::~MultiPattern() { - for (std::vector::iterator p = this->_list.begin(); p != this->_list.end(); ++p) { - delete (*p); + for (auto &p : this->_list) { + delete p; } } @@ -430,8 +430,8 @@ MultiPattern::add(Pattern *pattern) bool MultiPattern::match(const String &subject) const { - for (std::vector::const_iterator p = this->_list.begin(); p != this->_list.end(); ++p) { - if (nullptr != (*p) && (*p)->match(subject)) { + for (auto p : this->_list) { + if (nullptr != p && p->match(subject)) { return true; } } @@ -452,8 +452,8 @@ MultiPattern::name() const */ Classifier::~Classifier() { - for (std::vector::iterator p = _list.begin(); p != _list.end(); ++p) { - delete (*p); + for (auto &p : _list) { + delete p; } } @@ -468,11 +468,11 @@ bool Classifier::classify(const String &subject, String 
&name) const { bool matched = false; - for (std::vector::const_iterator p = _list.begin(); p != _list.end(); ++p) { - if ((*p)->empty()) { + for (auto p : _list) { + if (p->empty()) { continue; - } else if ((*p)->match(subject)) { - name = (*p)->name(); + } else if (p->match(subject)) { + name = p->name(); matched = true; break; } diff --git a/plugins/experimental/cachekey/tests/test_cachekey.py b/plugins/experimental/cachekey/tests/test_cachekey.py index a232f2b25c4..e061b2e9759 100644 --- a/plugins/experimental/cachekey/tests/test_cachekey.py +++ b/plugins/experimental/cachekey/tests/test_cachekey.py @@ -31,119 +31,119 @@ # a 'meta' bench and then use it to create / adjust the corresponding query, headers and cookie # related test benches and use to to validate the plugin behavoior. TBD how well that works. meta_bench = [ - # Testing empty parametes and defaults. - { "args": "", - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('include', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('exclude',[])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('exclude', []), ('include', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('remove-all', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - - # Testing the removal of query parameters from the cache key. 
- { "args": [('remove-all', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('remove-all', ['false'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('remove-all', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [] }, - - # Testing the sorting of the query parameters in the cache key. - { "args": [('sort', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('sort', ['false'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - { "args": [('sort', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('a','1'),('b','2'),('c','1'),('k','1'),('u','1'),('x','1'),('y','1')] }, - { "args": [('sort', []), ('remove-all', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')] }, - - # Testing the exclusion of query parameters from the cache key. 
- { "args": [('exclude', ['x','y','z'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('k','1'),('u','1')] }, - { "args": [('exclude', ['x','y','z']), ('include', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('k','1'),('u','1')] }, - { "args": [('exclude', ['x','y','z']), ('include', []), ('sort', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('a','1'),('b','2'),('c','1'),('k','1'),('u','1')] }, - - # Testing the inclusion of query parameters in the cache key. - { "args": [('include', ['x','y','b','c'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2'),('x','1'),('y','1')] }, - { "args": [('include', ['x','y','b','c', 'g'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2'),('x','1'),('y','1')] }, - { "args": [('include', ['x','y','b','c']), ('exclude', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2'),('x','1'),('y','1')] }, - { "args": [('include', ['x','y','b','c']), ('sort', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('b','2'),('c','1'),('x','1'),('y','1')] }, - - # Testing various useful cases (combinations) to include/exclude/sort query parameters in the cache key. 
- { "args": [('exclude', ['x','y','z']), ('include', ['x','y','b','c'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2')] }, - { "args": [('exclude', ['x','y','z']), ('include', [])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('k','1'),('u','1')] }, - { "args": [('exclude', ['x','y','z']), ('include', []), ('sort', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('a','1'),('b','2'),('c','1'),('k','1'),('u','1')] }, - { "args": [('exclude', ['x','y','z']), ('include', ['x','y','b','c']), ('sort', ['true']), ('remove-all', ['true'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [] }, - { "args": [('exclude', ['x']), ('exclude', ['y']), ('exclude', ['z']), ('include', ['y','c']), ('include', ['x','b'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2')] }, - - # Testing regex include-match. - { "args": [('include-match', ['(a|b|c)']),], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),] }, - # Testing multiple regex include-match with pattern that don't match ('k' and 'u'). - { "args": [('include-match', ['(a|b|c)']), ('include-match', ['(x|y|z)'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2'),('x','1'),('y','1')] }, - # Testing regex exclude match. - { "args": [('exclude-match', ['(a|b|c)']),], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('x','1'),('k','1'),('u','1'),('y','1')] }, - # Testing multiple regex exclude-match with pattern that don't match ('k' and 'u'). 
- { "args": [('exclude-match', ['(a|b|c)']), ('exclude-match', ['(x|y|z)'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('k','1'),('u','1')] }, - # Testing mixing exclude and include match - { "args": [('include-match', ['(a|b|c|x)']), ('exclude-match', ['(x|y|z)'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('a','1'),('b','2')] }, - # Testing mixing exclude and include match - { "args": [('exclude-match', ['x']), ('exclude-match', ['y']), ('exclude-match', ['z']), ('include-match', ['(y|c)']), ('include-match', ['(x|b)'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2')] }, - # Testing mixing `--include-params`, `--exclude-params`, `--include-match-param` and `--exclude-match-param` - { "args": [('exclude', ['x']), ('exclude-match', ['y']), ('exclude-match', ['z']), ('include', ['y','c']), ('include-match', ['(x|b)'])], - "uri": [('c','1'),('a','1'),('b','2'),('x','1'),('k','1'),('u','1'),('y','1')], - "key": [('c','1'),('b','2')] }, - ] + # Testing empty parametes and defaults. 
+ {"args": "", + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('include', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('exclude', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('exclude', []), ('include', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('remove-all', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + + # Testing the removal of query parameters from the cache key. + {"args": [('remove-all', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('remove-all', ['false'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('remove-all', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": []}, + + # Testing the sorting of the query parameters in the cache key. 
+ {"args": [('sort', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('sort', ['false'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + {"args": [('sort', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('a', '1'), ('b', '2'), ('c', '1'), ('k', '1'), ('u', '1'), ('x', '1'), ('y', '1')]}, + {"args": [('sort', []), ('remove-all', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + + # Testing the exclusion of query parameters from the cache key. + {"args": [('exclude', ['x', 'y', 'z'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('k', '1'), ('u', '1')]}, + {"args": [('exclude', ['x', 'y', 'z']), ('include', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('k', '1'), ('u', '1')]}, + {"args": [('exclude', ['x', 'y', 'z']), ('include', []), ('sort', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('a', '1'), ('b', '2'), ('c', '1'), ('k', '1'), ('u', '1')]}, + + # Testing the inclusion of query parameters in the cache key. 
+ {"args": [('include', ['x', 'y', 'b', 'c'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2'), ('x', '1'), ('y', '1')]}, + {"args": [('include', ['x', 'y', 'b', 'c', 'g'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2'), ('x', '1'), ('y', '1')]}, + {"args": [('include', ['x', 'y', 'b', 'c']), ('exclude', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2'), ('x', '1'), ('y', '1')]}, + {"args": [('include', ['x', 'y', 'b', 'c']), ('sort', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('b', '2'), ('c', '1'), ('x', '1'), ('y', '1')]}, + + # Testing various useful cases (combinations) to include/exclude/sort query parameters in the cache key. + {"args": [('exclude', ['x', 'y', 'z']), ('include', ['x', 'y', 'b', 'c'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2')]}, + {"args": [('exclude', ['x', 'y', 'z']), ('include', [])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('k', '1'), ('u', '1')]}, + {"args": [('exclude', ['x', 'y', 'z']), ('include', []), ('sort', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('a', '1'), ('b', '2'), ('c', '1'), ('k', '1'), ('u', '1')]}, + {"args": [('exclude', ['x', 'y', 'z']), ('include', ['x', 'y', 'b', 'c']), ('sort', ['true']), ('remove-all', ['true'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": []}, + {"args": [('exclude', ['x']), ('exclude', ['y']), ('exclude', ['z']), ('include', ['y', 'c']), ('include', ['x', 
'b'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2')]}, + + # Testing regex include-match. + {"args": [('include-match', ['(a|b|c)']), ], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ]}, + # Testing multiple regex include-match with pattern that don't match ('k' and 'u'). + {"args": [('include-match', ['(a|b|c)']), ('include-match', ['(x|y|z)'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('y', '1')]}, + # Testing regex exclude match. + {"args": [('exclude-match', ['(a|b|c)']), ], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')]}, + # Testing multiple regex exclude-match with pattern that don't match ('k' and 'u'). 
+ {"args": [('exclude-match', ['(a|b|c)']), ('exclude-match', ['(x|y|z)'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('k', '1'), ('u', '1')]}, + # Testing mixing exclude and include match + {"args": [('include-match', ['(a|b|c|x)']), ('exclude-match', ['(x|y|z)'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('a', '1'), ('b', '2')]}, + # Testing mixing exclude and include match + {"args": [('exclude-match', ['x']), ('exclude-match', ['y']), ('exclude-match', ['z']), ('include-match', ['(y|c)']), ('include-match', ['(x|b)'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2')]}, + # Testing mixing `--include-params`, `--exclude-params`, `--include-match-param` and `--exclude-match-param` + {"args": [('exclude', ['x']), ('exclude-match', ['y']), ('exclude-match', ['z']), ('include', ['y', 'c']), ('include-match', ['(x|b)'])], + "uri": [('c', '1'), ('a', '1'), ('b', '2'), ('x', '1'), ('k', '1'), ('u', '1'), ('y', '1')], + "key": [('c', '1'), ('b', '2')]}, +] # Query related bench - meta_bench is used to populate it. query_bench = [] @@ -156,212 +156,213 @@ # Prefix related tests. Doesn't use the meta_bench. 
prefix_bench = [ - # Testing not adding any custom prefix - { "args": "", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing using the option but with no value - { "args": "@pparam=--static-prefix=", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a static prefix to the cache key - { "args": "@pparam=--static-prefix=static_prefix", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/static_prefix/path/to/object?a=1&b=2&c=3" - }, - # Testing using the option but with no value - { "args": "@pparam=--capture-prefix=", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a capture prefix to the cache key - { "args": "@pparam=--capture-prefix=(test_prefix).*:([^\s\/$]*)", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/test_prefix/{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a capture prefix with replacement string defined - { "args": "@pparam=--capture-prefix=/(test_prefix).*:([^\s\/]*)/$1_$2/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/test_prefix_{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a capture prefix from URI to the cache key - { "args": "@pparam=--capture-prefix-uri=(test_prefix).*:.*(object).*$", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/test_prefix/object/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a capture prefix from with replacement string defined - { "args": "@pparam=--capture-prefix-uri=/(test_prefix).*:.*(object).*$/$1_$2/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": 
"/test_prefix_object/path/to/object?a=1&b=2&c=3" - }, - # Testing adding both static and capture prefix to the cache key - { "args": "@pparam=--static-prefix=static_prefix @pparam=--capture-prefix=(test_prefix).*", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/static_prefix/test_prefix/path/to/object?a=1&b=2&c=3" - }, - # Testing adding static and capture prefix and capture prefix from URI to the cache key - { "args": "@pparam=--static-prefix=static_prefix @pparam=--capture-prefix=(test_prefix).* @pparam=--capture-prefix-uri=(object).*", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/static_prefix/test_prefix/object/path/to/object?a=1&b=2&c=3" - }, - ] + # Testing not adding any custom prefix + {"args": "", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing using the option but with no value + {"args": "@pparam=--static-prefix=", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a static prefix to the cache key + {"args": "@pparam=--static-prefix=static_prefix", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/static_prefix/path/to/object?a=1&b=2&c=3" + }, + # Testing using the option but with no value + {"args": "@pparam=--capture-prefix=", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a capture prefix to the cache key + {"args": "@pparam=--capture-prefix=(test_prefix).*:([^\s\/$]*)", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/test_prefix/{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a capture prefix with replacement string defined + {"args": 
"@pparam=--capture-prefix=/(test_prefix).*:([^\s\/]*)/$1_$2/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/test_prefix_{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a capture prefix from URI to the cache key + {"args": "@pparam=--capture-prefix-uri=(test_prefix).*:.*(object).*$", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/test_prefix/object/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a capture prefix from with replacement string defined + {"args": "@pparam=--capture-prefix-uri=/(test_prefix).*:.*(object).*$/$1_$2/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/test_prefix_object/path/to/object?a=1&b=2&c=3" + }, + # Testing adding both static and capture prefix to the cache key + {"args": "@pparam=--static-prefix=static_prefix @pparam=--capture-prefix=(test_prefix).*", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/static_prefix/test_prefix/path/to/object?a=1&b=2&c=3" + }, + # Testing adding static and capture prefix and capture prefix from URI to the cache key + {"args": "@pparam=--static-prefix=static_prefix @pparam=--capture-prefix=(test_prefix).* @pparam=--capture-prefix-uri=(object).*", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/static_prefix/test_prefix/object/path/to/object?a=1&b=2&c=3" + }, +] path_bench = [ - # Testing adding default path to the cache key - { "args": "", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" - }, - # Testing adding a path capture to the cache key - { "args": "@pparam=--capture-path=.*(object).*", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/object?a=1&b=2&c=3" - }, - # Testing adding a path capture/replacement to the cache key - { "args": 
"@pparam=--capture-path=/.*(object).*/const_path_$1/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/const_path_object?a=1&b=2&c=3" - }, - # Testing adding an URI capture to the cache key - { "args": "@pparam=--capture-path-uri=(test_path).*(object).*", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/test_path/object?a=1&b=2&c=3" - }, - # Testing adding an URI capture/replacement to the cache key - { "args": "@pparam=--capture-path-uri=/(test_path).*(object).*/$1_$2/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/test_path_object?a=1&b=2&c=3" - }, - # Testing adding an URI and path capture/replacement together to the cache key - { "args": "@pparam=--capture-path=/.*(object).*/const_path_$1/ @pparam=--capture-path-uri=/(test_path).*(object).*/$1_$2/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [], - "cookies": [], - "key": "/{0}/{1}/test_path_object/const_path_object?a=1&b=2&c=3" - }, - ] + # Testing adding default path to the cache key + {"args": "", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" + }, + # Testing adding a path capture to the cache key + {"args": "@pparam=--capture-path=.*(object).*", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/object?a=1&b=2&c=3" + }, + # Testing adding a path capture/replacement to the cache key + {"args": "@pparam=--capture-path=/.*(object).*/const_path_$1/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/const_path_object?a=1&b=2&c=3" + }, + # Testing adding an URI capture to the cache key + {"args": "@pparam=--capture-path-uri=(test_path).*(object).*", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": 
"/{0}/{1}/test_path/object?a=1&b=2&c=3" + }, + # Testing adding an URI capture/replacement to the cache key + {"args": "@pparam=--capture-path-uri=/(test_path).*(object).*/$1_$2/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/test_path_object?a=1&b=2&c=3" + }, + # Testing adding an URI and path capture/replacement together to the cache key + {"args": "@pparam=--capture-path=/.*(object).*/const_path_$1/ @pparam=--capture-path-uri=/(test_path).*(object).*/$1_$2/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [], + "cookies": [], + "key": "/{0}/{1}/test_path_object/const_path_object?a=1&b=2&c=3" + }, +] # User-Agent header capture related tests. Doesn't use the meta_bench. ua_captures_bench = [ - # Testing single match without grouping. - { "args": "@pparam=--ua-capture=Mozilla\/[^\s]*", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0/path/to/object?a=1&b=2&c=3" - }, - # Testing single match with grouping. - { "args": "@pparam=--ua-capture=(Mozilla\/[^\s]*)", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0/path/to/object?a=1&b=2&c=3" - }, - # Testing multiple capturing group match. 
- { "args": "@pparam=--ua-capture=(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0/AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" - }, - # Testing multiple capturing group match with empty replacement string. - { "args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)//", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0/AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" - }, - # Testing multiple capturing group match with the replacement. - { "args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$1_$2/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0_AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" - }, - # Testing multiple capturing group match with $0 (zero group) in the replacement. - { "args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$0/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_9_3)%20AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" - }, - # Testing an extra invalid variable in the replacement, the whole capture will be ignored (TODO verify the error message in the log). 
- { "args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$1_$2_$3/", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" - }, - ] + # Testing single match without grouping. + {"args": "@pparam=--ua-capture=Mozilla\/[^\s]*", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0/path/to/object?a=1&b=2&c=3" + }, + # Testing single match with grouping. + {"args": "@pparam=--ua-capture=(Mozilla\/[^\s]*)", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0/path/to/object?a=1&b=2&c=3" + }, + # Testing multiple capturing group match. + {"args": "@pparam=--ua-capture=(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0/AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" + }, + # Testing multiple capturing group match with empty replacement string. 
+ {"args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)//", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0/AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" + }, + # Testing multiple capturing group match with the replacement. + {"args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$1_$2/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0_AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" + }, + # Testing multiple capturing group match with $0 (zero group) in the replacement. + {"args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$0/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/Mozilla/5.0%20(Macintosh;%20Intel%20Mac%20OS%20X%2010_9_3)%20AppleWebKit/537.75.14/path/to/object?a=1&b=2&c=3" + }, + # Testing an extra invalid variable in the replacement, the whole capture will be ignored (TODO verify the error message in the log). + {"args": "@pparam=--ua-capture=/(Mozilla\/[^\s]*).*(AppleWebKit\/[^\s]*)/$1_$2_$3/", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/path/to/object?a=1&b=2&c=3" + }, +] ua_classifier_bench = [ - # Testing ua-blacklist. 
- { "args": "@pparam=--ua-blacklist=class1:class1_blacklist.config", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Bozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", - "files": [("class1_blacklist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], - }, - # Testing ua-whitelist. - { "args": "@pparam=--ua-whitelist=class1:class1_blacklist.config", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", - "files": [("class1_blacklist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], - }, - # Testing ua-whitelist and ua-blacklist together, whitelist specified before blacklist. - { "args": "@pparam=--ua-whitelist=class1:class1_whitelist.config @pparam=--ua-blacklist=class2:class2_blacklist.config", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", - "files": [("class1_whitelist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n"), - ("class2_blacklist.config", "^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], - }, - # Testing ua-whitelist and ua-blacklist together, blacklist specified before whitelist. 
- { "args": "@pparam=--ua-blacklist=class2:class2_blacklist.config @pparam=--ua-whitelist=class1:class1_whitelist.config", - "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", - "headers": [("User-Agent", "Bozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], - "cookies": [], - "key": "/{0}/{1}/class2/path/to/object?a=1&b=2&c=3", - "files": [("class1_whitelist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n"), - ("class2_blacklist.config", "^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], - }, - ] + # Testing ua-blacklist. + {"args": "@pparam=--ua-blacklist=class1:class1_blacklist.config", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Bozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", + "files": [("class1_blacklist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], + }, + # Testing ua-whitelist. + {"args": "@pparam=--ua-whitelist=class1:class1_blacklist.config", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", + "files": [("class1_blacklist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], + }, + # Testing ua-whitelist and ua-blacklist together, whitelist specified before blacklist. 
+ {"args": "@pparam=--ua-whitelist=class1:class1_whitelist.config @pparam=--ua-blacklist=class2:class2_blacklist.config", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/class1/path/to/object?a=1&b=2&c=3", + "files": [("class1_whitelist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n"), + ("class2_blacklist.config", "^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], + }, + # Testing ua-whitelist and ua-blacklist together, blacklist specified before whitelist. + {"args": "@pparam=--ua-blacklist=class2:class2_blacklist.config @pparam=--ua-whitelist=class1:class1_whitelist.config", + "uri": "{0}:{1}/path/to/object?a=1&b=2&c=3", + "headers": [("User-Agent", "Bozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A")], + "cookies": [], + "key": "/{0}/{1}/class2/path/to/object?a=1&b=2&c=3", + "files": [("class1_whitelist.config", "^Mozilla.*\n^AdSheet.*\n^iTube.*\n^TuneIn.*\n"), + ("class2_blacklist.config", "^iHeartRadio.*\n^Ruby.*\n^python.*\n^Twitter.*\n^Facebo.*\n")], + }, +] + def prepare_query_bench(bench): new_bench = [] @@ -370,26 +371,26 @@ def prepare_query_bench(bench): for arg in test['args']: args += '@pparam=--{0}-params='.format(arg[0]) - args += ','.join(map(str,arg[1])) + args += ','.join(map(str, arg[1])) args += ' ' uri = '{0}:{1}/?' kvp_list = [] - for (k,v) in test['uri']: - kvp_list.append('{0}={1}'.format(k,v)) + for (k, v) in test['uri']: + kvp_list.append('{0}={1}'.format(k, v)) uri += '&'.join(map(str, kvp_list)) key = '/{0}/{1}' if len(test['key']) != 0: key += '?' 
kvp_list = [] - for (k,v) in test['key']: - kvp_list.append('{0}={1}'.format(k,v)) + for (k, v) in test['key']: + kvp_list.append('{0}={1}'.format(k, v)) key += '&'.join(map(str, kvp_list)) headers = [] - new_test = { "args": args.strip(), "uri": uri.strip(), "headers": headers, "cookies": [], "key": key.strip() } + new_test = {"args": args.strip(), "uri": uri.strip(), "headers": headers, "cookies": [], "key": key.strip()} new_bench.append(new_test) return new_bench @@ -408,14 +409,14 @@ def prepare_headers_bench(bench): # 'exclude', 'exclude-match', 'sort', 'remove-all' don't make sense for headers as far cachekey is concerned. # headers always sorted and never included by default. if arg[0] == 'exclude' or arg[0] == 'sort' or arg[0] == 'remove-all' or arg[0] == 'include-match' or arg[0] == 'exclude-match': - ignore_test=True + ignore_test = True break if arg[0] == 'include' and len(arg[1]) != 0: include.append(arg[1]) args += '@pparam=--{0}-headers='.format(arg[0]) - args += ','.join(map(str,arg[1])) + args += ','.join(map(str, arg[1])) args += ' ' if ignore_test: @@ -432,16 +433,17 @@ def prepare_headers_bench(bench): if len(test['key']) != 0: key += '/' kvp_list = [] - for (k,v) in test['key']: - kvp_list.append('{0}:{1}'.format(k,v)) + for (k, v) in test['key']: + kvp_list.append('{0}:{1}'.format(k, v)) kvp_list.sort() key += '/'.join(map(str, kvp_list)) - new_test = { "args": args.strip(), "uri": uri.strip(), "headers": headers, "cookies": [], "key": key.strip() } + new_test = {"args": args.strip(), "uri": uri.strip(), "headers": headers, "cookies": [], "key": key.strip()} new_bench.append(new_test) return new_bench + def prepare_cookies_bench(bench): new_bench = [] for test in bench: @@ -455,17 +457,16 @@ def prepare_cookies_bench(bench): # 'exclude', 'exclude-match', 'sort', 'remove-all' don't make sense for cookies as far cachekey is concerned. # headers always sorted and never included by default. 
if arg[0] == 'exclude' or arg[0] == 'sort' or arg[0] == 'remove-all' or arg[0] == 'include-match' or arg[0] == 'exclude-match': - ignore_test=True + ignore_test = True break if arg[0] == 'include' and len(arg[1]) != 0: include.append(arg[1]) args += '@pparam=--{0}-cookies='.format(arg[0]) - args += ','.join(map(str,arg[1])) + args += ','.join(map(str, arg[1])) args += ' ' - if ignore_test: continue @@ -479,12 +480,12 @@ def prepare_cookies_bench(bench): if len(test['key']) != 0: key += '/' kvp_list = [] - for (k,v) in test['key']: - kvp_list.append('{0}={1}'.format(k,v)) + for (k, v) in test['key']: + kvp_list.append('{0}={1}'.format(k, v)) kvp_list.sort() key += ';'.join(map(str, kvp_list)) - new_test = { "args": args.strip(), "uri": uri.strip(), "headers": [], "cookies": cookies, "key": key.strip() } + new_test = {"args": args.strip(), "uri": uri.strip(), "headers": [], "cookies": cookies, "key": key.strip()} new_bench.append(new_test) return new_bench @@ -502,6 +503,7 @@ def getEnv(cls): env.clone(layout=layout) return env + class TestCacheKey(tsqa.test_cases.DynamicHTTPEndpointCase, StaticEnvironmentCase): @classmethod @@ -525,12 +527,13 @@ def add_remap_rule(remap_prefix, remap_index, test): host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index) port = cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] args = test['args'] - remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2} @plugin=cachekey.so {3}'.format(host, port, cls.http_endpoint.address[1], args) + remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2} @plugin=cachekey.so {3}'.format( + host, port, cls.http_endpoint.address[1], args) log.info(' {0}'.format(remap_rule)) cls.configs['remap.config'].add_line(remap_rule) log.info("Preparing cache key query hadnling test bench") - query_bench = prepare_query_bench(meta_bench); + query_bench = prepare_query_bench(meta_bench) log.info("Preparing cache key headers handling test bench") headers_bench = 
prepare_headers_bench(meta_bench) @@ -542,37 +545,37 @@ def add_remap_rule(remap_prefix, remap_index, test): i = 0 for test in query_bench: add_remap_rule("query", i, test) - i+=1 + i += 1 # Prepare headers tests related remap rules. i = 0 for test in headers_bench: add_remap_rule("headers", i, test) - i+=1 + i += 1 # Prepare headers tests related remap rules. i = 0 for test in cookies_bench: add_remap_rule("cookies", i, test) - i+=1 + i += 1 # Prepare prefix tests related remap rules. i = 0 for test in prefix_bench: add_remap_rule("prefix", i, test) - i+=1 + i += 1 # Prepare path tests related remap rules. i = 0 for test in path_bench: add_remap_rule("path", i, test) - i+=1 + i += 1 # Prepare ua-capture tests related remap rules. i = 0 for test in ua_captures_bench: add_remap_rule("ua_captures", i, test) - i+=1 + i += 1 # Prepare ua-classifier tests related remap rules. i = 0 @@ -583,11 +586,11 @@ def add_remap_rule(remap_prefix, remap_index, test): for file in test['files']: filename = file[0] content = file[1] - path = os.path.join(env.layout.prefix, 'etc/trafficserver', filename); + path = os.path.join(env.layout.prefix, 'etc/trafficserver', filename) with open(path, 'w') as fh: fh.write(content) - i+=1 + i += 1 # Set up an origin server which returns OK all the time. def handler(request): @@ -596,7 +599,6 @@ def handler(request): cls.http_endpoint.add_handler('/', handler) cls.http_endpoint.add_handler('/path/to/object', handler) - def get_cachekey(self, host, port, uri, headers, cookies): ''' Sends a request to the traffic server and gets the cache key used while processing the request. 
@@ -614,7 +616,7 @@ def get_cachekey(self, host, port, uri, headers, cookies): return response.headers['X-Cache-Key'] def verify_key(self, remap_prefix, remap_index, test): - host = 'test_{0}_{1}.example.com'.format( remap_prefix, remap_index) + host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index) port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] expected_key = test['key'].format(host, port) key = self.get_cachekey(host, port, test['uri'], test['headers'], test['cookies']) @@ -622,10 +624,10 @@ def verify_key(self, remap_prefix, remap_index, test): log.info(" map : cachekey.so {0}".format(test['args'])) log.info(" uri :'{0}'".format(test['uri'])) headers = '' - for name,value in test['headers']: + for name, value in test['headers']: headers += "'{0}: {1}' ".format(name, value) cookies = '' - for name,value in test['cookies']: + for name, value in test['cookies']: cookies += "'{0}: {1}' ".format(name, value) log.info(" headers: {0}".format(headers)) log.info(" cookies: {0}".format(cookies)) diff --git a/plugins/experimental/collapsed_connection/collapsed_connection.cc b/plugins/experimental/collapsed_connection/collapsed_connection.cc index e8e1eb74e06..eb3b92865f6 100644 --- a/plugins/experimental/collapsed_connection/collapsed_connection.cc +++ b/plugins/experimental/collapsed_connection/collapsed_connection.cc @@ -21,16 +21,16 @@ limitations under the License. 
*/ -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include #include "MurmurHash3.h" -#include +#include #include "P_collapsed_connection.h" diff --git a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc index 9e2d82cd086..7526a4ddb88 100644 --- a/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc +++ b/plugins/experimental/collapsed_forwarding/collapsed_forwarding.cc @@ -56,11 +56,11 @@ #include #include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include #include diff --git a/plugins/experimental/custom_redirect/custom_redirect.cc b/plugins/experimental/custom_redirect/custom_redirect.cc index 0ebf5f8d0ce..30ab6229cfe 100644 --- a/plugins/experimental/custom_redirect/custom_redirect.cc +++ b/plugins/experimental/custom_redirect/custom_redirect.cc @@ -24,15 +24,15 @@ /* custom_redirect.cc: Allows read header set by origin for internal redirects */ -#include +#include #include #include #include #include #include #include -#include -#include +#include +#include static char *redirect_url_header = nullptr; static int redirect_url_header_len = 0; diff --git a/plugins/experimental/epic/epic.cc b/plugins/experimental/epic/epic.cc index fd397345f98..e4550857b73 100644 --- a/plugins/experimental/epic/epic.cc +++ b/plugins/experimental/epic/epic.cc @@ -17,16 +17,16 @@ */ #include -#include -#include -#include -#include +#include +#include +#include +#include #include -#include -#include +#include +#include #include #include -#include +#include #include #include @@ -235,6 +235,9 @@ static const std::set epic_gauges = { "proxy.process.update.state_machines", "proxy.process.version.server.build_time", "proxy.process.websocket.current_active_client_connections", + "proxy.process.cache.span.failing", + "proxy.process.cache.span.offline", + 
"proxy.process.cache.span.online", }; struct epic_sample_context { diff --git a/plugins/experimental/escalate/escalate.cc b/plugins/experimental/escalate/escalate.cc index 142de29f85c..f6e88629eb3 100644 --- a/plugins/experimental/escalate/escalate.cc +++ b/plugins/experimental/escalate/escalate.cc @@ -23,10 +23,10 @@ #include #include #include -#include -#include +#include +#include #include -#include +#include #include #include #include diff --git a/plugins/experimental/geoip_acl/geoip_acl.cc b/plugins/experimental/geoip_acl/geoip_acl.cc index 6c17343a9a1..f3a0d5c6aea 100644 --- a/plugins/experimental/geoip_acl/geoip_acl.cc +++ b/plugins/experimental/geoip_acl/geoip_acl.cc @@ -22,8 +22,8 @@ // #include #include -#include -#include +#include +#include #include "lulu.h" #include "acl.h" diff --git a/plugins/experimental/header_freq/header_freq.cc b/plugins/experimental/header_freq/header_freq.cc index f8520fbebc6..56f787ae93f 100644 --- a/plugins/experimental/header_freq/header_freq.cc +++ b/plugins/experimental/header_freq/header_freq.cc @@ -25,8 +25,8 @@ #include #include #include -#include -#include +#include +#include #include #include diff --git a/plugins/experimental/header_normalize/header_normalize.cc b/plugins/experimental/header_normalize/header_normalize.cc index 5af6fbf8e30..c4d9e6ba58e 100644 --- a/plugins/experimental/header_normalize/header_normalize.cc +++ b/plugins/experimental/header_normalize/header_normalize.cc @@ -45,10 +45,10 @@ static char UNUSED rcsId__header_normalize_cc[] = #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include diff --git a/plugins/experimental/hipes/hipes.cc b/plugins/experimental/hipes/hipes.cc index b1a0516b17f..db2e3bf9cd8 100644 --- a/plugins/experimental/hipes/hipes.cc +++ b/plugins/experimental/hipes/hipes.cc @@ -18,10 +18,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include #include -#include -#include +#include +#include #include #include @@ -193,7 +193,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE for (int ix = 2; ix < argc; ++ix) { std::string arg = argv[ix]; - std::string::size_type sep = arg.find_first_of(":"); + std::string::size_type sep = arg.find_first_of(':'); if (sep == std::string::npos) { TSError("[hipes] Malformed options in url_remap: %s", argv[ix]); @@ -212,7 +212,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE } else if (arg.compare(0, 3, "ssl") == 0) { ri->ssl = true; } else if (arg.compare(0, 7, "service") == 0) { - std::string::size_type port = arg_val.find_first_of(":"); + std::string::size_type port = arg_val.find_first_of(':'); if (port == std::string::npos) { ri->svc_server = arg_val; @@ -221,7 +221,7 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE ri->svc_port = atoi(arg_val.substr(port + 1).c_str()); } } else if (arg.compare(0, 6, "server") == 0) { - std::string::size_type port = arg_val.find_first_of(":"); + std::string::size_type port = arg_val.find_first_of(':'); if (port == std::string::npos) { ri->hipes_server = arg_val; diff --git a/plugins/experimental/inliner/ats-inliner.cc b/plugins/experimental/inliner/ats-inliner.cc index 164e174c0b1..48eaac3802b 100644 --- a/plugins/experimental/inliner/ats-inliner.cc +++ b/plugins/experimental/inliner/ats-inliner.cc @@ -20,12 +20,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -#include +#include #include #include -#include +#include #include -#include +#include #include #include "inliner-handler.h" diff --git a/plugins/experimental/inliner/chunk-decoder.cc b/plugins/experimental/inliner/chunk-decoder.cc index 0a458ce5408..455fa168575 100644 --- a/plugins/experimental/inliner/chunk-decoder.cc +++ b/plugins/experimental/inliner/chunk-decoder.cc @@ -41,7 +41,7 @@ limitations under the License. */ #include -#include +#include #include #include "chunk-decoder.h" @@ -111,7 +111,7 @@ ChunkDecoder::parseSize(const char *p, const int64_t s) } bool -ChunkDecoder::isSizeState(void) const +ChunkDecoder::isSizeState() const { return state_ == State::kDataN || state_ == State::kEndN || state_ == State::kSize || state_ == State::kSizeN || state_ == State::kSizeR; diff --git a/plugins/experimental/inliner/fetcher.cc b/plugins/experimental/inliner/fetcher.cc index 644c0bc82e7..ad492d91e9c 100644 --- a/plugins/experimental/inliner/fetcher.cc +++ b/plugins/experimental/inliner/fetcher.cc @@ -45,7 +45,7 @@ namespace ats { void -HttpParser::destroyParser(void) +HttpParser::destroyParser() { if (parser_ != nullptr) { TSHttpParserClear(parser_); diff --git a/plugins/experimental/inliner/html-parser.cc b/plugins/experimental/inliner/html-parser.cc index 98ac85d723c..535fcdb7074 100644 --- a/plugins/experimental/inliner/html-parser.cc +++ b/plugins/experimental/inliner/html-parser.cc @@ -21,7 +21,7 @@ limitations under the License. 
*/ -#include +#include #include #include @@ -31,15 +31,15 @@ namespace ats { namespace inliner { - Attributes::operator std::string(void) const + Attributes::operator std::string() const { std::string result; - for (Attributes::const_iterator item = begin(); item != end(); ++item) { - if (!item->first.empty()) { - if (!item->second.empty()) { - result += item->first + "=\"" + item->second += "\" "; + for (const auto &item : *this) { + if (!item.first.empty()) { + if (!item.second.empty()) { + result += item.first + "=\"" + item.second += "\" "; } else { - result += item->first; + result += item.first; } } } diff --git a/plugins/experimental/inliner/inliner-handler.cc b/plugins/experimental/inliner/inliner-handler.cc index dbebdf310f6..b13f4e544b2 100644 --- a/plugins/experimental/inliner/inliner-handler.cc +++ b/plugins/experimental/inliner/inliner-handler.cc @@ -20,7 +20,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include @@ -67,7 +67,7 @@ namespace inliner } void - Handler::parse(void) + Handler::parse() { assert(reader_ != nullptr); TSIOBufferBlock block = TSIOBufferReaderStart(reader_); @@ -94,28 +94,28 @@ namespace inliner { std::string src; - for (Attributes::const_iterator item = a.begin(); item != a.end(); ++item) { - if (!item->first.empty()) { - src = item->second; + for (const auto &item : a) { + if (!item.first.empty()) { + src = item.second; } } const bool isTagged = - (src.find("http://") == 0 || src.find("https://") == 0) && src.find("inline", src.find("#")) != std::string::npos; + (src.find("http://") == 0 || src.find("https://") == 0) && src.find("inline", src.find('#')) != std::string::npos; if (isTagged) { std::string classes, original = " "; - for (Attributes::const_iterator item = a.begin(); item != a.end(); ++item) { - if (!item->first.empty()) { - if (!item->second.empty()) { - if (item->first == "class") { - classes = item->second; - } else if 
(item->first.find("src") == std::string::npos) { - original += item->first + "=\"" + item->second += "\" "; + for (const auto &item : a) { + if (!item.first.empty()) { + if (!item.second.empty()) { + if (item.first == "class") { + classes = item.second; + } else if (item.first.find("src") == std::string::npos) { + original += item.first + "=\"" + item.second += "\" "; } } } else { - original += item->first + " "; + original += item.first + " "; } } @@ -130,7 +130,7 @@ namespace inliner } std::string - Handler::generateId(void) + Handler::generateId() { std::stringstream ss; // TODO(dmorilha): stop using memory address here. @@ -139,7 +139,7 @@ namespace inliner } void - Handler::abort(void) + Handler::abort() { abort_ = true; assert(ioSink_); diff --git a/plugins/experimental/inliner/ts.cc b/plugins/experimental/inliner/ts.cc index 9584d40346f..752d43c7993 100644 --- a/plugins/experimental/inliner/ts.cc +++ b/plugins/experimental/inliner/ts.cc @@ -58,7 +58,7 @@ namespace io } int64_t - IO::consume(void) const + IO::consume() const { assert(reader != nullptr); const int64_t available = TSIOBufferReaderAvail(reader); @@ -69,7 +69,7 @@ namespace io } int64_t - IO::done(void) const + IO::done() const { assert(vio != nullptr); assert(reader != nullptr); @@ -257,7 +257,7 @@ namespace io } void - WriteOperation::close(void) + WriteOperation::close() { assert(mutex_ != nullptr); const Lock lock(mutex_); @@ -269,7 +269,7 @@ namespace io } void - WriteOperation::abort(void) + WriteOperation::abort() { assert(mutex_ != nullptr); const Lock lock(mutex_); @@ -287,7 +287,7 @@ namespace io } void - IOSink::process(void) + IOSink::process() { const WriteOperationPointer operation = operation_.lock(); @@ -308,7 +308,7 @@ namespace io } Lock - IOSink::lock(void) + IOSink::lock() { const WriteOperationPointer operation = operation_.lock(); @@ -323,7 +323,7 @@ namespace io } void - IOSink::abort(void) + IOSink::abort() { const WriteOperationPointer operation = operation_.lock(); if 
(operation) { @@ -394,7 +394,7 @@ namespace io } SinkPointer - IOSink::branch(void) + IOSink::branch() { if (!data_) { data_.reset(new Data(shared_from_this())); @@ -406,7 +406,7 @@ namespace io } SinkPointer - Sink::branch(void) + Sink::branch() { DataPointer data; if (data_) { diff --git a/plugins/experimental/memcached_remap/memcached_remap.cc b/plugins/experimental/memcached_remap/memcached_remap.cc index 433089716a8..23e5dc1754a 100644 --- a/plugins/experimental/memcached_remap/memcached_remap.cc +++ b/plugins/experimental/memcached_remap/memcached_remap.cc @@ -18,8 +18,8 @@ #include #include -#include -#include +#include +#include #include // change this on your box diff --git a/plugins/experimental/memcached_remap/sample.py b/plugins/experimental/memcached_remap/sample.py index 3fa1bd79dd3..07f28209ff8 100755 --- a/plugins/experimental/memcached_remap/sample.py +++ b/plugins/experimental/memcached_remap/sample.py @@ -29,6 +29,5 @@ mc.set("http://localhost:80/", "http://localhost:8080"); # Print the keys that are saved -print "response-1 is '%s'" %(mc.get("http://127.0.0.1:80/")) -print "response-2 is '%s'" %(mc.get("http://localhost:80/")) - +print "response-1 is '%s'" % (mc.get("http://127.0.0.1:80/")) +print "response-2 is '%s'" % (mc.get("http://localhost:80/")) diff --git a/plugins/experimental/metalink/test/chunkedEncoding b/plugins/experimental/metalink/test/chunkedEncoding index 9d979017ab4..000019be12b 100755 --- a/plugins/experimental/metalink/test/chunkedEncoding +++ b/plugins/experimental/metalink/test/chunkedEncoding @@ -22,78 +22,84 @@ print '''1..1 chunkedEncoding from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - No final chunk yet' + print 'not ok 1 - No final chunk yet' + + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def 
requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version - ctx.write('chunkedEncoding') + ctx.write('chunkedEncoding') + + # If the proxy reads the final chunk before it sends the + # response headers, it may send a Content-Length header vs. a + # chunked response + reactor.callLater(1, ctx.finish) - # If the proxy reads the final chunk before it sends the - # response headers, it may send a Content-Length header vs. a - # chunked response - reactor.callLater(1, ctx.finish) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' + reason.printTraceback() - reactor.stop() - - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - print 'not ok 1 - Got a Content-Length header vs. 
a chunked response' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - # No hope of a final chunk now - reactor.stop() + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + print 'not ok 1 - Got a Content-Length header vs. a chunked response' - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent a Content-Length header vs. a chunked - # response). (Override connectionLost() when the proxy crashes or - # we stop the reactor.) - # - # Data that was already received will get processed (the end of - # the headers), then shutdown events will fire (connections will - # get closed), and then finally the reactor will grind to a halt. - def handleResponseEnd(ctx): - pass - - def handleResponsePart(ctx, data): - if data.endswith('0\r\n\r\n'): - print 'ok 1 - Got the final chunk' + # No hope of a final chunk now + reactor.stop() + + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent a Content-Length header vs. a chunked + # response). (Override connectionLost() when the proxy crashes or + # we stop the reactor.) + # + # Data that was already received will get processed (the end of + # the headers), then shutdown events will fire (connections will + # get closed), and then finally the reactor will grind to a halt. 
+ def handleResponseEnd(ctx): + pass + + def handleResponsePart(ctx, data): + if data.endswith('0\r\n\r\n'): + print 'ok 1 - Got the final chunk' + + reactor.stop() - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/chunkedEncodingDisconnect b/plugins/experimental/metalink/test/chunkedEncodingDisconnect index 8a6309d65cd..8e5cd458b9e 100755 --- a/plugins/experimental/metalink/test/chunkedEncodingDisconnect +++ b/plugins/experimental/metalink/test/chunkedEncodingDisconnect @@ -23,75 +23,81 @@ print '''1..1 chunkedEncodingDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - The client was left hanging' + print 'not ok 1 - The client was left hanging' + + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.write('chunkedEncodingDisconnect') - ctx.write('chunkedEncodingDisconnect') + # If the origin disconnects before the proxy sends the + # response headers, the proxy may send a Content-Length header + # vs. a chunked response + reactor.callLater(1, ctx.transport.loseConnection) - # If the origin disconnects before the proxy sends the - # response headers, the proxy may send a Content-Length header - # vs. 
a chunked response - reactor.callLater(1, ctx.transport.loseConnection) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'ok 1 - The client connection closed' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'ok 1 - The client connection closed' - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - print 'not ok 1 - Got a Content-Length header vs. a chunked response' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - # Who cares what happens now? - reactor.stop() + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + print 'not ok 1 - Got a Content-Length header vs. a chunked response' - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent a Content-Length header vs. a chunked - # response). (Override connectionLost() when the proxy closes the - # client connection or we stop the reactor.) - def handleResponseEnd(ctx): - pass + # Who cares what happens now? + reactor.stop() - def handleResponsePart(ctx, data): - if data.endswith('0\r\n\r\n'): - print 'not ok 1 - Got a final chunk' + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent a Content-Length header vs. 
a chunked + # response). (Override connectionLost() when the proxy closes the + # client connection or we stop the reactor.) + def handleResponseEnd(ctx): + pass + + def handleResponsePart(ctx, data): + if data.endswith('0\r\n\r\n'): + print 'not ok 1 - Got a final chunk' + + # Who cares what happens now? + reactor.stop() - # Who cares what happens now? - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/clientDisconnect b/plugins/experimental/metalink/test/clientDisconnect index b9ff984c311..54398d2ecdd 100755 --- a/plugins/experimental/metalink/test/clientDisconnect +++ b/plugins/experimental/metalink/test/clientDisconnect @@ -22,67 +22,73 @@ print '''1..1 clientDissconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' + + reactor.stop() - reactor.stop() reactor.callLater(3, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version - ctx.write('clientDisconnect') + ctx.write('clientDisconnect') - # The proxy crashes only after the response is complete - def callback(): - try: - ctx.finish() + # The proxy crashes only after the response is complete + def callback(): + try: + ctx.finish() - except RuntimeError: - pass + except RuntimeError: + pass - # Open another connection - class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'not ok 1 - Did the proxy crash? 
(Can\'t open another connection to it.)' + # Open another connection + class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): + print 'not ok 1 - Did the proxy crash? (Can\'t open another connection to it.)' - reactor.stop() + reactor.stop() - class protocol(protocol.Protocol): - def connectionMade(ctx): - print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' + class protocol(protocol.Protocol): + def connectionMade(ctx): + print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' - reactor.stop() + reactor.stop() - reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + + reactor.callLater(1, callback) - reactor.callLater(1, callback) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' 
+ reason.printTraceback() - reactor.stop() + reactor.stop() + + class protocol(protocol.Protocol): + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - class protocol(protocol.Protocol): - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + # Disconnect after the proxy sends the response headers + reactor.callLater(1, ctx.transport.loseConnection) - # Disconnect after the proxy sends the response headers - reactor.callLater(1, ctx.transport.loseConnection) tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/contentLength b/plugins/experimental/metalink/test/contentLength index 4d1a9048aa0..d9635b2cb17 100755 --- a/plugins/experimental/metalink/test/contentLength +++ b/plugins/experimental/metalink/test/contentLength @@ -22,79 +22,85 @@ print '''1..1 contentLength from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' 
+ + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setHeader('Content-Length', 13) + ctx.write('contentLength') - ctx.setHeader('Content-Length', 13) - ctx.write('contentLength') origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'not ok 1 - Did the proxy crash? 
(The client connection closed.)' - def handleEndHeaders(ctx): - try: - reactor.stop() + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - except error.ReactorNotRunning: - pass + def handleEndHeaders(ctx): + try: + reactor.stop() - else: - print 'not ok 1 - No Content-Length header' + except error.ReactorNotRunning: + pass - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - if v != '13': - print 'not', + else: + print 'not ok 1 - No Content-Length header' - print 'ok 1 - Content-Length header' + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + if v != '13': + print 'not', - reactor.stop() + print 'ok 1 - Content-Length header' + + reactor.stop() + + # Avoid calling undefined handleResponse() at the end of the + # message. (Override connectionLost() when the proxy crashes or + # we stop the reactor.) + # + # Data that was already received will get processed (the end of + # the headers), then shutdown events will fire (connections will + # get closed), and then finally the reactor will grind to a halt. + def handleResponseEnd(ctx): + pass - # Avoid calling undefined handleResponse() at the end of the - # message. (Override connectionLost() when the proxy crashes or - # we stop the reactor.) - # - # Data that was already received will get processed (the end of - # the headers), then shutdown events will fire (connections will - # get closed), and then finally the reactor will grind to a halt. 
- def handleResponseEnd(ctx): - pass tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/contentLengthDisconnect b/plugins/experimental/metalink/test/contentLengthDisconnect index ee12a450a2a..17c371a7f49 100755 --- a/plugins/experimental/metalink/test/contentLengthDisconnect +++ b/plugins/experimental/metalink/test/contentLengthDisconnect @@ -23,72 +23,78 @@ print '''1..2 contentLengthDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 2 - The client was left hanging' + print 'not ok 2 - The client was left hanging' + + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setHeader('Content-Length', 24) + ctx.write('contentLengthDisconnect') - ctx.setHeader('Content-Length', 24) - ctx.write('contentLengthDisconnect') + # If the origin disconnects before the proxy sends the + # response headers, the proxy may send the wrong + # Content-Length header + reactor.callLater(1, ctx.transport.loseConnection) - # If the origin disconnects before the proxy sends the - # response headers, the proxy may send the wrong - # Content-Length header - reactor.callLater(1, ctx.transport.loseConnection) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' 
- reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() + + except error.ReactorNotRunning: + pass + + else: + print 'ok 2 - The client connection closed' - else: - print 'ok 2 - The client connection closed' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + if v != '24': + print 'not', - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - if v != '24': - print 'not', + # Who cares what happens now? + reactor.stop() - # Who cares what happens now? - reactor.stop() + print 'ok 1 - Content-Length header' - print 'ok 1 - Content-Length header' + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent the wrong Content-Length header). + # (Override connectionLost() when the proxy closes the client + # connection or we stop the reactor.) + def handleResponseEnd(ctx): + pass - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent the wrong Content-Length header). - # (Override connectionLost() when the proxy closes the client - # connection or we stop the reactor.) 
- def handleResponseEnd(ctx): - pass tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/finalChunkedEncodingDisconnect b/plugins/experimental/metalink/test/finalChunkedEncodingDisconnect index 6c60f4e6f74..41887a92a33 100755 --- a/plugins/experimental/metalink/test/finalChunkedEncodingDisconnect +++ b/plugins/experimental/metalink/test/finalChunkedEncodingDisconnect @@ -23,90 +23,96 @@ print '''1..1 finalChunkEncodingDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - No final chunk yet' + print 'not ok 1 - No final chunk yet' + + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version - ctx.write('finalChunkedEncodingDisconnect') + ctx.write('finalChunkedEncodingDisconnect') - # If the proxy reads the final chunk before it sends the - # response headers, it may send a Content-Length header vs. a - # chunked response - def callback(): - try: - ctx.finish() + # If the proxy reads the final chunk before it sends the + # response headers, it may send a Content-Length header vs. a + # chunked response + def callback(): + try: + ctx.finish() - except RuntimeError: - print 'not ok 1 - Did the proxy crash? (The origin connection closed.)' + except RuntimeError: + print 'not ok 1 - Did the proxy crash? 
(The origin connection closed.)' - reactor.stop() + reactor.stop() - else: - ctx.transport.loseConnection() + else: + ctx.transport.loseConnection() + + reactor.callLater(1, callback) - reactor.callLater(1, callback) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' + reason.printTraceback() - reactor.stop() - - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - print 'not ok 1 - Got a Content-Length header vs. a chunked response' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - # No hope of a final chunk now - reactor.stop() + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + print 'not ok 1 - Got a Content-Length header vs. a chunked response' - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent a Content-Length header vs. a chunked - # response). (Override connectionLost() when the proxy crashes or - # we stop the reactor.) 
- # - # Data that was already received will get processed (the end of - # the headers), then shutdown events will fire (connections will - # get closed), and then finally the reactor will grind to a halt. - def handleResponseEnd(ctx): - pass - - def handleResponsePart(ctx, data): - if data.endswith('0\r\n\r\n'): - print 'ok 1 - Got the final chunk' + # No hope of a final chunk now + reactor.stop() + + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent a Content-Length header vs. a chunked + # response). (Override connectionLost() when the proxy crashes or + # we stop the reactor.) + # + # Data that was already received will get processed (the end of + # the headers), then shutdown events will fire (connections will + # get closed), and then finally the reactor will grind to a halt. + def handleResponseEnd(ctx): + pass + + def handleResponsePart(ctx, data): + if data.endswith('0\r\n\r\n'): + print 'ok 1 - Got the final chunk' + + reactor.stop() - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/headers b/plugins/experimental/metalink/test/headers index 9e3e19352a4..adc071a12c5 100755 --- a/plugins/experimental/metalink/test/headers +++ b/plugins/experimental/metalink/test/headers @@ -22,54 +22,60 @@ print '''1..1 headers from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - No response yet' + print 'not ok 1 - No response yet' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = 
version + ctx.write('') - ctx.write('') origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() + + except error.ReactorNotRunning: + pass + + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def handleStatus(ctx, version, status, message): + print 'ok 1 - Got the response status' - def handleStatus(ctx, version, status, message): - print 'ok 1 - Got the response status' + reactor.stop() - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/http09 b/plugins/experimental/metalink/test/http09 index 5667c13679e..675459a9ef4 100755 --- a/plugins/experimental/metalink/test/http09 +++ b/plugins/experimental/metalink/test/http09 @@ -32,54 +32,60 @@ print '''1..1 http09 from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' 
+ + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(protocol.Factory): - class protocol(protocol.Protocol): - def connectionMade(ctx): - ctx.transport.write('http09\r\n') + class protocol(protocol.Protocol): + def connectionMade(ctx): + ctx.transport.write('http09\r\n') + + # The proxy crashes only after the response is complete + ctx.transport.loseConnection() - # The proxy crashes only after the response is complete - ctx.transport.loseConnection() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' + reason.printTraceback() - reactor.stop() + reactor.stop() + + class protocol(protocol.Protocol): + def connectionLost(ctx, reason): - class protocol(protocol.Protocol): - def connectionLost(ctx, reason): + # Open another connection + class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): + print 'not ok 1 - Did the proxy crash? (Can\'t open another connection to it.)' - # Open another connection - class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'not ok 1 - Did the proxy crash? 
(Can\'t open another connection to it.)' + reactor.stop() - reactor.stop() + class protocol(protocol.Protocol): + def connectionMade(ctx): + print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' - class protocol(protocol.Protocol): - def connectionMade(ctx): - print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' + reactor.stop() - reactor.stop() + reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) - reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/location b/plugins/experimental/metalink/test/location index 6e293a6d275..0ab835b4285 100755 --- a/plugins/experimental/metalink/test/location +++ b/plugins/experimental/metalink/test/location @@ -22,107 +22,114 @@ print '''1..2 location from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' 
+ + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + if target == '/location': - if target == '/location': + # Satisfy every case of + # proxy.config.http.cache.required_headers + ctx.setHeader('Cache-Control', 'max-age=1') - # Satisfy every case of - # proxy.config.http.cache.required_headers - ctx.setHeader('Cache-Control', 'max-age=1') + ctx.write('location') + ctx.finish() - ctx.write('location') - ctx.finish() + else: - else: + ctx.setHeader('Digest', 'SHA-256=5urqGOiF4QeIKbVt80iWvlq1FDno8LoAyxYkssVywQ4=') + ctx.setHeader('Location', 'http://example.com') + ctx.finish() - ctx.setHeader('Digest', 'SHA-256=5urqGOiF4QeIKbVt80iWvlq1FDno8LoAyxYkssVywQ4=') - ctx.setHeader('Location', 'http://example.com') - ctx.finish() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() + + except error.ReactorNotRunning: + pass + + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - else: - print 'not ok 1 - Did the proxy crash? 
(The client connection closed.)' + # Get a response with a Location and a Digest header and check + # that the Location header is not rewritten. Then get the same + # response after caching a matching file from a different URL and + # check that this time the header is rewritten. + def connectionMade(ctx): + ctx.transport.write( + 'GET {0}:{1} HTTP/1.1\r\n\r\nGET {0}:{1}/location HTTP/1.1\r\n\r\nGET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - # Get a response with a Location and a Digest header and check - # that the Location header is not rewritten. Then get the same - # response after caching a matching file from a different URL and - # check that this time the header is rewritten. - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\nGET {0}:{1}/location HTTP/1.1\r\n\r\nGET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def handleResponsePart(ctx, data): + try: + h, r = data.split('0\r\n\r\n', 1) - def handleResponsePart(ctx, data): - try: - h, r = data.split('0\r\n\r\n', 1) + except ValueError: + pass - except ValueError: - pass + else: - else: + ctx.firstLine = True + ctx.setLineMode(r) - ctx.firstLine = True - ctx.setLineMode(r) + def handleStatus(ctx, version, status, message): + def handleHeader(k, v): + if k.lower() == 'location': + if v != 'http://example.com': + print 'not', - def handleStatus(ctx, version, status, message): - def handleHeader(k, v): - if k.lower() == 'location': - if v != 'http://example.com': - print 'not', + print 'ok 1 - Before' - print 'ok 1 - Before' + ctx.handleHeader = handleHeader - ctx.handleHeader = handleHeader + def handleStatus(version, status, message): + del ctx.handleHeader - def handleStatus(version, status, message): - del ctx.handleHeader + def handleStatus(version, staus, message): + def handleHeader(k, v): + if k.lower() == 'location': + if v != 'http://{0}:{1}/location'.format(*origin.socket.getsockname()): + print 'not', - def handleStatus(version, 
staus, message): - def handleHeader(k, v): - if k.lower() == 'location': - if v != 'http://{0}:{1}/location'.format(*origin.socket.getsockname()): - print 'not', + print 'ok 2 - After' - print 'ok 2 - After' + reactor.stop() - reactor.stop() + ctx.handleHeader = handleHeader - ctx.handleHeader = handleHeader + ctx.handleStatus = handleStatus - ctx.handleStatus = handleStatus + ctx.handleStatus = handleStatus - ctx.handleStatus = handleStatus tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/longer b/plugins/experimental/metalink/test/longer index e087e1ccbd9..d56d18c9eba 100755 --- a/plugins/experimental/metalink/test/longer +++ b/plugins/experimental/metalink/test/longer @@ -26,69 +26,75 @@ print '''1..1 longer from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - No Content-Length header' + print 'not ok 1 - No Content-Length header' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setHeader('Content-Length', 1) + ctx.write('longer') - ctx.setHeader('Content-Length', 1) - ctx.write('longer') origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' 
+ reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - if v != '1': - print 'not', + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - print 'ok 1 - Content-Length header' + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + if v != '1': + print 'not', - reactor.stop() + print 'ok 1 - Content-Length header' + + reactor.stop() + + # Avoid calling undefined handleResponse() at the end of the + # message. (Override connectionLost() when the proxy crashes or + # we stop the reactor.) + # + # Data that was already received will get processed (the end of + # the headers), then shutdown events will fire (connections will + # get closed), and then finally the reactor will grind to a halt. + def handleResponseEnd(ctx): + pass - # Avoid calling undefined handleResponse() at the end of the - # message. (Override connectionLost() when the proxy crashes or - # we stop the reactor.) - # - # Data that was already received will get processed (the end of - # the headers), then shutdown events will fire (connections will - # get closed), and then finally the reactor will grind to a halt. 
- def handleResponseEnd(ctx): - pass tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/notCacheable b/plugins/experimental/metalink/test/notCacheable index 33d81847bc4..8d6c529d3f2 100755 --- a/plugins/experimental/metalink/test/notCacheable +++ b/plugins/experimental/metalink/test/notCacheable @@ -22,82 +22,89 @@ print '''1..1 notCacheable from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + if target == '/notCacheable': - if target == '/notCacheable': + ctx.write('notCacheable') + ctx.finish() - ctx.write('notCacheable') - ctx.finish() + else: - else: + ctx.setHeader('Digest', 'SHA-256=BSg5n9c6XBC3jySKsXViB71jhPIoRo3AbCC/gtNlt6k=') + ctx.setHeader('Location', 'http://example.com') + ctx.finish() - ctx.setHeader('Digest', 'SHA-256=BSg5n9c6XBC3jySKsXViB71jhPIoRo3AbCC/gtNlt6k=') - ctx.setHeader('Location', 'http://example.com') - ctx.finish() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' 
+ reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? (The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - # A cache MUST NOT store a response to any request, unless: The - # request method is understood by the cache and defined as being - # cacheable, - ctx.transport.write('NOTCACHEABLE {0}:{1}/notCacheable HTTP/1.1\r\n\r\nGET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def connectionMade(ctx): - def handleResponsePart(ctx, data): - try: - h, r = data.split('0\r\n\r\n', 1) + # A cache MUST NOT store a response to any request, unless: The + # request method is understood by the cache and defined as being + # cacheable, + ctx.transport.write( + 'NOTCACHEABLE {0}:{1}/notCacheable HTTP/1.1\r\n\r\nGET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - except ValueError: - pass + def handleResponsePart(ctx, data): + try: + h, r = data.split('0\r\n\r\n', 1) - else: + except ValueError: + pass - ctx.firstLine = True - ctx.setLineMode(r) + else: - def handleStatus(ctx, version, status, message): - def handleStatus(version, status, message): - print 'ok 1 - The proxy didn\'t crash (got a response status)' + ctx.firstLine = True + ctx.setLineMode(r) - reactor.stop() + def handleStatus(ctx, version, status, message): + def handleStatus(version, status, message): + print 'ok 1 - The proxy didn\'t crash (got a response status)' + + reactor.stop() + + ctx.handleStatus = handleStatus - ctx.handleStatus = handleStatus tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/notModified 
b/plugins/experimental/metalink/test/notModified index 2719458423a..4e664aeb184 100755 --- a/plugins/experimental/metalink/test/notModified +++ b/plugins/experimental/metalink/test/notModified @@ -22,56 +22,62 @@ print '''1..2 notModified from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'ok 2 - The proxy didn\'t crash (the client connection didn\'t close yet)' + print 'ok 2 - The proxy didn\'t crash (the client connection didn\'t close yet)' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setResponseCode(304) + ctx.finish() - ctx.setResponseCode(304) - ctx.finish() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() + + except error.ReactorNotRunning: + pass + + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - else: - print 'not ok 1 - Did the proxy crash? 
(The client connection closed.)' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def handleStatus(ctx, version, status, message): + if status != '304': + print 'not', - def handleStatus(ctx, version, status, message): - if status != '304': - print 'not', + print 'ok 1 - 304 Not Modified response status' - print 'ok 1 - 304 Not Modified response status' tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/pipeliningDisconnect b/plugins/experimental/metalink/test/pipeliningDisconnect index 2d97dc1b06f..501576fa74a 100755 --- a/plugins/experimental/metalink/test/pipeliningDisconnect +++ b/plugins/experimental/metalink/test/pipeliningDisconnect @@ -23,70 +23,77 @@ print '''1..1 pipeliningDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'ok 1 - Did the connection close before the proxy made the second request?' + print 'ok 1 - Did the connection close before the proxy made the second request?' 
+ + reactor.stop() - reactor.stop() reactor.callLater(2, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + def requestReceived(ctx, method, target, version): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version - ctx.write('pipeliningDisconnect') - ctx.finish() + ctx.write('pipeliningDisconnect') + ctx.finish() - # Open another connection - class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'not ok 1 - Did the proxy crash? (Can\'t open another connection to it.)' + # Open another connection + class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): + print 'not ok 1 - Did the proxy crash? 
(Can\'t open another connection to it.)' - reactor.stop() + reactor.stop() - class protocol(protocol.Protocol): - def connectionMade(ctx): - print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' + class protocol(protocol.Protocol): + def connectionMade(ctx): + print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' - reactor.stop() + reactor.stop() - reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) - ctx.channel.requestFactory = requestFactory + ctx.channel.requestFactory = requestFactory - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version + + ctx.write('pipeliningDisconnect') + ctx.finish() - ctx.write('pipeliningDisconnect') - ctx.finish() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' 
+ reason.printTraceback() - reactor.stop() + reactor.stop() + + class protocol(protocol.Protocol): + def connectionMade(ctx): - class protocol(protocol.Protocol): - def connectionMade(ctx): + # Somehow these magic words frequently cause + # INKVConnInternal::do_io_close() to get called after a message + # is already complete + ctx.transport.write( + 'GET {0}:{1}/pipeliningDisconnect0 HTTP/1.1\r\n\r\nGET {0}:{1}/pipeliningDisconnect1 HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + ctx.transport.loseConnection() - # Somehow these magic words frequently cause - # INKVConnInternal::do_io_close() to get called after a message - # is already complete - ctx.transport.write('GET {0}:{1}/pipeliningDisconnect0 HTTP/1.1\r\n\r\nGET {0}:{1}/pipeliningDisconnect1 HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - ctx.transport.loseConnection() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/shortChunkedEncodingDisconnect b/plugins/experimental/metalink/test/shortChunkedEncodingDisconnect index fa2ae59d5ab..f4d7e74ec7b 100755 --- a/plugins/experimental/metalink/test/shortChunkedEncodingDisconnect +++ b/plugins/experimental/metalink/test/shortChunkedEncodingDisconnect @@ -24,73 +24,79 @@ print '''1..1 shortChunkedEncodingDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - The client was left hanging' + print 'not ok 1 - The client was left hanging' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + 
ctx.write('shortChunkedEncodingDisconnect') - ctx.write('shortChunkedEncodingDisconnect') + # Disconnect before the proxy sends the response headers + ctx.transport.loseConnection() - # Disconnect before the proxy sends the response headers - ctx.transport.loseConnection() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'ok 1 - The client connection closed' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'ok 1 - The client connection closed' - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - print 'not ok 1 - Got a Content-Length header vs. a chunked response' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - # Who cares what happens now? - reactor.stop() + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + print 'not ok 1 - Got a Content-Length header vs. a chunked response' - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent a Content-Length header vs. a chunked - # response). (Override connectionLost() when the proxy closes the - # client connection or we stop the reactor.) - def handleResponseEnd(ctx): - pass + # Who cares what happens now? 
+ reactor.stop() - def handleResponsePart(ctx, data): - if data.endswith('0\r\n\r\n'): - print 'not ok 1 - Got a final chunk' + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent a Content-Length header vs. a chunked + # response). (Override connectionLost() when the proxy closes the + # client connection or we stop the reactor.) + def handleResponseEnd(ctx): + pass + + def handleResponsePart(ctx, data): + if data.endswith('0\r\n\r\n'): + print 'not ok 1 - Got a final chunk' + + # Who cares what happens now? + reactor.stop() - # Who cares what happens now? - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/shortClientDisconnect b/plugins/experimental/metalink/test/shortClientDisconnect index a36169e8faa..03ec5a52645 100755 --- a/plugins/experimental/metalink/test/shortClientDisconnect +++ b/plugins/experimental/metalink/test/shortClientDisconnect @@ -23,62 +23,68 @@ print '''1..1 shortClientDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' 
+ + reactor.stop() - reactor.stop() reactor.callLater(3, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): - ctx.client = None - ctx.clientproto = version + ctx.client = None + ctx.clientproto = version - ctx.write('shortClientDisconnect0') + ctx.write('shortClientDisconnect0') - def callback(): - ctx.write('shortClientDisconnect1') + def callback(): + ctx.write('shortClientDisconnect1') - # Open another connection - class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'not ok 1 - Did the proxy crash? (Can\'t open another connection to it.)' + # Open another connection + class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): + print 'not ok 1 - Did the proxy crash? (Can\'t open another connection to it.)' - reactor.stop() + reactor.stop() - class protocol(protocol.Protocol): - def connectionMade(ctx): - print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' + class protocol(protocol.Protocol): + def connectionMade(ctx): + print 'ok 1 - The proxy didn\'t crash (opened another connection to it)' - reactor.stop() + reactor.stop() - reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + reactor.callLater(1, tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect) + + reactor.callLater(1, callback) - reactor.callLater(1, callback) origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) + class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): + def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() + print 'Bail out!' 
+ reason.printTraceback() - reactor.stop() + reactor.stop() + + class protocol(protocol.Protocol): + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - class protocol(protocol.Protocol): - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + # Disconnect before the proxy sends the response headers + ctx.transport.loseConnection() - # Disconnect before the proxy sends the response headers - ctx.transport.loseConnection() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/shortContentLengthDisconnect b/plugins/experimental/metalink/test/shortContentLengthDisconnect index 4ff28cc00b0..9c40f398469 100755 --- a/plugins/experimental/metalink/test/shortContentLengthDisconnect +++ b/plugins/experimental/metalink/test/shortContentLengthDisconnect @@ -24,70 +24,76 @@ print '''1..2 shortContentLengthDisconnect from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 2 - The client was left hanging' + print 'not ok 2 - The client was left hanging' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setHeader('Content-Length', 29) + ctx.write('shortContentLengthDisconnect') - ctx.setHeader('Content-Length', 29) - ctx.write('shortContentLengthDisconnect') + # Disconnect before the proxy sends the response headers + ctx.transport.loseConnection() - # Disconnect before the proxy sends the response headers - 
ctx.transport.loseConnection() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() + + except error.ReactorNotRunning: + pass + + else: + print 'ok 2 - The client connection closed' - else: - print 'ok 2 - The client connection closed' + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + if v != '29': + print 'not', - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - if v != '29': - print 'not', + # Who cares what happens now? + reactor.stop() - # Who cares what happens now? - reactor.stop() + print 'ok 1 - Content-Length header' - print 'ok 1 - Content-Length header' + # Avoid calling undefined handleResponse() at the end of the + # message (if the proxy sent the wrong Content-Length header). + # (Override connectionLost() when the proxy closes the client + # connection or we stop the reactor.) + def handleResponseEnd(ctx): + pass - # Avoid calling undefined handleResponse() at the end of the - # message (if the proxy sent the wrong Content-Length header). - # (Override connectionLost() when the proxy closes the client - # connection or we stop the reactor.) 
- def handleResponseEnd(ctx): - pass tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/metalink/test/zero b/plugins/experimental/metalink/test/zero index 4be1e5cd610..71fe60d8961 100755 --- a/plugins/experimental/metalink/test/zero +++ b/plugins/experimental/metalink/test/zero @@ -22,69 +22,75 @@ print '''1..1 zero from twisted.internet import error, protocol, reactor, tcp from twisted.web import http + def callback(): - print 'not ok 1 - Why didn\'t the test finish yet?' + print 'not ok 1 - Why didn\'t the test finish yet?' + + reactor.stop() - reactor.stop() reactor.callLater(1, callback) + class factory(http.HTTPFactory): - class protocol(http.HTTPChannel): - class requestFactory(http.Request): - def requestReceived(ctx, method, target, version): + class protocol(http.HTTPChannel): + class requestFactory(http.Request): + def requestReceived(ctx, method, target, version): + + ctx.client = None + ctx.clientproto = version - ctx.client = None - ctx.clientproto = version + ctx.setHeader('Content-Length', 0) + ctx.finish() - ctx.setHeader('Content-Length', 0) - ctx.finish() origin = tcp.Port(0, factory()) origin.startListening() print '# Listening on {0}:{1}'.format(*origin.socket.getsockname()) -class factory(protocol.ClientFactory): - def clientConnectionFailed(ctx, connector, reason): - print 'Bail out!' - reason.printTraceback() +class factory(protocol.ClientFactory): + def clientConnectionFailed(ctx, connector, reason): - reactor.stop() + print 'Bail out!' + reason.printTraceback() - class protocol(http.HTTPClient): - def connectionLost(ctx, reason): - try: reactor.stop() - except error.ReactorNotRunning: - pass + class protocol(http.HTTPClient): + def connectionLost(ctx, reason): + try: + reactor.stop() - else: - print 'not ok 1 - Did the proxy crash? 
(The client connection closed.)' + except error.ReactorNotRunning: + pass - def connectionMade(ctx): - ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) + else: + print 'not ok 1 - Did the proxy crash? (The client connection closed.)' - def handleEndHeaders(ctx): - try: - reactor.stop() + def connectionMade(ctx): + ctx.transport.write('GET {0}:{1} HTTP/1.1\r\n\r\n'.format(*origin.socket.getsockname())) - except error.ReactorNotRunning: - pass + def handleEndHeaders(ctx): + try: + reactor.stop() - else: - print 'not ok 1 - No Content-Length header' + except error.ReactorNotRunning: + pass - def handleHeader(ctx, k, v): - if k.lower() == 'content-length': - if v != '0': - print 'not', + else: + print 'not ok 1 - No Content-Length header' - print 'ok 1 - Content-Length header' + def handleHeader(ctx, k, v): + if k.lower() == 'content-length': + if v != '0': + print 'not', + + print 'ok 1 - Content-Length header' + + reactor.stop() - reactor.stop() tcp.Connector('localhost', 8080, factory(), 30, None, reactor).connect() diff --git a/plugins/experimental/money_trace/money_trace.cc b/plugins/experimental/money_trace/money_trace.cc index da8acc1cab2..bbdf436d012 100644 --- a/plugins/experimental/money_trace/money_trace.cc +++ b/plugins/experimental/money_trace/money_trace.cc @@ -19,8 +19,8 @@ #include #include -#include -#include +#include +#include #include "ts/ts.h" #include "ts/remap.h" diff --git a/plugins/experimental/multiplexer/ats-multiplexer.cc b/plugins/experimental/multiplexer/ats-multiplexer.cc index 386c148186b..e89f23dfe10 100644 --- a/plugins/experimental/multiplexer/ats-multiplexer.cc +++ b/plugins/experimental/multiplexer/ats-multiplexer.cc @@ -24,7 +24,7 @@ #include #include -#include +#include #include "dispatch.h" #include "fetcher.h" diff --git a/plugins/experimental/multiplexer/chunk-decoder.cc b/plugins/experimental/multiplexer/chunk-decoder.cc index 890695787f1..ce471560770 100644 --- 
a/plugins/experimental/multiplexer/chunk-decoder.cc +++ b/plugins/experimental/multiplexer/chunk-decoder.cc @@ -21,7 +21,7 @@ limitations under the License. */ #include -#include +#include #include "chunk-decoder.h" @@ -91,7 +91,7 @@ ChunkDecoder::parseSize(const char *p, const int64_t s) } bool -ChunkDecoder::isSizeState(void) const +ChunkDecoder::isSizeState() const { return state_ == State::kDataN || state_ == State::kEndN || state_ == State::kSize || state_ == State::kSizeN || state_ == State::kSizeR; diff --git a/plugins/experimental/multiplexer/dispatch.cc b/plugins/experimental/multiplexer/dispatch.cc index ca031bdda5e..bdbd606d209 100644 --- a/plugins/experimental/multiplexer/dispatch.cc +++ b/plugins/experimental/multiplexer/dispatch.cc @@ -20,7 +20,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include "dispatch.h" @@ -150,14 +150,14 @@ class Handler } void - error(void) + error() { TSError("[" PLUGIN_TAG "] error when communicating with \"%s\"\n", url.c_str()); TSStatIntIncrement(statistics.failures, 1); } void - timeout(void) + timeout() { TSError("[" PLUGIN_TAG "] timeout when communicating with \"%s\"\n", url.c_str()); TSStatIntIncrement(statistics.timeouts, 1); @@ -189,7 +189,7 @@ class Handler } void - done(void) + done() { struct timeval end; diff --git a/plugins/experimental/multiplexer/fetcher.cc b/plugins/experimental/multiplexer/fetcher.cc index 7f51887635c..54cfb2dfae0 100644 --- a/plugins/experimental/multiplexer/fetcher.cc +++ b/plugins/experimental/multiplexer/fetcher.cc @@ -25,7 +25,7 @@ namespace ats { void -HttpParser::destroyParser(void) +HttpParser::destroyParser() { if (parser_ != nullptr) { TSHttpParserClear(parser_); diff --git a/plugins/experimental/multiplexer/original-request.cc b/plugins/experimental/multiplexer/original-request.cc index 75077400ca9..1bedea0ba4c 100644 --- a/plugins/experimental/multiplexer/original-request.cc +++ 
b/plugins/experimental/multiplexer/original-request.cc @@ -77,7 +77,7 @@ OriginalRequest::OriginalRequest(const TSMBuffer b, const TSMLoc l) : buffer_(b) } } -OriginalRequest::~OriginalRequest(void) +OriginalRequest::~OriginalRequest() { urlScheme(original.urlScheme); urlHost(original.urlHost); diff --git a/plugins/experimental/multiplexer/post.cc b/plugins/experimental/multiplexer/post.cc index 89f8910db60..c3419b0556f 100644 --- a/plugins/experimental/multiplexer/post.cc +++ b/plugins/experimental/multiplexer/post.cc @@ -20,7 +20,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include "post.h" diff --git a/plugins/experimental/mysql_remap/mysql_remap.cc b/plugins/experimental/mysql_remap/mysql_remap.cc index f0818b1f22b..f7f6527c7b8 100644 --- a/plugins/experimental/mysql_remap/mysql_remap.cc +++ b/plugins/experimental/mysql_remap/mysql_remap.cc @@ -18,7 +18,7 @@ #include #include -#include +#include #include #include "mysql/mysql.h" @@ -118,7 +118,7 @@ do_mysql_remap(TSCont contp, TSHttpTxn txnp) TSUrlHostSet(reqp, url_loc, row[1], -1); TSUrlSchemeSet(reqp, url_loc, row[0], -1); TSUrlPortSet(reqp, url_loc, atoi(row[2])); - } while (0); + } while (false); ret_val = true; @@ -208,8 +208,8 @@ TSPluginInit(int argc, const char *argv[]) host = iniparser_getstring(ini, "mysql_remap:mysql_host", (char *)"localhost"); port = iniparser_getint(ini, "mysql_remap:mysql_port", 3306); - username = iniparser_getstring(ini, "mysql_remap:mysql_username", NULL); - password = iniparser_getstring(ini, "mysql_remap:mysql_password", NULL); + username = iniparser_getstring(ini, "mysql_remap:mysql_username", nullptr); + password = iniparser_getstring(ini, "mysql_remap:mysql_password", nullptr); db = iniparser_getstring(ini, "mysql_remap:mysql_database", (char *)"mysql_remap"); if (mysql_library_init(0, NULL, NULL)) { diff --git a/plugins/experimental/ssl_cert_loader/domain-tree.cc 
b/plugins/experimental/ssl_cert_loader/domain-tree.cc index 0513d56722e..47324b3ac9b 100644 --- a/plugins/experimental/ssl_cert_loader/domain-tree.cc +++ b/plugins/experimental/ssl_cert_loader/domain-tree.cc @@ -20,9 +20,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include -#include +#include #include "domain-tree.h" // return true if comparable. Return type of compare in relative parameter @@ -30,7 +30,7 @@ bool DomainNameTree::DomainNameNode::compare(std::string key, int &relative) { - size_t star_loc = key.find("*"); + size_t star_loc = key.find('*'); bool is_wild = false; if (star_loc != std::string::npos) { @@ -43,7 +43,7 @@ DomainNameTree::DomainNameNode::compare(std::string key, int &relative) } bool -DomainNameTree::DomainNameNode::prunedCompare(std::string key, int &relative, bool is_wild) +DomainNameTree::DomainNameNode::prunedCompare(const std::string &key, int &relative, bool is_wild) { if (key == this->key) { relative = 0; @@ -89,7 +89,7 @@ DomainNameTree::find(std::string key, bool best_match) { DomainNameNode *retval = nullptr; DomainNameNode *first = nullptr; - size_t star_loc = key.find("*"); + size_t star_loc = key.find('*'); bool is_wild = false; if (star_loc != std::string::npos) { @@ -152,7 +152,7 @@ DomainNameTree::insert(std::string key, void *payload, int order) int relative; if (node->compare(key, relative)) { - size_t star_loc = key.find("*"); + size_t star_loc = key.find('*'); bool is_wild = false; if (star_loc != std::string::npos) { diff --git a/plugins/experimental/ssl_cert_loader/domain-tree.h b/plugins/experimental/ssl_cert_loader/domain-tree.h index a5697bd3d05..c957a7d7f6e 100644 --- a/plugins/experimental/ssl_cert_loader/domain-tree.h +++ b/plugins/experimental/ssl_cert_loader/domain-tree.h @@ -52,7 +52,7 @@ class DomainNameTree // 0 if eq. < 0 if node key is broader. 
> 0 if parameter key is broader bool compare(std::string key, int &relative); // The wildcard is pruned out of the key - bool prunedCompare(std::string key, int &relative, bool is_wild); + bool prunedCompare(const std::string &key, int &relative, bool is_wild); std::string key; // The string trailing the * (if any) int order; // Track insert order for conflict resolution void *payload; diff --git a/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc b/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc index 5f0455cfcdf..4e339c780c4 100644 --- a/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc +++ b/plugins/experimental/ssl_cert_loader/ssl-cert-loader.cc @@ -22,9 +22,9 @@ limitations under the License. */ -#include +#include #include -#include +#include #include #include #include @@ -78,7 +78,7 @@ class SslEntry std::string ConfigPath; typedef std::pair IpRange; -typedef std::deque IpRangeQueue; +using IpRangeQueue = std::deque; Configuration Config; // global configuration @@ -88,7 +88,7 @@ Parse_Addr_String(ts::ConstBuffer const &text, IpRange &range) IpAddr newAddr; std::string textstr(text._ptr, text._size); // Is there a hyphen? 
- size_t hyphen_pos = textstr.find("-"); + size_t hyphen_pos = textstr.find('-'); if (hyphen_pos != std::string::npos) { std::string addr1 = textstr.substr(0, hyphen_pos); @@ -301,19 +301,19 @@ Parse_Config(Value &parent, ParsedSslValues &orig_values) Lookup.tree.insert(cur_values.server_name, entry, Parse_order++); } if (cur_values.server_ips.size() > 0) { - for (size_t i = 0; i < cur_values.server_ips.size(); i++) { + for (auto &server_ip : cur_values.server_ips) { IpEndpoint first, second; - first.assign(cur_values.server_ips[i].first); - second.assign(cur_values.server_ips[i].second); + first.assign(server_ip.first); + second.assign(server_ip.second); Lookup.ipmap.fill(&first, &second, entry); char val1[256], val2[256]; - cur_values.server_ips[i].first.toString(val1, sizeof(val1)); - cur_values.server_ips[i].second.toString(val2, sizeof(val2)); + server_ip.first.toString(val1, sizeof(val1)); + server_ip.second.toString(val2, sizeof(val2)); } } if (entry != nullptr) { - for (size_t i = 0; i < cert_names.size(); i++) { - Lookup.tree.insert(cert_names[i], entry, Parse_order++); + for (const auto &cert_name : cert_names) { + Lookup.tree.insert(cert_name, entry, Parse_order++); } } } @@ -348,8 +348,8 @@ Load_Certificate_Thread(void *arg) TSVConnReenable(vc); } TSMutexUnlock(entry->mutex); - for (size_t i = 0; i < cert_names.size(); i++) { - Lookup.tree.insert(cert_names[i], entry, Parse_order++); + for (const auto &cert_name : cert_names) { + Lookup.tree.insert(cert_name, entry, Parse_order++); } } else { TSMutexUnlock(entry->mutex); diff --git a/plugins/experimental/sslheaders/expand.cc b/plugins/experimental/sslheaders/expand.cc index 870ef754ce9..142a98e852e 100644 --- a/plugins/experimental/sslheaders/expand.cc +++ b/plugins/experimental/sslheaders/expand.cc @@ -22,7 +22,7 @@ #include #include -typedef void (*x509_expansion)(X509 *, BIO *); +using x509_expansion = void (*)(X509 *, BIO *); static void x509_expand_none(X509 *, BIO *) @@ -70,9 +70,17 @@ 
x509_expand_serial(X509 *x509, BIO *bio) static void x509_expand_signature(X509 *x509, BIO *bio) { - ASN1_BIT_STRING *sig = x509->signature; - const char *ptr = (const char *)sig->data; - const char *end = ptr + sig->length; +#ifndef HAVE_X509_GET0_SIGNATURE + const ASN1_BIT_STRING *sig = x509->signature; +#else +#if OPENSSL_VERSION_NUMBER < 0x10100000L +#define X509_get0_signature(psig, palg, x) (X509_get0_signature(const_cast(psig), (palg), (x))) +#endif + const ASN1_BIT_STRING *sig; + X509_get0_signature(&sig, nullptr, x509); +#endif + const char *ptr = (const char *)sig->data; + const char *end = ptr + sig->length; // The canonical OpenSSL way to format the signature seems to be // X509_signature_dump(). However that separates each byte with a ':', which is diff --git a/plugins/experimental/sslheaders/sslheaders.cc b/plugins/experimental/sslheaders/sslheaders.cc index acbf4c542ab..850f97d7abf 100644 --- a/plugins/experimental/sslheaders/sslheaders.cc +++ b/plugins/experimental/sslheaders/sslheaders.cc @@ -123,15 +123,15 @@ static void SslHdrExpand(SSL *ssl, const SslHdrInstance::expansion_list &expansions, TSMBuffer mbuf, TSMLoc mhdr) { if (ssl == nullptr) { - for (SslHdrInstance::expansion_list::const_iterator e = expansions.begin(); e != expansions.end(); ++e) { - SslHdrRemoveHeader(mbuf, mhdr, e->name); + for (const auto &expansion : expansions) { + SslHdrRemoveHeader(mbuf, mhdr, expansion.name); } } else { X509 *x509; BIO *exp = BIO_new(BIO_s_mem()); - for (SslHdrInstance::expansion_list::const_iterator e = expansions.begin(); e != expansions.end(); ++e) { - switch (e->scope) { + for (const auto &expansion : expansions) { + switch (expansion.scope) { case SSL_HEADERS_SCOPE_CLIENT: x509 = SSL_get_peer_certificate(ssl); break; @@ -146,15 +146,15 @@ SslHdrExpand(SSL *ssl, const SslHdrInstance::expansion_list &expansions, TSMBuff continue; } - SslHdrExpandX509Field(exp, x509, e->field); + SslHdrExpandX509Field(exp, x509, expansion.field); if (BIO_pending(exp)) 
{ - SslHdrSetHeader(mbuf, mhdr, e->name, exp); + SslHdrSetHeader(mbuf, mhdr, expansion.name, exp); } else { - SslHdrRemoveHeader(mbuf, mhdr, e->name); + SslHdrRemoveHeader(mbuf, mhdr, expansion.name); } // Getting the peer certificate takes a reference count, but the server certificate doesn't. - if (x509 && e->scope == SSL_HEADERS_SCOPE_CLIENT) { + if (x509 && expansion.scope == SSL_HEADERS_SCOPE_CLIENT) { X509_free(x509); } } diff --git a/plugins/experimental/sslheaders/test_sslheaders.cc b/plugins/experimental/sslheaders/test_sslheaders.cc index 25ee1eacade..fa925db1e2d 100644 --- a/plugins/experimental/sslheaders/test_sslheaders.cc +++ b/plugins/experimental/sslheaders/test_sslheaders.cc @@ -18,8 +18,8 @@ #include "sslheaders.h" #include -#include -#include +#include +#include #include #include #include diff --git a/plugins/experimental/stream_editor/stream_editor.cc b/plugins/experimental/stream_editor/stream_editor.cc index b1fcb1d2220..d19e133ba1a 100644 --- a/plugins/experimental/stream_editor/stream_editor.cc +++ b/plugins/experimental/stream_editor/stream_editor.cc @@ -88,21 +88,21 @@ #define MAX_RX_MATCH 10 #define WHITESPACE " \t\r\n" -#include +#include #include #include #include -#include -#include -#include +#include +#include +#include #include -#include +#include #include "ts/ts.h" struct edit_t; -typedef std::set editset_t; -typedef editset_t::const_iterator edit_p; +using editset_t = std::set; +using edit_p = editset_t::const_iterator; struct edit_t { const size_t start; @@ -207,9 +207,9 @@ class rxscope : public scope_t { private: regex_t rx; - virtual bool + bool - match(const char *str) const + match(const char *str) const override { return (regexec(&rx, str, 0, nullptr, 0) == 0) ? 
true : false; } @@ -228,7 +228,7 @@ class rxscope : public scope_t TSfree(str); } - virtual ~rxscope() { regfree(&rx); } + ~rxscope() override { regfree(&rx); } }; class strscope : public scope_t @@ -236,16 +236,16 @@ class strscope : public scope_t private: const bool icase; char *str; - virtual bool + bool - match(const char *p) const + match(const char *p) const override { return ((icase ? strncasecmp : strncmp)(str, p, strlen(str)) == 0) ? true : false; } public: strscope(const bool u, const bool i, const char *pattern, int len) : scope_t(u), icase(i) { str = TSstrndup(pattern, len); } - virtual ~strscope() + ~strscope() override { if (str) { TSfree(str); @@ -268,8 +268,8 @@ class strmatch : public match_t const size_t slen; public: - virtual bool - find(const char *buf, size_t len, size_t &found, size_t &found_len, const char *to, std::string &repl) const + bool + find(const char *buf, size_t len, size_t &found, size_t &found_len, const char *to, std::string &repl) const override { const char *match = icase ? 
strcasestr(buf, str) : strstr(buf, str); if (match) { @@ -283,15 +283,15 @@ class strmatch : public match_t } strmatch(const bool i, const char *pattern, int len) : icase(i), slen(len) { str = TSstrndup(pattern, len); } - virtual ~strmatch() + ~strmatch() override { if (str) { TSfree(str); } } - virtual size_t - cont_size() const + size_t + cont_size() const override { return slen; } @@ -303,8 +303,8 @@ class rxmatch : public match_t regex_t rx; public: - virtual bool - find(const char *buf, size_t len, size_t &found, size_t &found_len, const char *tmpl, std::string &repl) const + bool + find(const char *buf, size_t len, size_t &found, size_t &found_len, const char *tmpl, std::string &repl) const override { regmatch_t pmatch[MAX_RX_MATCH]; if (regexec(&rx, buf, MAX_RX_MATCH, pmatch, REG_NOTEOL) == 0) { @@ -343,8 +343,8 @@ class rxmatch : public match_t } } - virtual size_t - cont_size() const + size_t + cont_size() const override { return match_len; } @@ -362,7 +362,7 @@ class rxmatch : public match_t TSfree(str); } - virtual ~rxmatch() { regfree(&rx); } + ~rxmatch() override { regfree(&rx); } }; #define PARSE_VERIFY(line, x, str) \ @@ -547,8 +547,8 @@ class rule_t } } }; -typedef std::vector ruleset_t; -typedef ruleset_t::const_iterator rule_p; +using ruleset_t = std::vector; +using rule_p = ruleset_t::const_iterator; typedef struct contdata_t { TSCont cont; diff --git a/plugins/experimental/ts_lua/ts_lua_http_config.c b/plugins/experimental/ts_lua/ts_lua_http_config.c index de0ca0efa15..b3560abcba8 100644 --- a/plugins/experimental/ts_lua/ts_lua_http_config.c +++ b/plugins/experimental/ts_lua/ts_lua_http_config.c @@ -22,7 +22,7 @@ typedef enum { TS_LUA_CONFIG_URL_REMAP_PRISTINE_HOST_HDR = TS_CONFIG_URL_REMAP_PRISTINE_HOST_HDR, TS_LUA_CONFIG_HTTP_CHUNKING_ENABLED = TS_CONFIG_HTTP_CHUNKING_ENABLED, TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_ENABLED = TS_CONFIG_HTTP_NEGATIVE_CACHING_ENABLED, - TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME = 
TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE, + TS_LUA_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME = TS_CONFIG_HTTP_NEGATIVE_CACHING_LIFETIME, TS_LUA_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE = TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE, TS_LUA_CONFIG_HTTP_KEEP_ALIVE_ENABLED_IN = TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_IN, TS_LUA_CONFIG_HTTP_KEEP_ALIVE_ENABLED_OUT = TS_CONFIG_HTTP_KEEP_ALIVE_ENABLED_OUT, @@ -112,17 +112,23 @@ typedef enum { TS_LUA_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES = TS_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES, TS_LUA_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY = TS_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY, TS_LUA_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT = TS_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT, + TS_LUA_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE = TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE, TS_LUA_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE = TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE, TS_LUA_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT = TS_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT, TS_LUA_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT = TS_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT, TS_LUA_CONFIG_HTTP_UNCACHEABLE_REQUESTS_BYPASS_PARENT = TS_CONFIG_HTTP_UNCACHEABLE_REQUESTS_BYPASS_PARENT, TS_LUA_CONFIG_HTTP_PARENT_PROXY_TOTAL_CONNECT_ATTEMPTS = TS_CONFIG_HTTP_PARENT_PROXY_TOTAL_CONNECT_ATTEMPTS, TS_LUA_CONFIG_HTTP_TRANSACTION_ACTIVE_TIMEOUT_IN = TS_CONFIG_HTTP_TRANSACTION_ACTIVE_TIMEOUT_IN, - TS_LUA_CONFIG_LAST_ENTRY = TS_CONFIG_LAST_ENTRY, TS_LUA_CONFIG_SRV_ENABLED = TS_CONFIG_SRV_ENABLED, TS_LUA_CONFIG_HTTP_FORWARD_CONNECT_METHOD = TS_CONFIG_HTTP_FORWARD_CONNECT_METHOD, TS_LUA_CONFIG_SSL_CERT_FILENAME = TS_CONFIG_SSL_CERT_FILENAME, TS_LUA_CONFIG_SSL_CERT_FILEPATH = TS_CONFIG_SSL_CERT_FILEPATH, + TS_LUA_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB = TS_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB, + TS_LUA_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD = TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD, + TS_LUA_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME = TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME, + TS_LUA_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS 
= TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS, + TS_LUA_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT = TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT, + TS_LUA_CONFIG_LAST_ENTRY = TS_CONFIG_LAST_ENTRY, } TSLuaOverridableConfigKey; typedef enum { @@ -226,6 +232,7 @@ ts_lua_var_item ts_lua_http_config_vars[] = { TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_CACHE_MAX_OPEN_WRITE_RETRIES), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_REDIRECT_USE_ORIG_CACHE_KEY), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_ATTACH_SERVER_SESSION_TO_CLIENT), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_WEBSOCKET_NO_ACTIVITY_TIMEOUT), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_WEBSOCKET_ACTIVE_TIMEOUT), @@ -236,6 +243,11 @@ ts_lua_var_item ts_lua_http_config_vars[] = { TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_FORWARD_CONNECT_METHOD), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_SSL_CERT_FILENAME), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_SSL_CERT_FILEPATH), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS), + TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT), TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_LAST_ENTRY), }; diff --git a/plugins/experimental/webp_transform/ImageTransform.cc b/plugins/experimental/webp_transform/ImageTransform.cc index 7ee68d28713..82e3cb8c68e 100644 --- a/plugins/experimental/webp_transform/ImageTransform.cc +++ b/plugins/experimental/webp_transform/ImageTransform.cc @@ -43,7 +43,7 @@ class ImageTransform : public TransformationPlugin } void - handleReadResponseHeaders(Transaction &transaction) + handleReadResponseHeaders(Transaction &transaction) override { transaction.getServerResponse().getHeaders()["Content-Type"] = "image/webp"; 
transaction.getServerResponse().getHeaders()["Vary"] = "Content-Type"; // to have a separate cache entry. @@ -53,13 +53,13 @@ class ImageTransform : public TransformationPlugin } void - consume(const string &data) + consume(const string &data) override { _img.write(data.data(), data.size()); } void - handleInputComplete() + handleInputComplete() override { string input_data = _img.str(); Blob input_blob(input_data.data(), input_data.length()); @@ -75,7 +75,7 @@ class ImageTransform : public TransformationPlugin setOutputComplete(); } - virtual ~ImageTransform() {} + ~ImageTransform() override {} private: std::stringstream _img; }; @@ -84,8 +84,8 @@ class GlobalHookPlugin : public GlobalPlugin { public: GlobalHookPlugin() { registerHook(HOOK_READ_RESPONSE_HEADERS); } - virtual void - handleReadResponseHeaders(Transaction &transaction) + void + handleReadResponseHeaders(Transaction &transaction) override { string ctype = transaction.getServerResponse().getHeaders().values("Content-Type"); string user_agent = transaction.getServerRequest().getHeaders().values("User-Agent"); diff --git a/plugins/generator/generator.cc b/plugins/generator/generator.cc index d72149ecebe..276313a75a2 100644 --- a/plugins/generator/generator.cc +++ b/plugins/generator/generator.cc @@ -23,13 +23,13 @@ #include #include -#include -#include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include #include // Generator plugin @@ -626,7 +626,7 @@ GeneratorTxnHook(TSCont contp, TSEvent event, void *edata) } static void -GeneratorInitialize(void) +GeneratorInitialize() { TxnHook = TSContCreate(GeneratorTxnHook, nullptr); memset(GeneratorData, 'x', sizeof(GeneratorData)); diff --git a/plugins/gzip/Makefile.inc b/plugins/gzip/Makefile.inc index 3cd845c5eb4..8762e21f1d5 100644 --- a/plugins/gzip/Makefile.inc +++ b/plugins/gzip/Makefile.inc @@ -16,3 +16,7 @@ pkglib_LTLIBRARIES += gzip/gzip.la gzip_gzip_la_SOURCES = gzip/gzip.cc gzip/configuration.cc 
gzip/misc.cc + +gzip_gzip_la_LDFLAGS = \ + $(AM_LDFLAGS) $(LIB_BROTLIENC) + diff --git a/plugins/gzip/README b/plugins/gzip/README index 48723691d31..b687cad1c8a 100644 --- a/plugins/gzip/README +++ b/plugins/gzip/README @@ -9,7 +9,7 @@ make && sudo make install if no makefile is present, you can compile it using tsxs -o gzip.so *.cc -and then install it using +and then install it using tsxs -i -o gzip.so after installation, add a line to plugin.config: @@ -43,7 +43,12 @@ a sample configuration (sample.gzip.config): # # compressible-content-type: wildcard pattern for matching compressible content types # -# disallow: wildcard pattern for disablign compression on urls +# disallow: wildcard pattern for disabling compression on urls +# +# allow: wildcard pattern for enabling compression on urls +# +# supported-algorithms: compression algorithms supported. comma separated algorithms. Default is gzip +# ###################################################################### #first, we configure the default/global plugin behaviour @@ -60,7 +65,14 @@ disallow /notthis/*.js disallow /notthat* disallow */bla* -#override the global configuration for a host. +allow */here/* +#disabling is possible too. trying to deprecate disallow +allow !*/nothere/* + +#supported algorithms +supported-algorithms br,gzip + +#override the global configuration for a host. 
#www.foo.nl does NOT inherit anything [www.foo.nl] enabled true @@ -70,3 +82,6 @@ compressible-content-type text/* cache false disallow /notthis/*.js disallow /notthat* + +allow /this/*.js +allow !/notthat/*.css diff --git a/plugins/gzip/configuration.cc b/plugins/gzip/configuration.cc index 80d8cf4386e..c27352e5ca0 100644 --- a/plugins/gzip/configuration.cc +++ b/plugins/gzip/configuration.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include namespace Gzip @@ -68,14 +69,14 @@ tokenize(const string &s, int (*fp)(int)) vector r; string tmp; - for (size_t i = 0; i < s.size(); i++) { - if (fp(s[i])) { + for (char i : s) { + if (fp(i)) { if (tmp.size()) { r.push_back(tmp); tmp = ""; } } else { - tmp += s[i]; + tmp += i; } } @@ -93,7 +94,9 @@ enum ParserState { kParseEnable, kParseCache, kParseDisallow, - kParseFlush + kParseFlush, + kParseAlgorithms, + kParseAllow }; void @@ -109,6 +112,12 @@ HostConfiguration::add_disallow(const std::string &disallow) disallows_.push_back(disallow); } +void +HostConfiguration::add_allow(const std::string &allow) +{ + allows_.push_back(allow); +} + void HostConfiguration::add_compressible_content_type(const std::string &content_type) { @@ -139,8 +148,8 @@ Configuration::find(const char *host, int host_length) void Configuration::release_all() { - for (HostContainer::iterator it = host_configurations_.begin(); it != host_configurations_.end(); ++it) { - (*it)->release(); + for (auto &host_configuration : host_configurations_) { + host_configuration->release(); } } @@ -148,14 +157,31 @@ bool HostConfiguration::is_url_allowed(const char *url, int url_len) { string surl(url, url_len); - - for (StringContainer::iterator it = disallows_.begin(); it != disallows_.end(); ++it) { - if (fnmatch(it->c_str(), surl.c_str(), 0) == 0) { - info("url [%s] disabled for compression, matched on pattern [%s]", surl.c_str(), it->c_str()); - return false; + if (has_disallows()) { + for (StringContainer::iterator it = disallows_.begin(); it != 
disallows_.end(); ++it) { + if (fnmatch(it->c_str(), surl.c_str(), 0) == 0) { + info("url [%s] disabled for compression, matched disallow pattern [%s]", surl.c_str(), it->c_str()); + return false; + } } } - + if (has_allows()) { + for (StringContainer::iterator allow_it = allows_.begin(); allow_it != allows_.end(); ++allow_it) { + const char *match_string = allow_it->c_str(); + bool exclude = match_string[0] == '!'; + if (exclude) { + ++match_string; // skip ! + } + if (fnmatch(match_string, surl.c_str(), 0) == 0) { + info("url [%s] %s for compression, matched allow pattern [%s]", surl.c_str(), exclude ? "disabled" : "enabled", + allow_it->c_str()); + return !exclude; + } + } + info("url [%s] disabled for compression, did not match any allows pattern", surl.c_str()); + return false; + } + info("url [%s] enabled for compression, did not match and disallow pattern ", surl.c_str()); return true; } @@ -181,6 +207,34 @@ HostConfiguration::is_content_type_compressible(const char *content_type, int co return is_match; } +void +HostConfiguration::add_compression_algorithms(const string &algorithms) +{ + istringstream compress_algo(algorithms); + string token; + compression_algorithms_ = ALGORITHM_DEFAULT; // remove the default gzip. + while (getline(compress_algo, token, ',')) { + if (token.find("br") != string::npos) { +#ifdef HAVE_BROTLI_ENCODE_H + compression_algorithms_ |= ALGORITHM_BROTLI; +#else + error("supported-algorithms: brotli support not compiled in."); +#endif + } else if (token.find("gzip") != string::npos) + compression_algorithms_ |= ALGORITHM_GZIP; + else if (token.find("deflate") != string::npos) + compression_algorithms_ |= ALGORITHM_DEFLATE; + else + error("Unknown compression type. 
Supported compression-algorithms ."); + } +} + +int +HostConfiguration::compression_algorithms() +{ + return compression_algorithms_; +} + Configuration * Configuration::Parse(const char *path) { @@ -231,8 +285,7 @@ Configuration::Parse(const char *path) vector v = tokenize(line, isspace); - for (size_t i = 0; i < v.size(); i++) { - string token = v[i]; + for (auto token : v) { trim_if(token, isspace); // should not happen @@ -263,6 +316,10 @@ Configuration::Parse(const char *path) state = kParseDisallow; } else if (token == "flush") { state = kParseFlush; + } else if (token == "supported-algorithms") { + state = kParseAlgorithms; + } else if (token == "allow") { + state = kParseAllow; } else { warning("failed to interpret \"%s\" at line %zu", token.c_str(), lineno); } @@ -291,6 +348,14 @@ Configuration::Parse(const char *path) current_host_configuration->set_flush(token == "true"); state = kParseStart; break; + case kParseAlgorithms: + current_host_configuration->add_compression_algorithms(token); + state = kParseStart; + break; + case kParseAllow: + current_host_configuration->add_allow(token); + state = kParseStart; + break; } } } diff --git a/plugins/gzip/configuration.h b/plugins/gzip/configuration.h index 04ff023b2b8..ba34cde76e8 100644 --- a/plugins/gzip/configuration.h +++ b/plugins/gzip/configuration.h @@ -33,11 +33,24 @@ namespace Gzip { typedef std::vector StringContainer; +enum CompressionAlgorithm { + ALGORITHM_DEFAULT = 0, + ALGORITHM_DEFLATE = 1, + ALGORITHM_GZIP = 2, + ALGORITHM_BROTLI = 4 // For bit manipulations +}; + class HostConfiguration { public: explicit HostConfiguration(const std::string &host) - : host_(host), enabled_(true), cache_(true), remove_accept_encoding_(false), flush_(false), ref_count_(0) + : host_(host), + enabled_(true), + cache_(true), + remove_accept_encoding_(false), + flush_(false), + compression_algorithms_(ALGORITHM_GZIP), + ref_count_(0) { } @@ -92,10 +105,19 @@ class HostConfiguration return !disallows_.empty(); } + 
bool + has_allows() const + { + return !allows_.empty(); + } + void add_disallow(const std::string &disallow); + void add_allow(const std::string &allow); void add_compressible_content_type(const std::string &content_type); bool is_url_allowed(const char *url, int url_len); bool is_content_type_compressible(const char *content_type, int content_type_length); + void add_compression_algorithms(const std::string &algorithms); + int compression_algorithms(); // Ref-counting these host configuration objects void @@ -118,10 +140,12 @@ class HostConfiguration bool cache_; bool remove_accept_encoding_; bool flush_; + int compression_algorithms_; volatile int ref_count_; StringContainer compressible_content_types_; StringContainer disallows_; + StringContainer allows_; DISALLOW_COPY_AND_ASSIGN(HostConfiguration); }; diff --git a/plugins/gzip/gzip.cc b/plugins/gzip/gzip.cc index 51110b12890..51718f858ff 100644 --- a/plugins/gzip/gzip.cc +++ b/plugins/gzip/gzip.cc @@ -21,9 +21,13 @@ limitations under the License. */ -#include +#include #include +#if HAVE_BROTLI_ENCODE_H +#include +#endif + #include "ts/ts.h" #include "ts/ink_defs.h" @@ -36,7 +40,6 @@ using namespace std; using namespace Gzip; // FIXME: custom dictionaries would be nice. configurable/content-type? -// FIXME: look into autoscaling the compression level based on connection speed // a gprs device might benefit from a higher compression ratio, whereas a desktop w. high bandwith // might be served better with little or no compression at all // FIXME: look into compressing from the task thread pool @@ -50,37 +53,48 @@ using namespace Gzip; const int ZLIB_COMPRESSION_LEVEL = 6; const char *global_hidden_header_name; -const char *dictionary = nullptr; +const char *dictionary = nullptr; +const char *TS_HTTP_VALUE_BROTLI = "br"; +const int TS_HTTP_LEN_BROTLI = 2; + +// brotli compression quality 1-11. 
Testing proved level '6' +#if HAVE_BROTLI_ENCODE_H +const int BROTLI_COMPRESSION_LEVEL = 6; +const int BROTLI_LGW = 16; +#endif // Current global configuration, and the previous one (for cleanup) Configuration *cur_config = nullptr; Configuration *prev_config = nullptr; -static GzipData * -gzip_data_alloc(int compression_type) +static Data * +data_alloc(int compression_type, int compression_algorithms) { - GzipData *data; + Data *data; int err; - data = (GzipData *)TSmalloc(sizeof(GzipData)); - data->downstream_vio = nullptr; - data->downstream_buffer = nullptr; - data->downstream_reader = nullptr; - data->downstream_length = 0; - data->state = transform_state_initialized; - data->compression_type = compression_type; - data->zstrm.next_in = Z_NULL; - data->zstrm.avail_in = 0; - data->zstrm.total_in = 0; - data->zstrm.next_out = Z_NULL; - data->zstrm.avail_out = 0; - data->zstrm.total_out = 0; - data->zstrm.zalloc = gzip_alloc; - data->zstrm.zfree = gzip_free; - data->zstrm.opaque = (voidpf) nullptr; - data->zstrm.data_type = Z_ASCII; - - int window_bits = (compression_type == COMPRESSION_TYPE_GZIP) ? 
WINDOW_BITS_GZIP : WINDOW_BITS_DEFLATE; + data = (Data *)TSmalloc(sizeof(Data)); + data->downstream_vio = nullptr; + data->downstream_buffer = nullptr; + data->downstream_reader = nullptr; + data->downstream_length = 0; + data->state = transform_state_initialized; + data->compression_type = compression_type; + data->compression_algorithms = compression_algorithms; + data->zstrm.next_in = Z_NULL; + data->zstrm.avail_in = 0; + data->zstrm.total_in = 0; + data->zstrm.next_out = Z_NULL; + data->zstrm.avail_out = 0; + data->zstrm.total_out = 0; + data->zstrm.zalloc = gzip_alloc; + data->zstrm.zfree = gzip_free; + data->zstrm.opaque = (voidpf) nullptr; + data->zstrm.data_type = Z_ASCII; + + int window_bits = WINDOW_BITS_GZIP; + if (compression_type & COMPRESSION_TYPE_DEFLATE) + window_bits = WINDOW_BITS_DEFLATE; err = deflateInit2(&data->zstrm, ZLIB_COMPRESSION_LEVEL, Z_DEFLATED, window_bits, ZLIB_MEMLEVEL, Z_DEFAULT_STRATEGY); @@ -94,12 +108,29 @@ gzip_data_alloc(int compression_type) fatal("gzip-transform: ERROR: deflateSetDictionary (%d)!", err); } } - +#if HAVE_BROTLI_ENCODE_H + data->bstrm.br = nullptr; + if (compression_type & COMPRESSION_TYPE_BROTLI) { + debug("gzip-transform: brotli compression. 
Create Brotli Encoder Instance."); + data->bstrm.br = BrotliEncoderCreateInstance(0, 0, 0); + if (!data->bstrm.br) { + fatal("gzip-transform: ERROR: Brotli Encoder Instance Failed"); + } + BrotliEncoderSetParameter(data->bstrm.br, BROTLI_PARAM_QUALITY, BROTLI_COMPRESSION_LEVEL); + BrotliEncoderSetParameter(data->bstrm.br, BROTLI_PARAM_LGWIN, BROTLI_LGW); + data->bstrm.next_in = nullptr; + data->bstrm.avail_in = 0; + data->bstrm.total_in = 0; + data->bstrm.next_out = nullptr; + data->bstrm.avail_out = 0; + data->bstrm.total_out = 0; + } +#endif return data; } static void -gzip_data_destroy(GzipData *data) +data_destroy(Data *data) { TSReleaseAssert(data); @@ -111,24 +142,41 @@ gzip_data_destroy(GzipData *data) TSIOBufferDestroy(data->downstream_buffer); } +// brotlidestory +#if HAVE_BROTLI_ENCODE_H + BrotliEncoderDestroyInstance(data->bstrm.br); +#endif + TSfree(data); } static TSReturnCode -gzip_content_encoding_header(TSMBuffer bufp, TSMLoc hdr_loc, const int compression_type) +content_encoding_header(TSMBuffer bufp, TSMLoc hdr_loc, const int compression_type, int algorithm) { TSReturnCode ret; TSMLoc ce_loc; - + const char *value = nullptr; + int value_len = 0; // Delete Content-Encoding if present??? 
+ if (compression_type & COMPRESSION_TYPE_BROTLI && (algorithm & ALGORITHM_BROTLI)) { + value = TS_HTTP_VALUE_BROTLI; + value_len = TS_HTTP_LEN_BROTLI; + } else if (compression_type & COMPRESSION_TYPE_GZIP && (algorithm & ALGORITHM_GZIP)) { + value = TS_HTTP_VALUE_GZIP; + value_len = TS_HTTP_LEN_GZIP; + } else if (compression_type & COMPRESSION_TYPE_DEFLATE && (algorithm & ALGORITHM_DEFLATE)) { + value = TS_HTTP_VALUE_DEFLATE; + value_len = TS_HTTP_LEN_DEFLATE; + } + + if (value_len == 0) { + error("no need to add Content-Encoding header"); + return TS_SUCCESS; + } if ((ret = TSMimeHdrFieldCreateNamed(bufp, hdr_loc, TS_MIME_FIELD_CONTENT_ENCODING, TS_MIME_LEN_CONTENT_ENCODING, &ce_loc)) == TS_SUCCESS) { - if (compression_type == COMPRESSION_TYPE_DEFLATE) { - ret = TSMimeHdrFieldValueStringInsert(bufp, hdr_loc, ce_loc, -1, TS_HTTP_VALUE_DEFLATE, TS_HTTP_LEN_DEFLATE); - } else if (compression_type == COMPRESSION_TYPE_GZIP) { - ret = TSMimeHdrFieldValueStringInsert(bufp, hdr_loc, ce_loc, -1, TS_HTTP_VALUE_GZIP, TS_HTTP_LEN_GZIP); - } + ret = TSMimeHdrFieldValueStringInsert(bufp, hdr_loc, ce_loc, -1, value, value_len); if (ret == TS_SUCCESS) { ret = TSMimeHdrFieldAppend(bufp, hdr_loc, ce_loc); } @@ -143,7 +191,7 @@ gzip_content_encoding_header(TSMBuffer bufp, TSMLoc hdr_loc, const int compressi } static TSReturnCode -gzip_vary_header(TSMBuffer bufp, TSMLoc hdr_loc) +vary_header(TSMBuffer bufp, TSMLoc hdr_loc) { TSReturnCode ret; TSMLoc ce_loc; @@ -186,7 +234,7 @@ gzip_vary_header(TSMBuffer bufp, TSMLoc hdr_loc) // FIXME: the etag alteration isn't proper. it should modify the value inside quotes // specify a very header.. static TSReturnCode -gzip_etag_header(TSMBuffer bufp, TSMLoc hdr_loc) +etag_header(TSMBuffer bufp, TSMLoc hdr_loc) { TSReturnCode ret = TS_SUCCESS; TSMLoc ce_loc; @@ -220,7 +268,7 @@ gzip_etag_header(TSMBuffer bufp, TSMLoc hdr_loc) // FIXME: some things are potentially compressible. 
those responses static void -gzip_transform_init(TSCont contp, GzipData *data) +compress_transform_init(TSCont contp, Data *data) { // update the vary, content-encoding, and etag response headers // prepare the downstream for transforming @@ -236,8 +284,8 @@ gzip_transform_init(TSCont contp, GzipData *data) return; } - if (gzip_content_encoding_header(bufp, hdr_loc, data->compression_type) == TS_SUCCESS && - gzip_vary_header(bufp, hdr_loc) == TS_SUCCESS && gzip_etag_header(bufp, hdr_loc) == TS_SUCCESS) { + if (content_encoding_header(bufp, hdr_loc, data->compression_type, data->compression_algorithms) == TS_SUCCESS && + vary_header(bufp, hdr_loc) == TS_SUCCESS && etag_header(bufp, hdr_loc) == TS_SUCCESS) { downstream_conn = TSTransformOutputVConnGet(contp); data->downstream_buffer = TSIOBufferCreate(); data->downstream_reader = TSIOBufferReaderAlloc(data->downstream_buffer); @@ -248,14 +296,102 @@ gzip_transform_init(TSCont contp, GzipData *data) } static void -gzip_transform_one(GzipData *data, TSIOBufferReader upstream_reader, int amount) +gzip_transform_one(Data *data, const char *upstream_buffer, int64_t upstream_length) { TSIOBufferBlock downstream_blkp; - const char *upstream_buffer; char *downstream_buffer; - int64_t upstream_length, downstream_length; + int64_t downstream_length; + int err; + data->zstrm.next_in = (unsigned char *)upstream_buffer; + data->zstrm.avail_in = upstream_length; + + while (data->zstrm.avail_in > 0) { + downstream_blkp = TSIOBufferStart(data->downstream_buffer); + downstream_buffer = TSIOBufferBlockWriteStart(downstream_blkp, &downstream_length); + + data->zstrm.next_out = (unsigned char *)downstream_buffer; + data->zstrm.avail_out = downstream_length; + + if (!data->hc->flush()) { + err = deflate(&data->zstrm, Z_NO_FLUSH); + } else { + err = deflate(&data->zstrm, Z_SYNC_FLUSH); + } + + if (err != Z_OK) { + warning("deflate() call failed: %d", err); + } + + if (downstream_length > data->zstrm.avail_out) { + 
TSIOBufferProduce(data->downstream_buffer, downstream_length - data->zstrm.avail_out); + data->downstream_length += (downstream_length - data->zstrm.avail_out); + } + + if (data->zstrm.avail_out > 0) { + if (data->zstrm.avail_in != 0) { + error("gzip-transform: ERROR: avail_in is (%d): should be 0", data->zstrm.avail_in); + } + } + } +} + +static void +brotli_transform_one(Data *data, const char *upstream_buffer, int64_t upstream_length) +{ +#if HAVE_BROTLI_ENCODE_H + TSIOBufferBlock downstream_blkp; + char *downstream_buffer; + int64_t downstream_length; int err; + data->bstrm.avail_in = upstream_length; + + while (data->bstrm.avail_in > 0) { + downstream_blkp = TSIOBufferStart(data->downstream_buffer); + downstream_buffer = TSIOBufferBlockWriteStart(downstream_blkp, &downstream_length); + + data->bstrm.next_out = (unsigned char *)downstream_buffer; + data->bstrm.avail_out = downstream_length; + data->bstrm.total_out = 0; + + data->bstrm.next_in = (uint8_t *)upstream_buffer; + if (!data->hc->flush()) { + err = BrotliEncoderCompressStream(data->bstrm.br, BROTLI_OPERATION_PROCESS, &data->bstrm.avail_in, + (const uint8_t **)&data->bstrm.next_in, &data->bstrm.avail_out, &data->bstrm.next_out, + &data->bstrm.total_out); + } else { + err = BrotliEncoderCompressStream(data->bstrm.br, BROTLI_OPERATION_FLUSH, &data->bstrm.avail_in, + (const uint8_t **)&data->bstrm.next_in, &data->bstrm.avail_out, &data->bstrm.next_out, + &data->bstrm.total_out); + } + + if (err != BROTLI_TRUE) { + warning("BrotliEncoderCompressStream() call failed: %d", err); + } + + if (downstream_length > (int64_t)data->bstrm.avail_out) { + TSIOBufferProduce(data->downstream_buffer, downstream_length - data->bstrm.avail_out); + data->downstream_length += (downstream_length - data->bstrm.avail_out); + } + + if (data->bstrm.avail_out > 0) { + if (data->bstrm.avail_in != 0) { + error("brotli-transform: ERROR: brotli avail_in is (%lu): should be 0", data->bstrm.avail_in); + } + } + } + data->bstrm.total_in 
+= upstream_length; +#else + error("brotli-transform: ERROR: compile with brotli support"); +#endif +} + +static void +compress_transform_one(Data *data, TSIOBufferReader upstream_reader, int amount) +{ + TSIOBufferBlock downstream_blkp; + const char *upstream_buffer; + int64_t upstream_length; while (amount > 0) { downstream_blkp = TSIOBufferReaderStart(upstream_reader); if (!downstream_blkp) { @@ -273,38 +409,13 @@ gzip_transform_one(GzipData *data, TSIOBufferReader upstream_reader, int amount) upstream_length = amount; } - data->zstrm.next_in = (unsigned char *)upstream_buffer; - data->zstrm.avail_in = upstream_length; - - while (data->zstrm.avail_in > 0) { - downstream_blkp = TSIOBufferStart(data->downstream_buffer); - downstream_buffer = TSIOBufferBlockWriteStart(downstream_blkp, &downstream_length); - - data->zstrm.next_out = (unsigned char *)downstream_buffer; - data->zstrm.avail_out = downstream_length; - - if (!data->hc->flush()) { - debug("gzip_transform: deflate with Z_NO_FLUSH"); - err = deflate(&data->zstrm, Z_NO_FLUSH); - } else { - debug("gzip_transform: deflate with Z_SYNC_FLUSH"); - err = deflate(&data->zstrm, Z_SYNC_FLUSH); - } - - if (err != Z_OK) { - warning("deflate() call failed: %d", err); - } - - if (downstream_length > data->zstrm.avail_out) { - TSIOBufferProduce(data->downstream_buffer, downstream_length - data->zstrm.avail_out); - data->downstream_length += (downstream_length - data->zstrm.avail_out); - } - - if (data->zstrm.avail_out > 0) { - if (data->zstrm.avail_in != 0) { - error("gzip-transform: ERROR: avail_in is (%d): should be 0", data->zstrm.avail_in); - } - } + if (data->compression_type & COMPRESSION_TYPE_BROTLI && (data->compression_algorithms & ALGORITHM_BROTLI)) { + brotli_transform_one(data, upstream_buffer, upstream_length); + } else if ((data->compression_type & (COMPRESSION_TYPE_GZIP | COMPRESSION_TYPE_DEFLATE)) && + (data->compression_algorithms & (ALGORITHM_GZIP | ALGORITHM_DEFLATE))) { + gzip_transform_one(data, 
upstream_buffer, upstream_length); + } else { + warning("No compression supported. Shoudn't come here."); } TSIOBufferReaderConsume(upstream_reader, upstream_length); @@ -313,7 +424,7 @@ gzip_transform_one(GzipData *data, TSIOBufferReader upstream_reader, int amount) } static void -gzip_transform_finish(GzipData *data) +gzip_transform_finish(Data *data) { if (data->state == transform_state_output) { TSIOBufferBlock downstream_blkp; @@ -350,30 +461,94 @@ gzip_transform_finish(GzipData *data) if (data->downstream_length != (int64_t)(data->zstrm.total_out)) { error("gzip-transform: ERROR: output lengths don't match (%d, %ld)", data->downstream_length, data->zstrm.total_out); } - + debug("gzip-transform: Finished gzip"); gzip_log_ratio(data->zstrm.total_in, data->downstream_length); } } static void -gzip_transform_do(TSCont contp) +brotli_transform_finish(Data *data) +{ +#if HAVE_BROTLI_ENCODE_H + if (data->state == transform_state_output) { + TSIOBufferBlock downstream_blkp; + char *downstream_buffer; + int64_t downstream_length; + int err; + + data->state = transform_state_finished; + + for (;;) { + downstream_blkp = TSIOBufferStart(data->downstream_buffer); + + downstream_buffer = TSIOBufferBlockWriteStart(downstream_blkp, &downstream_length); + data->bstrm.next_out = (unsigned char *)downstream_buffer; + data->bstrm.avail_out = downstream_length; + + err = BrotliEncoderCompressStream(data->bstrm.br, BROTLI_OPERATION_FINISH, &data->bstrm.avail_in, + (const uint8_t **)&data->bstrm.next_in, &data->bstrm.avail_out, &data->bstrm.next_out, + &data->bstrm.total_out); + + if (downstream_length > (int64_t)data->bstrm.avail_out) { + TSIOBufferProduce(data->downstream_buffer, downstream_length - data->bstrm.avail_out); + data->downstream_length += (downstream_length - data->bstrm.avail_out); + } + if (!BrotliEncoderIsFinished(data->bstrm.br)) { + continue; + } + + if (err != BROTLI_TRUE) { /* some more data to encode */ + warning("brotli_transform: BrotliEncoderCompressStream 
should return BROTLI_TRUE"); + } + + break; + } + + if (data->downstream_length != (int64_t)(data->bstrm.total_out)) { + error("brotli-transform: ERROR: output lengths don't match (%d, %ld)", data->downstream_length, data->bstrm.total_out); + } + debug("brotli-transform: Finished brotli"); + gzip_log_ratio(data->bstrm.total_in, data->downstream_length); + } +#else + error("brotli-transform: compile with brotli support"); +#endif +} + +static void +compress_transform_finish(Data *data) +{ + if (data->compression_type & COMPRESSION_TYPE_BROTLI && data->compression_algorithms & ALGORITHM_BROTLI) { + brotli_transform_finish(data); + debug("brotli-transform: Brotli compression finish."); + } else if ((data->compression_type & (COMPRESSION_TYPE_GZIP | COMPRESSION_TYPE_DEFLATE)) && + (data->compression_algorithms & (ALGORITHM_GZIP | ALGORITHM_DEFLATE))) { + gzip_transform_finish(data); + debug("gzip-transform: Gzip compression finish."); + } else { + warning("No Compression matched, shouldn't come here."); + } +} + +static void +compress_transform_do(TSCont contp) { TSVIO upstream_vio; - GzipData *data; + Data *data; int64_t upstream_todo; int64_t upstream_avail; int64_t downstream_bytes_written; - data = (GzipData *)TSContDataGet(contp); + data = (Data *)TSContDataGet(contp); if (data->state == transform_state_initialized) { - gzip_transform_init(contp, data); + compress_transform_init(contp, data); } upstream_vio = TSVConnWriteVIOGet(contp); downstream_bytes_written = data->downstream_length; if (!TSVIOBufferGet(upstream_vio)) { - gzip_transform_finish(data); + compress_transform_finish(data); TSVIONBytesSet(data->downstream_vio, data->downstream_length); @@ -393,7 +568,7 @@ gzip_transform_do(TSCont contp) } if (upstream_todo > 0) { - gzip_transform_one(data, TSVIOReaderGet(upstream_vio), upstream_todo); + compress_transform_one(data, TSVIOReaderGet(upstream_vio), upstream_todo); TSVIONDoneSet(upstream_vio, TSVIONDoneGet(upstream_vio) + upstream_todo); } } @@ -406,7 
+581,7 @@ gzip_transform_do(TSCont contp) TSContCall(TSVIOContGet(upstream_vio), TS_EVENT_VCONN_WRITE_READY, upstream_vio); } } else { - gzip_transform_finish(data); + compress_transform_finish(data); TSVIONBytesSet(data->downstream_vio, data->downstream_length); if (data->downstream_length > downstream_bytes_written) { @@ -418,10 +593,10 @@ gzip_transform_do(TSCont contp) } static int -gzip_transform(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) +compress_transform(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) { if (TSVConnClosedGet(contp)) { - gzip_data_destroy((GzipData *)TSContDataGet(contp)); + data_destroy((Data *)TSContDataGet(contp)); TSContDestroy(contp); return 0; } else { @@ -435,14 +610,14 @@ gzip_transform(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) TSVConnShutdown(TSTransformOutputVConnGet(contp), 0, 1); break; case TS_EVENT_VCONN_WRITE_READY: - gzip_transform_do(contp); + compress_transform_do(contp); break; case TS_EVENT_IMMEDIATE: - gzip_transform_do(contp); + compress_transform_do(contp); break; default: warning("unknown event [%d]", event); - gzip_transform_do(contp); + compress_transform_do(contp); break; } } @@ -451,7 +626,7 @@ gzip_transform(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) } static int -gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configuration, int *compress_type) +transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configuration, int *compress_type, int *algorithms) { /* Server response header */ TSMBuffer bufp; @@ -468,6 +643,13 @@ gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configur int i, compression_acceptable, len; TSHttpStatus resp_status; + /* + // Before anything, check atleast one compression algorithm is supported + if (host_configuration->compression_algorithms() == ALGORITHM_DEFAULT) { + info("No compression algorithms configured"); + return 0; + } + */ if (server) { if (TS_SUCCESS != 
TSHttpTxnServerRespGet(txnp, &bufp, &hdr_loc)) { return 0; @@ -503,7 +685,8 @@ gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configur return 0; } - cfield = TSMimeHdrFieldFind(cbuf, chdr, TS_MIME_FIELD_ACCEPT_ENCODING, TS_MIME_LEN_ACCEPT_ENCODING); + *algorithms = host_configuration->compression_algorithms(); + cfield = TSMimeHdrFieldFind(cbuf, chdr, TS_MIME_FIELD_ACCEPT_ENCODING, TS_MIME_LEN_ACCEPT_ENCODING); if (cfield != TS_NULL_MLOC) { compression_acceptable = 0; nvalues = TSMimeHdrFieldValuesCount(cbuf, chdr, cfield); @@ -513,14 +696,18 @@ gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configur continue; } - if (strncasecmp(value, "deflate", sizeof("deflate") - 1) == 0) { - compression_acceptable = 1; - *compress_type = COMPRESSION_TYPE_DEFLATE; - break; + if (strncasecmp(value, "br", sizeof("br") - 1) == 0) { + if (*algorithms & ALGORITHM_BROTLI) + compression_acceptable = 1; + *compress_type |= COMPRESSION_TYPE_BROTLI; + } else if (strncasecmp(value, "deflate", sizeof("deflate") - 1) == 0) { + if (*algorithms & ALGORITHM_DEFLATE) + compression_acceptable = 1; + *compress_type |= COMPRESSION_TYPE_DEFLATE; } else if (strncasecmp(value, "gzip", sizeof("gzip") - 1) == 0) { - compression_acceptable = 1; - *compress_type = COMPRESSION_TYPE_GZIP; - break; + if (*algorithms & ALGORITHM_GZIP) + compression_acceptable = 1; + *compress_type |= COMPRESSION_TYPE_GZIP; } } @@ -528,7 +715,7 @@ gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configur TSHandleMLocRelease(cbuf, TS_NULL_MLOC, chdr); if (!compression_acceptable) { - info("no acceptable encoding found in request header, not compressible"); + info("no acceptable encoding match found in request header, not compressible"); TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); return 0; } @@ -574,21 +761,24 @@ gzip_transformable(TSHttpTxn txnp, bool server, HostConfiguration *host_configur } static void -gzip_transform_add(TSHttpTxn txnp, 
HostConfiguration *hc, int compress_type) +compress_transform_add(TSHttpTxn txnp, HostConfiguration *hc, int compress_type, int algorithms) { TSVConn connp; - GzipData *data; + Data *data; TSHttpTxnUntransformedRespCache(txnp, 1); if (!hc->cache()) { + debug("TransformedRespCache not enabled"); TSHttpTxnTransformedRespCache(txnp, 0); } else { + debug("TransformedRespCache enabled"); + TSHttpTxnUntransformedRespCache(txnp, 0); TSHttpTxnTransformedRespCache(txnp, 1); } - connp = TSTransformCreate(gzip_transform, txnp); - data = gzip_data_alloc(compress_type); + connp = TSTransformCreate(compress_transform, txnp); + data = data_alloc(compress_type, algorithms); data->txn = txnp; data->hc = hc; @@ -620,7 +810,8 @@ static int transform_plugin(TSCont contp, TSEvent event, void *edata) { TSHttpTxn txnp = (TSHttpTxn)edata; - int compress_type = COMPRESSION_TYPE_DEFLATE; + int compress_type = COMPRESSION_TYPE_DEFAULT; + int algorithms = ALGORITHM_DEFAULT; HostConfiguration *hc = (HostConfiguration *)TSContDataGet(contp); switch (event) { @@ -639,8 +830,8 @@ transform_plugin(TSCont contp, TSEvent event, void *edata) } } - if (gzip_transformable(txnp, true, hc, &compress_type)) { - gzip_transform_add(txnp, hc, compress_type); + if (transformable(txnp, true, hc, &compress_type, &algorithms)) { + compress_transform_add(txnp, hc, compress_type, algorithms); } } break; @@ -667,8 +858,8 @@ transform_plugin(TSCont contp, TSEvent event, void *edata) if (TS_ERROR != TSHttpTxnCacheLookupStatusGet(txnp, &obj_status) && (TS_CACHE_LOOKUP_HIT_FRESH == obj_status)) { if (hc != nullptr) { info("handling compression of cached object"); - if (gzip_transformable(txnp, false, hc, &compress_type)) { - gzip_transform_add(txnp, hc, compress_type); + if (transformable(txnp, false, hc, &compress_type, &algorithms)) { + compress_transform_add(txnp, hc, compress_type, algorithms); } } } else { @@ -703,7 +894,7 @@ transform_plugin(TSCont contp, TSEvent event, void *edata) * further processing */ static 
void -handle_gzip_request(TSHttpTxn txnp, Configuration *config) +handle_request(TSHttpTxn txnp, Configuration *config) { TSMBuffer req_buf; TSMLoc req_loc; @@ -718,17 +909,15 @@ handle_gzip_request(TSHttpTxn txnp, Configuration *config) bool allowed = false; if (hc->enabled()) { - if (hc->has_disallows()) { + if (hc->has_disallows() || hc->has_allows()) { int url_len; char *url = TSHttpTxnEffectiveUrlStringGet(txnp, &url_len); - - allowed = hc->is_url_allowed(url, url_len); + allowed = hc->is_url_allowed(url, url_len); TSfree(url); } else { allowed = true; } } - if (allowed) { TSCont transform_contp = TSContCreate(transform_plugin, nullptr); @@ -753,7 +942,7 @@ transform_global_plugin(TSCont /* contp ATS_UNUSED */, TSEvent event, void *edat switch (event) { case TS_EVENT_HTTP_READ_REQUEST_HDR: // Handle gzip request and use the global configs - handle_gzip_request(txnp, nullptr); + handle_request(txnp, nullptr); break; default: @@ -887,7 +1076,7 @@ TSRemapDoRemap(void *instance, TSHttpTxn txnp, TSRemapRequestInfo *rri) info("Remap Rules configured for gzip"); Configuration *config = (Configuration *)instance; // Handle gzip request and use the configs populated from remap instance - handle_gzip_request(txnp, config); + handle_request(txnp, config); } return TSREMAP_NO_REMAP; } diff --git a/plugins/gzip/misc.cc b/plugins/gzip/misc.cc index 161abdaa4da..646187d2df4 100644 --- a/plugins/gzip/misc.cc +++ b/plugins/gzip/misc.cc @@ -25,8 +25,8 @@ #include "ts/ink_defs.h" #include "misc.h" -#include -#include +#include +#include #include "debug_macros.h" voidpf @@ -47,7 +47,7 @@ normalize_accept_encoding(TSHttpTxn /* txnp ATS_UNUSED */, TSMBuffer reqp, TSMLo TSMLoc field = TSMimeHdrFieldFind(reqp, hdr_loc, TS_MIME_FIELD_ACCEPT_ENCODING, TS_MIME_LEN_ACCEPT_ENCODING); int deflate = 0; int gzip = 0; - + int br = 0; // remove the accept encoding field(s), // while finding out if gzip or deflate is supported. 
while (field) { @@ -63,6 +63,9 @@ normalize_accept_encoding(TSHttpTxn /* txnp ATS_UNUSED */, TSMBuffer reqp, TSMLo --value_count; val = TSMimeHdrFieldValueStringGet(reqp, hdr_loc, field, value_count, &val_len); + if (val_len == (int)strlen("br")) { + br = !strncmp(val, "br", val_len); + } if (val_len == (int)strlen("gzip")) { gzip = !strncmp(val, "gzip", val_len); } else if (val_len == (int)strlen("deflate")) { @@ -78,10 +81,13 @@ normalize_accept_encoding(TSHttpTxn /* txnp ATS_UNUSED */, TSMBuffer reqp, TSMLo } // append a new accept-encoding field in the header - if (deflate || gzip) { + if (deflate || gzip || br) { TSMimeHdrFieldCreate(reqp, hdr_loc, &field); TSMimeHdrFieldNameSet(reqp, hdr_loc, field, TS_MIME_FIELD_ACCEPT_ENCODING, TS_MIME_LEN_ACCEPT_ENCODING); - + if (br) { + TSMimeHdrFieldValueStringInsert(reqp, hdr_loc, field, -1, "br", strlen("br")); + info("normalized accept encoding to br"); + } if (gzip) { TSMimeHdrFieldValueStringInsert(reqp, hdr_loc, field, -1, "gzip", strlen("gzip")); info("normalized accept encoding to gzip"); diff --git a/plugins/gzip/misc.h b/plugins/gzip/misc.h index 60f5aea861b..6c00cc0000d 100644 --- a/plugins/gzip/misc.h +++ b/plugins/gzip/misc.h @@ -29,6 +29,10 @@ #include #include +#if HAVE_BROTLI_ENCODE_H +#include +#endif + #include "configuration.h" using namespace Gzip; @@ -39,8 +43,12 @@ static const int WINDOW_BITS_DEFLATE = -15; static const int WINDOW_BITS_GZIP = 31; // misc -static const int COMPRESSION_TYPE_DEFLATE = 1; -static const int COMPRESSION_TYPE_GZIP = 2; +enum CompressionType { + COMPRESSION_TYPE_DEFAULT = 0, + COMPRESSION_TYPE_DEFLATE = 1, + COMPRESSION_TYPE_GZIP = 2, + COMPRESSION_TYPE_BROTLI = 4 +}; // this one is used to rename the accept encoding header // it will be restored later on @@ -53,6 +61,18 @@ enum transform_state { transform_state_finished, }; +#if HAVE_BROTLI_ENCODE_H +typedef struct { + BrotliEncoderState *br; + uint8_t *next_in; + size_t avail_in; + uint8_t *next_out; + size_t avail_out; 
+ size_t total_in; + size_t total_out; +} b_stream; +#endif + typedef struct { TSHttpTxn txn; HostConfiguration *hc; @@ -63,7 +83,11 @@ typedef struct { z_stream zstrm; enum transform_state state; int compression_type; -} GzipData; + int compression_algorithms; +#if HAVE_BROTLI_ENCODE_H + b_stream bstrm; +#endif +} Data; voidpf gzip_alloc(voidpf opaque, uInt items, uInt size); void gzip_free(voidpf opaque, voidpf address); diff --git a/plugins/gzip/tests/test_gzip.py b/plugins/gzip/tests/test_gzip.py index 137f271e17f..8b9b4f6669c 100644 --- a/plugins/gzip/tests/test_gzip.py +++ b/plugins/gzip/tests/test_gzip.py @@ -16,7 +16,8 @@ import requests import logging -import random, string +import random +import string import tsqa.test_cases import tsqa.utils @@ -26,74 +27,78 @@ origin_content_length = 0 log = logging.getLogger(__name__) -#Test positive cases of remap gzip plugin +# Test positive cases of remap gzip plugin gzip_remap_bench = [ - # Test gzip - { "args": "@pparam=gzip1.config", - "files": [("gzip1.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n") + # Test gzip + {"args": "@pparam=gzip1.config", + "files": [("gzip1.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n") ], - }, - { "args": "@pparam=gzip2.config", - "files": [("gzip2.config", "enabled true\nremove-accept-encoding false\ncache false\ncompressible-content-type text/*\n") + }, + {"args": "@pparam=gzip2.config", + "files": [("gzip2.config", "enabled true\nremove-accept-encoding false\ncache false\ncompressible-content-type text/*\n") ], - }, - { "args": "@pparam=gzip3.config", - "files": [("gzip3.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n") + }, + {"args": "@pparam=gzip3.config", + "files": [("gzip3.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n") ], - }, - { "args": "@pparam=gzip4.config", 
- "files": [("gzip4.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush true\n") + }, + {"args": "@pparam=gzip4.config", + "files": [("gzip4.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush true\n") ], - }, - { "args": "@pparam=gzip5.config", - "files": [("gzip5.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush false\n") + }, + {"args": "@pparam=gzip5.config", + "files": [("gzip5.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\nflush false\n") ], - }, - ] - -#Test negative cases of remap gzip plugin -gzip_remap_negative_bench = [ - #Test when gzip is disabled - { "args": "@pparam=gzip_negative1.config", - "files": [("gzip_negative1.config", "enabled false\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n") - ], - }, - #Test when compressible content doesn't match - { "args": "@pparam=gzip_negative2.config", - "files": [("gzip_negative2.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type !text/*\n") - ], - }, - #Test when disallow is configured to match some pattern - { "args": "@pparam=gzip_negative3.config", - "files": [("gzip_negative3.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\ndisallow *test*\n") - ], - }, - ] - -#Test global gzip plugin + }, +] + +# Test negative cases of remap gzip plugin +gzip_remap_negative_bench = [ + # Test when gzip is disabled + {"args": "@pparam=gzip_negative1.config", + "files": [("gzip_negative1.config", "enabled false\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\n") + ], + }, + # Test when compressible content doesn't match + {"args": "@pparam=gzip_negative2.config", + "files": [("gzip_negative2.config", "enabled true\nremove-accept-encoding true\ncache 
false\ncompressible-content-type !text/*\n") + ], + }, + # Test when disallow is configured to match some pattern + {"args": "@pparam=gzip_negative3.config", + "files": [("gzip_negative3.config", "enabled true\nremove-accept-encoding true\ncache false\ncompressible-content-type text/*\ndisallow *test*\n") + ], + }, +] + +# Test global gzip plugin gzip_global_bench = [ - { "args": "gzip_global1.config", - "files": [("gzip_global1.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n") + {"args": "gzip_global1.config", + "files": [("gzip_global1.config", "enabled true\nremove-accept-encoding true\ncache true\ncompressible-content-type text/*\n") ], - }, - ] + }, +] + +# Set up an origin server which returns random string. + -#Set up an origin server which returns random string. def handler(request): global origin_content_length rand_string = ''.join(random.choice(string.lowercase) for i in range(500)) origin_content_length = len(rand_string) return rand_string + def create_config_files(env, test): # Create gzip config files. 
for file in test['files']: filename = file[0] content = file[1] - path = os.path.join(env.layout.prefix, 'etc/trafficserver', filename); + path = os.path.join(env.layout.prefix, 'etc/trafficserver', filename) with open(path, 'w') as fh: fh.write(content) + class StaticEnvironmentCase(tsqa.test_cases.EnvironmentCase): @classmethod def getEnv(cls): @@ -103,16 +108,18 @@ def getEnv(cls): env.clone(layout=layout) return env -#Test gzip remap plugin +# Test gzip remap plugin + + class TestGzipRemapPlugin(tsqa.test_cases.DynamicHTTPEndpointCase, StaticEnvironmentCase): @classmethod def setUpEnv(cls, env): cls.configs['plugin.config'].add_line('xdebug.so') cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': '.*', - 'proxy.config.diags.debug.tags': 'gzip.*', - 'proxy.config.url_remap.pristine_host_hdr': 1,}) + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': '.*', + 'proxy.config.diags.debug.tags': 'gzip.*', + 'proxy.config.url_remap.pristine_host_hdr': 1, }) cls.http_endpoint.add_handler('/path/to/object', handler) @@ -120,7 +127,8 @@ def add_remap_rule(remap_prefix, remap_index, test): host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index) port = cls.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] args = test['args'] - remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2} @plugin=gzip.so {3}'.format(host, port, cls.http_endpoint.address[1], args) + remap_rule = 'map http://{0}:{1} http://127.0.0.1:{2} @plugin=gzip.so {3}'.format( + host, port, cls.http_endpoint.address[1], args) log.info(' {0}'.format(remap_rule)) cls.configs['remap.config'].add_line(remap_rule) @@ -129,17 +137,17 @@ def add_remap_rule(remap_prefix, remap_index, test): for test in gzip_remap_bench: add_remap_rule("gzip", i, test) create_config_files(env, test) - i+=1 + i += 1 - #Prepare negative gzip tests related remap rules. 
+ # Prepare negative gzip tests related remap rules. i = 0 for test in gzip_remap_negative_bench: add_remap_rule("gzip_negative", i, test) create_config_files(env, test) - i+=1 + i += 1 - def send_request(self,remap_prefix, remap_index): - host = 'test_{0}_{1}.example.com'.format( remap_prefix, remap_index) + def send_request(self, remap_prefix, remap_index): + host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index) port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] url = 'http://127.0.0.1:{0}/path/to/object'.format(port) log.info('host is {0}, port is {1}, url is {2}'.format(host, port, url)) @@ -178,16 +186,18 @@ def test_gzip_remap_plugin(self): self.send_gzip_request_negative('gzip_negative', i) i += 1 -#Test gzip global plugin +# Test gzip global plugin + + class TestGzipGlobalPlugin(tsqa.test_cases.DynamicHTTPEndpointCase, StaticEnvironmentCase): @classmethod def setUpEnv(cls, env): cls.configs['plugin.config'].add_line('xdebug.so') cls.configs['records.config']['CONFIG'].update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'gzip.*', - 'proxy.config.url_remap.pristine_host_hdr': 1,}) + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'gzip.*', + 'proxy.config.url_remap.pristine_host_hdr': 1, }) cls.http_endpoint.add_handler('/path/to/object', handler) @@ -207,13 +217,13 @@ def add_global_plugin_rule(test): # Prepare gzip plugin rules i = 0 for test in gzip_global_bench: - add_remap_rule("gzip_global",i) + add_remap_rule("gzip_global", i) add_global_plugin_rule(test) create_config_files(env, test) - i+=1 + i += 1 - def send_request(self,remap_prefix, remap_index): - host = 'test_{0}_{1}.example.com'.format( remap_prefix, remap_index) + def send_request(self, remap_prefix, remap_index): + host = 'test_{0}_{1}.example.com'.format(remap_prefix, remap_index) port = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports'] url = 
'http://127.0.0.1:{0}/path/to/object'.format(port) log.info('host is {0}, port is {1}, url is {2}'.format(host, port, url)) @@ -238,4 +248,3 @@ def test_gzip_global_plugin(self): for test in gzip_global_bench: self.send_global_gzip_request("gzip_global", i) i += 1 - diff --git a/plugins/header_rewrite/condition.h b/plugins/header_rewrite/condition.h index 84a9e04e5eb..bebeeb5b613 100644 --- a/plugins/header_rewrite/condition.h +++ b/plugins/header_rewrite/condition.h @@ -101,7 +101,7 @@ class Condition : public Statement return _matcher; } - const MatcherOps + MatcherOps get_cond_op() const { return _cond_op; diff --git a/plugins/header_rewrite/conditions.cc b/plugins/header_rewrite/conditions.cc index d4425bec135..4b57dbabbfa 100644 --- a/plugins/header_rewrite/conditions.cc +++ b/plugins/header_rewrite/conditions.cc @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include "ts/ts.h" @@ -903,7 +903,7 @@ const char * ConditionGeo::get_geo_string(const sockaddr *addr) const { TSError("[%s] No Geo library available!", PLUGIN_NAME); - return NULL; + return nullptr; } int64_t diff --git a/plugins/header_rewrite/expander.cc b/plugins/header_rewrite/expander.cc index 543c202fc73..ceb46b74993 100644 --- a/plugins/header_rewrite/expander.cc +++ b/plugins/header_rewrite/expander.cc @@ -44,7 +44,7 @@ VariableExpander::expand(const Resources &res) break; } - std::string::size_type end = result.find(">", start); + std::string::size_type end = result.find('>', start); if (end == std::string::npos) { break; } @@ -113,6 +113,15 @@ VariableExpander::expand(const Resources &res) } TSHandleMLocRelease(bufp, TS_NULL_MLOC, url_loc); } + } else if (variable == "%") { + // The client request effective URL. 
+ int url_len = 0; + char *url = TSHttpTxnEffectiveUrlStringGet(res.txnp, &url_len); + if (url && url_len) { + resolved_variable.assign(url, url_len); + } + free(url); + url = nullptr; } // TODO(SaveTheRbtz): Can be optimized diff --git a/plugins/header_rewrite/header_rewrite.cc b/plugins/header_rewrite/header_rewrite.cc index 4713d69cc09..1f30de28f80 100644 --- a/plugins/header_rewrite/header_rewrite.cc +++ b/plugins/header_rewrite/header_rewrite.cc @@ -93,7 +93,7 @@ class RulesConfig return _rules[hook]; } - bool parse_config(const std::string fname, TSHttpHookID default_hook); + bool parse_config(const std::string &fname, TSHttpHookID default_hook); void hold() @@ -149,7 +149,7 @@ RulesConfig::add_rule(RuleSet *rule) // anyways (or reload for remap.config), so not really in the critical path. // bool -RulesConfig::parse_config(const std::string fname, TSHttpHookID default_hook) +RulesConfig::parse_config(const std::string &fname, TSHttpHookID default_hook) { RuleSet *rule = nullptr; std::string filename; diff --git a/plugins/header_rewrite/header_rewrite_test.cc b/plugins/header_rewrite/header_rewrite_test.cc index 94e9a898f21..fa00fba0014 100644 --- a/plugins/header_rewrite/header_rewrite_test.cc +++ b/plugins/header_rewrite/header_rewrite_test.cc @@ -38,7 +38,7 @@ TSError(const char *fmt, ...) 
class ParserTest : public Parser { public: - ParserTest(std::string line) : Parser(line), res(true) { std::cout << "Finished parser test: " << line << std::endl; } + ParserTest(const std::string &line) : Parser(line), res(true) { std::cout << "Finished parser test: " << line << std::endl; } std::vector getTokens() { diff --git a/plugins/header_rewrite/operator.cc b/plugins/header_rewrite/operator.cc index 30de38da4cf..1afab7767be 100644 --- a/plugins/header_rewrite/operator.cc +++ b/plugins/header_rewrite/operator.cc @@ -23,7 +23,7 @@ #include "ts/ts.h" #include "operator.h" -const OperModifiers +OperModifiers Operator::get_oper_modifiers() const { if (_next) { diff --git a/plugins/header_rewrite/operator.h b/plugins/header_rewrite/operator.h index c20ce639334..839b7b56d8b 100644 --- a/plugins/header_rewrite/operator.h +++ b/plugins/header_rewrite/operator.h @@ -45,7 +45,7 @@ class Operator : public Statement { public: Operator() : _mods(OPER_NONE) { TSDebug(PLUGIN_NAME_DBG, "Calling CTOR for Operator"); } - const OperModifiers get_oper_modifiers() const; + OperModifiers get_oper_modifiers() const; virtual void initialize(Parser &p); void diff --git a/plugins/header_rewrite/operators.cc b/plugins/header_rewrite/operators.cc index 185cc18665b..beb4f8a87fd 100644 --- a/plugins/header_rewrite/operators.cc +++ b/plugins/header_rewrite/operators.cc @@ -20,7 +20,7 @@ // // #include -#include +#include #include "ts/ts.h" @@ -345,7 +345,7 @@ OperatorSetRedirect::exec(const Resources &res) const query = TSUrlHttpQueryGet(bufp, url_loc, &query_len); if ((get_oper_modifiers() & OPER_QSA) && (query_len > 0)) { TSDebug(PLUGIN_NAME, "QSA mode, append original query string: %.*s", query_len, query); - std::string connector = (value.find("?") == std::string::npos) ? "?" : "&"; + std::string connector = (value.find('?') == std::string::npos) ? "?" 
: "&"; value.append(connector); value.append(query, query_len); } diff --git a/plugins/header_rewrite/ruleset.h b/plugins/header_rewrite/ruleset.h index 1497528d064..6212abaac4c 100644 --- a/plugins/header_rewrite/ruleset.h +++ b/plugins/header_rewrite/ruleset.h @@ -68,13 +68,13 @@ class RuleSet _hook = hook; } - const TSHttpHookID + TSHttpHookID get_hook() const { return _hook; } - const ResourceIDs + ResourceIDs get_all_resource_ids() const { return _ids; diff --git a/plugins/header_rewrite/statement.cc b/plugins/header_rewrite/statement.cc index 07480c26aff..ee342dc8adc 100644 --- a/plugins/header_rewrite/statement.cc +++ b/plugins/header_rewrite/statement.cc @@ -33,7 +33,7 @@ Statement::append(Statement *stmt) tmp->_next = stmt; } -const ResourceIDs +ResourceIDs Statement::get_resource_ids() const { const Statement *stmt = this; diff --git a/plugins/header_rewrite/statement.h b/plugins/header_rewrite/statement.h index ef32fa807d5..7aa627b65b3 100644 --- a/plugins/header_rewrite/statement.h +++ b/plugins/header_rewrite/statement.h @@ -108,7 +108,7 @@ class Statement // Which hook are we adding this statement to? bool set_hook(TSHttpHookID hook); - const TSHttpHookID + TSHttpHookID get_hook() const { return _hook; @@ -124,7 +124,7 @@ class Statement // Linked list. void append(Statement *stmt); - const ResourceIDs get_resource_ids() const; + ResourceIDs get_resource_ids() const; virtual void initialize(Parser &) diff --git a/plugins/regex_remap/regex_remap.cc b/plugins/regex_remap/regex_remap.cc index 0fca36cd97b..5a4e8958a61 100644 --- a/plugins/regex_remap/regex_remap.cc +++ b/plugins/regex_remap/regex_remap.cc @@ -24,17 +24,17 @@ #include "ts/remap.h" #include -#include -#include -#include +#include +#include +#include -#include +#include #include #include #include #include -#include +#include // Get some specific stuff from libts, yes, we can do that now that we build inside the core. 
#include "ts/ink_platform.h" @@ -326,7 +326,7 @@ RemapRegex::initialize(const std::string ®, const std::string &sub, const std _next = nullptr; // Parse options - std::string::size_type start = opt.find_first_of("@"); + std::string::size_type start = opt.find_first_of('@'); std::string::size_type pos1, pos2; Override *last_override = nullptr; @@ -334,7 +334,7 @@ RemapRegex::initialize(const std::string ®, const std::string &sub, const std std::string opt_val; ++start; - pos1 = opt.find_first_of("=", start); + pos1 = opt.find_first_of('=', start); pos2 = opt.find_first_of(" \t\n", pos1); if (pos2 == std::string::npos) { pos2 = opt.length(); @@ -406,7 +406,7 @@ RemapRegex::initialize(const std::string ®, const std::string &sub, const std TSError("[%s] Unknown options: %s", PLUGIN_NAME, opt.c_str()); } } - start = opt.find_first_of("@", pos2); + start = opt.find_first_of('@', pos2); } return true; diff --git a/plugins/regex_revalidate/regex_revalidate.c b/plugins/regex_revalidate/regex_revalidate.c index 94eaa01f90f..1c352b81333 100644 --- a/plugins/regex_revalidate/regex_revalidate.c +++ b/plugins/regex_revalidate/regex_revalidate.c @@ -362,7 +362,7 @@ config_handler(TSCont cont, TSEvent event ATS_UNUSED, void *edata ATS_UNUSED) iptr = __sync_val_compare_and_swap(&(pstate->invalidate_list), pstate->invalidate_list, i); if (iptr) { - free_cont = TSContCreate(free_handler, NULL); + free_cont = TSContCreate(free_handler, TSMutexCreate()); TSContDataSet(free_cont, (void *)iptr); TSContSchedule(free_cont, FREE_TMOUT, TS_THREAD_POOL_TASK); } diff --git a/plugins/s3_auth/Makefile.inc b/plugins/s3_auth/Makefile.inc index eb6388773e3..7865d5e61c4 100644 --- a/plugins/s3_auth/Makefile.inc +++ b/plugins/s3_auth/Makefile.inc @@ -15,4 +15,4 @@ # limitations under the License. 
pkglib_LTLIBRARIES += s3_auth/s3_auth.la -s3_auth_s3_auth_la_SOURCES = s3_auth/s3_auth.cc +s3_auth_s3_auth_la_SOURCES = s3_auth/s3_auth.cc s3_auth/aws_auth_v4.cc diff --git a/plugins/s3_auth/aws_auth_v4.cc b/plugins/s3_auth/aws_auth_v4.cc new file mode 100644 index 00000000000..5b8708acf34 --- /dev/null +++ b/plugins/s3_auth/aws_auth_v4.cc @@ -0,0 +1,690 @@ +/* + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/** + * @file aws_auth_v4.cc + * @brief AWS Auth v4 signing utility. + * @see aws_auth_v4.h + */ + +#include /* strlen() */ +#include /* strftime(), time(), gmtime_r() */ +#include /* std::setw */ +#include /* std::stringstream */ +#include /* SHA(), sha256_Update(), SHA256_Final, etc. */ +#include /* HMAC() */ + +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT +#include +#endif + +#include "aws_auth_v4.h" + +/** + * @brief Lower-case Base16 encode a character string (hexadecimal format) + * + * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + * Base16 RFC4648: https://tools.ietf.org/html/rfc4648#section-8 + * + * @param in ptr to an input counted string to be base16 encoded. + * @param inLen input character string length + * @return base16 encoded string. 
+ */ +String +base16Encode(const char *in, size_t inLen) +{ + if (nullptr == in || inLen == 0) { + return {}; + } + + std::stringstream result; + + const char *src = in; + const char *srcEnd = in + inLen; + + while (src < srcEnd) { + result << std::setfill('0') << std::setw(2) << std::hex << static_cast((*src) & 0xFF); + src++; + } + return result.str(); +} + +/** + * @brief URI-encode a character string (AWS specific version, see spec) + * + * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + * + * @param in string to be URI encoded + * @param isObjectName if true don't encode '/', keep it as it is. + * @return encoded string. + */ +String +uriEncode(const String &in, bool isObjectName) +{ + std::stringstream result; + + for (char i : in) { + if (isalnum(i) || i == '-' || i == '_' || i == '.' || i == '~') { + /* URI encode every byte except the unreserved characters: + * 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'. */ + result << i; + } else if (i == ' ') { + /* The space character is a reserved character and must be encoded as "%20" (and not as "+"). */ + result << "%20"; + } else if (isObjectName && i == '/') { + /* Encode the forward slash character, '/', everywhere except in the object key name. */ + result << "/"; + } else { + /* Letters in the hexadecimal value must be upper-case, for example "%1A". */ + result << "%" << std::uppercase << std::setfill('0') << std::setw(2) << std::hex << (int)i; + } + } + + return result.str(); +} + +/** + * @brief trim the white-space character from the beginning and the end of the string ("in-place", just moving pointers around) + * + * @param in ptr to an input string + * @param inLen input character count + * @param newLen trimmed string character count. + * @return pointer to the trimmed string. 
+ */ +const char * +trimWhiteSpaces(const char *in, size_t inLen, size_t &newLen) +{ + if (nullptr == in || inLen == 0) { + return in; + } + + const char *first = in; + while (size_t(first - in) < inLen && isspace(*first)) { + first++; + } + + const char *last = in + inLen - 1; + while (last > in && isspace(*last)) { + last--; + } + + newLen = last - first + 1; + return first; +} + +/** + * @brief Trim white spaces from beginning and end. + * @returns trimmed string + */ +String +trimWhiteSpaces(const String &s) +{ + /* @todo do this better? */ + static const String whiteSpace = " \t\n\v\f\r"; + size_t start = s.find_first_not_of(whiteSpace); + if (String::npos == start) { + return String(); + } + size_t stop = s.find_last_not_of(whiteSpace); + return s.substr(start, stop - start + 1); +} + +/* + * Group of static inline helper function for less error prone parameter handling and unit test logging. + */ +inline static void +sha256Update(SHA256_CTX *ctx, const char *in, size_t inLen) +{ + SHA256_Update(ctx, in, inLen); +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << String(in, inLen); +#endif +} + +inline static void +sha256Update(SHA256_CTX *ctx, const char *in) +{ + sha256Update(ctx, in, strlen(in)); +} + +inline static void +sha256Update(SHA256_CTX *ctx, const String &in) +{ + sha256Update(ctx, in.c_str(), in.length()); +} + +inline static void +sha256Final(unsigned char hex[SHA256_DIGEST_LENGTH], SHA256_CTX *ctx) +{ + SHA256_Final(hex, ctx); +} + +/** + * @brief: Payload SHA 256 = Hex(SHA256Hash() (no new-line char at end) + * + * @todo support for signing of PUSH, POST content / payload + * @param signPayload specifies whether the content / payload should be signed + * @return signature of the content or "UNSIGNED-PAYLOAD" to mark that the payload is not signed + */ +String +getPayloadSha256(bool signPayload) +{ + static const String UNSIGNED_PAYLOAD("UNSIGNED-PAYLOAD"); + + if (!signPayload) { + return UNSIGNED_PAYLOAD; + } + + unsigned char 
payloadHash[SHA256_DIGEST_LENGTH]; + SHA256((const unsigned char *)"", 0, payloadHash); /* empty content */ + + return base16Encode((char *)payloadHash, SHA256_DIGEST_LENGTH); +} + +/** + * @brief Get Canonical Uri SHA256 Hash + * + * Hex(SHA256Hash()) + * AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + * + * @param api an TS API wrapper that will provide interface to HTTP request elements (method, path, query, headers, etc). + * @param signPayload specifies if the content / payload should be signed. + * @param includeHeaders headers that must be signed + * @param excludeHeaders headers that must not be signed + * @param signedHeaders a reference to a string to which the signed headers names will be appended + * @return SHA256 hash of the canonical request. + */ +String +getCanonicalRequestSha256Hash(TsInterface &api, bool signPayload, const StringSet &includeHeaders, const StringSet &excludeHeaders, + String &signedHeaders) +{ + int length; + const char *str = nullptr; + unsigned char canonicalRequestSha256Hash[SHA256_DIGEST_LENGTH]; + SHA256_CTX canonicalRequestSha256Ctx; + + SHA256_Init(&canonicalRequestSha256Ctx); + +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << ""; +#endif + + /* \n */ + str = api.getMethod(&length); + sha256Update(&canonicalRequestSha256Ctx, str, length); + sha256Update(&canonicalRequestSha256Ctx, "\n"); + + /* URI Encoded Canonical URI + * \n */ + str = api.getPath(&length); + String path("/"); + path.append(str, length); + String canonicalUri = uriEncode(path, /* isObjectName */ true); + sha256Update(&canonicalRequestSha256Ctx, canonicalUri); + sha256Update(&canonicalRequestSha256Ctx, "\n"); + + /* Sorted Canonical Query String + * \n */ + const char *query = api.getQuery(&length); + + StringSet paramNames; + StringMap paramsMap; + std::istringstream istr(String(query, length)); + String token; + StringSet container; + + while (std::getline(istr, token, '&')) { + String::size_type 
pos(token.find_first_of('=')); + String param(token.substr(0, pos == String::npos ? token.size() : pos)); + String value(pos == String::npos ? "" : token.substr(pos + 1, token.size())); + + String encodedParam = uriEncode(param, /* isObjectName */ false); + + paramNames.insert(encodedParam); + paramsMap[encodedParam] = uriEncode(value, /* isObjectName */ false); + } + + String queryStr; + for (const auto ¶mName : paramNames) { + if (!queryStr.empty()) { + queryStr.append("&"); + } + queryStr.append(paramName); + queryStr.append("=").append(paramsMap[paramName]); + } + sha256Update(&canonicalRequestSha256Ctx, queryStr); + sha256Update(&canonicalRequestSha256Ctx, "\n"); + + /* Sorted Canonical Headers + * \n */ + StringSet signedHeadersSet; + StringMap headersMap; + + for (HeaderIterator it = api.headerBegin(); it != api.headerEnd(); it++) { + int nameLen; + int valueLen; + const char *name = it.getName(&nameLen); + const char *value = it.getValue(&valueLen); + + if (nullptr == name || 0 == nameLen) { + continue; + } + + String lowercaseName(name, nameLen); + std::transform(lowercaseName.begin(), lowercaseName.end(), lowercaseName.begin(), ::tolower); + + /* Host, content-type and x-amx-* headers are mandatory */ + bool xAmzHeader = (lowercaseName.length() >= X_AMZ.length() && 0 == lowercaseName.compare(0, X_AMZ.length(), X_AMZ)); + bool contentTypeHeader = (0 == CONTENT_TYPE.compare(lowercaseName)); + bool hostHeader = (0 == HOST.compare(lowercaseName)); + if (!xAmzHeader && !contentTypeHeader && !hostHeader) { + /* Skip internal headers (starting with '@'*/ + if ('@' == name[0] /* exclude internal headers */) { + continue; + } + + /* @todo do better here, since iterating over the headers in ATS is known to be less efficient, + * come up with a better way if include headers set is non-empty */ + bool include = + (!includeHeaders.empty() && includeHeaders.end() != includeHeaders.find(lowercaseName)); /* requested to be included */ + bool exclude = + 
(!excludeHeaders.empty() && excludeHeaders.end() != excludeHeaders.find(lowercaseName)); /* requested to be excluded */ + + if ((includeHeaders.empty() && exclude) || (!includeHeaders.empty() && (!include || exclude))) { +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << "ignore header: " << String(name, nameLen) << std::endl; +#endif + continue; + } + } + + size_t trimValueLen = 0; + const char *trimValue = trimWhiteSpaces(value, valueLen, trimValueLen); + + signedHeadersSet.insert(lowercaseName); + headersMap[lowercaseName] = String(trimValue, trimValueLen); + } + + for (const auto &it : signedHeadersSet) { + sha256Update(&canonicalRequestSha256Ctx, it); + sha256Update(&canonicalRequestSha256Ctx, ":"); + sha256Update(&canonicalRequestSha256Ctx, headersMap[it]); + sha256Update(&canonicalRequestSha256Ctx, "\n"); + } + sha256Update(&canonicalRequestSha256Ctx, "\n"); + + for (const auto &it : signedHeadersSet) { + if (!signedHeaders.empty()) { + signedHeaders.append(";"); + } + signedHeaders.append(it); + } + + sha256Update(&canonicalRequestSha256Ctx, signedHeaders); + sha256Update(&canonicalRequestSha256Ctx, "\n"); + + /* Hex(SHA256Hash() (no new-line char at end) + * @TODO support non-empty content, i.e. POST */ + String payloadSha256Hash = getPayloadSha256(signPayload); + sha256Update(&canonicalRequestSha256Ctx, payloadSha256Hash); + + /* Hex(SHA256Hash()) */ + sha256Final(canonicalRequestSha256Hash, &canonicalRequestSha256Ctx); +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << "" << std::endl; +#endif + return base16Encode((char *)canonicalRequestSha256Hash, SHA256_DIGEST_LENGTH); +} + +/** + * @brief Default AWS entry-point host name to region based on (S3): + * + * @see http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + * it is used to get the region programmatically w/o configuration + * parameters and can (meant to) be overwritten if necessary. 
+ */ +const StringMap +createDefaultRegionMap() +{ + StringMap m; + /* us-east-2 */ + m["s3.us-east-2.amazonaws.com"] = "us-east-2"; + m["s3-us-east-2.amazonaws.com"] = "us-east-2"; + m["s3.dualstack.us-east-2.amazonaws.com"] = "us-east-2"; + /* "us-east-1" */ + m["s3.amazonaws.com"] = "us-east-1"; + m["s3-external-1.amazonaws.com"] = "us-east-1"; + m["s3.dualstack.us-east-1.amazonaws.com"] = "us-east-1"; + /* us-west-1 */ + m["s3-us-west-1.amazonaws.com"] = "us-west-1"; + m["s3.dualstack.us-west-1.amazonaws.com"] = "us-west-1"; + /* us-west-2 */ + m["s3-us-west-2.amazonaws.com"] = "us-west-2"; + m["s3.dualstack.us-west-2.amazonaws.com"] = "us-west-2"; + /* ca-central-1 */ + m["s3.ca-central-1.amazonaws.com"] = "ca-central-1"; + m["s3-ca-central-1.amazonaws.com"] = "ca-central-1"; + m["s3.dualstack.ca-central-1.amazonaws.com"] = "ca-central-1"; + /* ap-south-1 */ + m["s3.ap-south-1.amazonaws.com"] = "ap-south-1"; + m["s3-ap-south-1.amazonaws.com"] = "ap-south-1"; + m["s3.dualstack.ap-south-1.amazonaws.com"] = "ap-south-1"; + /* ap-northeast-2 */ + m["s3.ap-northeast-2.amazonaws.com"] = "ap-northeast-2"; + m["s3-ap-northeast-2.amazonaws.com"] = "ap-northeast-2"; + m["s3.dualstack.ap-northeast-2.amazonaws.com"] = "ap-northeast-2"; + /* ap-southeast-1 */ + m["s3-ap-southeast-1.amazonaws.com"] = "ap-southeast-1"; + m["s3.dualstack.ap-southeast-1.amazonaws.com"] = "ap-southeast-1"; + /* ap-southeast-2 */ + m["s3-ap-southeast-2.amazonaws.com"] = "ap-southeast-2"; + m["s3.dualstack.ap-southeast-2.amazonaws.com"] = "ap-southeast-2"; + /* ap-northeast-1 */ + m["s3-ap-northeast-1.amazonaws.com"] = "ap-northeast-1"; + m["s3.dualstack.ap-northeast-1.amazonaws.com"] = "ap-northeast-1"; + /* eu-central-1 */ + m["s3.eu-central-1.amazonaws.com"] = "eu-central-1"; + m["s3-eu-central-1.amazonaws.com"] = "eu-central-1"; + m["s3.dualstack.eu-central-1.amazonaws.com"] = "eu-central-1"; + /* eu-west-1 */ + m["s3-eu-west-1.amazonaws.com"] = "eu-central-1"; + 
m["s3.dualstack.eu-west-1.amazonaws.com"] = "eu-central-1"; + /* eu-west-2 */ + m["s3.eu-west-2.amazonaws.com"] = "eu-west-2"; + m["s3-eu-west-2.amazonaws.com"] = "eu-west-2"; + m["s3.dualstack.eu-west-2.amazonaws.com"] = "eu-west-2"; + /* sa-east-1 */ + m["s3-sa-east-1.amazonaws.com"] = "sa-east-1"; + m["s3.dualstack.sa-east-1.amazonaws.com"] = "sa-east-1"; + /* default "us-east-1" * */ + m[""] = "us-east-1"; + return m; +} +const StringMap defaultDefaultRegionMap = createDefaultRegionMap(); + +/** + * @description default list of headers to be excluded from the signing + */ +const StringSet +createDefaultExcludeHeaders() +{ + StringSet m; + /* exclude headers that are meant to be changed */ + m.insert("x-forwarded-for"); + m.insert("via"); + return m; +} +const StringSet defaultExcludeHeaders = createDefaultExcludeHeaders(); + +/** + * @description default list of headers to be included in the signing + */ +const StringSet +createDefaultIncludeHeaders() +{ + StringSet m; + return m; +} +const StringSet defaultIncludeHeaders = createDefaultIncludeHeaders(); + +/** + * @brief Get AWS (S3) region from the entry-point + * + * @see Implementation based on the following: + * http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html + * http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + * + * @param regionMap map containing entry-point to region mapping + * @param entryPoint entry-point name + * @param entryPointLen - entry point string length + */ +String +getRegion(const StringMap ®ionMap, const char *entryPoint, size_t entryPointLen) +{ + String region; + size_t dot = String::npos; + String hostname(entryPoint, entryPointLen); + + /* Start looking for a match from the top-level domain backwards to keep the mapping generic + * (so we can override it if we need later) */ + do { + String name; + dot = hostname.rfind('.', dot - 1); + if (String::npos != dot) { + name = hostname.substr(dot + 1); + } else { + name = hostname; + } + if 
(regionMap.end() != regionMap.find(name)) { + region = regionMap.at(name); + break; + } + } while (String::npos != dot); + + if (region.empty() && regionMap.end() != regionMap.find("")) { + region = regionMap.at(""); /* default region if nothing matches */ + } + + return region; +} + +/** + * @brief Constructs the string to sign + * + * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + + * @param entryPoint entry-point name + * @param entryPointLen entry-point name length + * @param dateTime - ISO 8601 time + * @param dateTimeLen - ISO 8601 time length + * @param region AWS region name + * @param region AWS region name length + * @param service service name + * @param serviceLen service name length + * @param sha256Hash canonical request SHA 256 hash + * @param sha256HashLen canonical request SHA 256 hash length + * @returns the string to sign + */ +String +getStringToSign(const char *entryPoint, size_t EntryPointLen, const char *dateTime, size_t dateTimeLen, const char *region, + size_t regionLen, const char *service, size_t serviceLen, const char *sha256Hash, size_t sha256HashLen) +{ + String stringToSign; + + /* AWS4-HMAC-SHA256\n (hard-coded, other values? */ + stringToSign.append("AWS4-HMAC-SHA256\n"); + + /* time stamp in ISO8601 format: \n */ + stringToSign.append(dateTime, dateTimeLen); + stringToSign.append("\n"); + + /* Scope: date.Format() + "/" + + "/" + + "/aws4_request" */ + stringToSign.append(dateTime, 8); /* Get only the YYYYMMDD */ + stringToSign.append("/"); + stringToSign.append(region, regionLen); + stringToSign.append("/"); + stringToSign.append(service, serviceLen); + stringToSign.append("/aws4_request\n"); + stringToSign.append(sha256Hash, sha256HashLen); + + return stringToSign; +} + +/** + * @brief Calculates the final signature based on the following parameters and base16 encodes it. 
+ * + * signing key = HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4" + "", ), + * ), ),"aws4_request") + * + * @see AWS spec: http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html + * + * @param awsSecret AWS secret + * @param awsSecretLen AWS secret length + * @param awsRegion AWS region + * @param awsRegionLen AWS region length + * @param awsService AWS Service name + * @param awsServiceLen AWS service name length + * @param dateTime ISO8601 date/time + * @param dateTimeLen ISO8601 date/time length + * @param stringToSign string to sign + * @param stringToSignLen length of the string to sign + * @param base16Signature output buffer where the base16 signature will be stored + * @param base16SignatureLen size of the signature buffer = EVP_MAX_MD_SIZE (at least) + * + * @return number of characters written to the output buffer + */ +size_t +getSignature(const char *awsSecret, size_t awsSecretLen, const char *awsRegion, size_t awsRegionLen, const char *awsService, + size_t awsServiceLen, const char *dateTime, size_t dateTimeLen, const char *stringToSign, size_t stringToSignLen, + char *signature, size_t signatureLen) +{ + unsigned int dateKeyLen = EVP_MAX_MD_SIZE; + unsigned char dateKey[EVP_MAX_MD_SIZE]; + unsigned int dateRegionKeyLen = EVP_MAX_MD_SIZE; + unsigned char dateRegionKey[EVP_MAX_MD_SIZE]; + unsigned int dateRegionServiceKeyLen = EVP_MAX_MD_SIZE; + unsigned char dateRegionServiceKey[EVP_MAX_MD_SIZE]; + unsigned int signingKeyLen = EVP_MAX_MD_SIZE; + unsigned char signingKey[EVP_MAX_MD_SIZE]; + + size_t keyLen = 4 + awsSecretLen; + char key[keyLen]; + memcpy(key, "AWS4", 4); + memcpy(key + 4, awsSecret, awsSecretLen); + + unsigned int len = signatureLen; + if (HMAC(EVP_sha256(), key, keyLen, (unsigned char *)dateTime, dateTimeLen, dateKey, &dateKeyLen) && + HMAC(EVP_sha256(), dateKey, dateKeyLen, (unsigned char *)awsRegion, awsRegionLen, dateRegionKey, &dateRegionKeyLen) && + HMAC(EVP_sha256(), dateRegionKey, 
dateRegionKeyLen, (unsigned char *)awsService, awsServiceLen, dateRegionServiceKey, + &dateRegionServiceKeyLen) && + HMAC(EVP_sha256(), dateRegionServiceKey, dateRegionServiceKeyLen, (unsigned char *)"aws4_request", 12, signingKey, + &signingKeyLen) && + HMAC(EVP_sha256(), signingKey, signingKeyLen, (unsigned char *)stringToSign, stringToSignLen, (unsigned char *)signature, + &len)) { + return len; + } + + return 0; +} + +/** + * @brief formats the time stamp in ISO8601 format: + */ +size_t +getIso8601Time(time_t *now, char *dateTime, size_t dateTimeLen) +{ + struct tm tm; + return strftime(dateTime, dateTimeLen, "%Y%m%dT%H%M%SZ", gmtime_r(now, &tm)); +} + +/** + * @brief formats the time stamp in ISO8601 format: + */ +const char * +AwsAuthV4::getDateTime(size_t *dateTimeLen) +{ + *dateTimeLen = sizeof(_dateTime) - 1; + return _dateTime; +} + +/** + * @brief: HTTP content / payload SHA 256 = Hex(SHA256Hash() + * @return signature of the content or "UNSIGNED-PAYLOAD" to mark that the payload is not signed + */ +String +AwsAuthV4::getPayloadHash() +{ + return getPayloadSha256(_signPayload); +} + +/** + * @brief Get the value of the Authorization header (AWS authorization) v4 + * @return the Authorization header value + */ +String +AwsAuthV4::getAuthorizationHeader() +{ + String signedHeaders; + String canonicalReq = getCanonicalRequestSha256Hash(_api, _signPayload, _includedHeaders, _excludedHeaders, signedHeaders); + + int hostLen = 0; + const char *host = _api.getHost(&hostLen); + + String awsRegion = getRegion(_regionMap, host, hostLen); + + String stringToSign = getStringToSign(host, hostLen, _dateTime, sizeof(_dateTime) - 1, awsRegion.c_str(), awsRegion.length(), + _awsService, _awsServiceLen, canonicalReq.c_str(), canonicalReq.length()); +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << "" << stringToSign << "" << std::endl; +#endif + + char signature[EVP_MAX_MD_SIZE]; + size_t signatureLen = + getSignature(_awsSecretAccessKey, _awsSecretAccessKeyLen, 
awsRegion.c_str(), awsRegion.length(), _awsService, _awsServiceLen, + _dateTime, 8, stringToSign.c_str(), stringToSign.length(), signature, EVP_MAX_MD_SIZE); + + String base16Signature = base16Encode(signature, signatureLen); +#ifdef AWS_AUTH_V4_DETAILED_DEBUG_OUTPUT + std::cout << "" << base16Signature << "" << std::endl; +#endif + + std::stringstream authorizationHeader; + authorizationHeader << "AWS4-HMAC-SHA256 "; + authorizationHeader << "Credential=" << String(_awsAccessKeyId, _awsAccessKeyIdLen) << "/" << String(_dateTime, 8) << "/" + << awsRegion << "/" << String(_awsService, _awsServiceLen) << "/" + << "aws4_request" + << ","; + authorizationHeader << "SignedHeaders=" << signedHeaders << ","; + authorizationHeader << "Signature=" << base16Signature; + + return authorizationHeader.str(); +} + +/** + * @brief Authorization v4 constructor + * + * @param api wrapper providing access to HTTP request elements (URI host, path, query, headers, etc.) + * @param now current time-stamp + * @param signPayload defines if the HTTP content / payload needs to be signed + * @param awsAccessKeyId AWS access key ID + * @param awsAccessKeyIdLen AWS access key ID length + * @param awsSecretAccessKey AWS secret + * @param awsSecretAccessKeyLen AWS secret length + * @param awsService AWS Service name + * @param awsServiceLen AWS service name length + * @param includeHeaders set of headers to be signed + * @param excludeHeaders set of headers not to be signed + * @param regionMap entry-point to AWS region mapping + */ +AwsAuthV4::AwsAuthV4(TsInterface &api, time_t *now, bool signPayload, const char *awsAccessKeyId, size_t awsAccessKeyIdLen, + const char *awsSecretAccessKey, size_t awsSecretAccessKeyLen, const char *awsService, size_t awsServiceLen, + const StringSet &includedHeaders, const StringSet &excludedHeaders, const StringMap ®ionMap) + : _api(api), + _signPayload(signPayload), + _awsAccessKeyId(awsAccessKeyId), + _awsAccessKeyIdLen(awsAccessKeyIdLen), + 
_awsSecretAccessKey(awsSecretAccessKey), + _awsSecretAccessKeyLen(awsSecretAccessKeyLen), + _awsService(awsService), + _awsServiceLen(awsServiceLen), + _includedHeaders(includedHeaders.empty() ? defaultIncludeHeaders : includedHeaders), + _excludedHeaders(excludedHeaders.empty() ? defaultExcludeHeaders : excludedHeaders), + _regionMap(regionMap.empty() ? defaultDefaultRegionMap : regionMap) +{ + getIso8601Time(now, _dateTime, sizeof(_dateTime)); +} diff --git a/plugins/s3_auth/aws_auth_v4.h b/plugins/s3_auth/aws_auth_v4.h new file mode 100644 index 00000000000..1959ddf818a --- /dev/null +++ b/plugins/s3_auth/aws_auth_v4.h @@ -0,0 +1,207 @@ +/* + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/** + * @file aws_auth_v4.h + * @brief AWS Auth v4 signing utility. 
+ * @see aws_auth_v4.cc + */ + +#ifndef PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_ +#define PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_ + +#include /* transform() */ +#include /* soze_t */ +#include /* std::string */ +#include /* std::stringstream */ +#include /* std::map */ +#include /* std::set */ + +#include + +typedef std::string String; +typedef std::set StringSet; +typedef std::map StringMap; + +class HeaderIterator; + +class TsInterface +{ +public: + virtual ~TsInterface(){}; + virtual const char *getMethod(int *length) = 0; + virtual const char *getHost(int *length) = 0; + virtual const char *getPath(int *length) = 0; + virtual const char *getQuery(int *length) = 0; + virtual HeaderIterator headerBegin() = 0; + virtual HeaderIterator headerEnd() = 0; +}; + +/* Define a header iterator to be used in the plugin using ATS API */ +class HeaderIterator +{ +public: + HeaderIterator() : _bufp(nullptr), _hdrs(TS_NULL_MLOC), _field(TS_NULL_MLOC) {} + HeaderIterator(TSMBuffer bufp, TSMLoc hdrs, TSMLoc field) : _bufp(bufp), _hdrs(hdrs), _field(field) {} + HeaderIterator(const HeaderIterator &it) + { + _bufp = it._bufp; + _hdrs = it._hdrs; + _field = it._field; + } + ~HeaderIterator() {} + HeaderIterator & + operator=(HeaderIterator &it) + { + _bufp = it._bufp; + _hdrs = it._hdrs; + _field = it._field; + return *this; + } + HeaderIterator &operator++() + { + /* @todo this is said to be slow in the API call comments, do something better here */ + TSMLoc next = TSMimeHdrFieldNext(_bufp, _hdrs, _field); + TSHandleMLocRelease(_bufp, _hdrs, _field); + _field = next; + return *this; + } + HeaderIterator operator++(int) + { + HeaderIterator tmp(*this); + operator++(); + return tmp; + } + bool + operator!=(const HeaderIterator &it) + { + return _bufp != it._bufp || _hdrs != it._hdrs || _field != it._field; + } + bool + operator==(const HeaderIterator &it) + { + return _bufp == it._bufp && _hdrs == it._hdrs && _field == it._field; + } + const char * + getName(int *len) + { + return 
TSMimeHdrFieldNameGet(_bufp, _hdrs, _field, len); + } + const char * + getValue(int *len) + { + return TSMimeHdrFieldValueStringGet(_bufp, _hdrs, _field, -1, len); + } + TSMBuffer _bufp; + TSMLoc _hdrs; + TSMLoc _field; +}; + +/* Define a API to be used in the plugin using ATS API */ +class TsApi : public TsInterface +{ +public: + TsApi(TSMBuffer bufp, TSMLoc hdrs, TSMLoc url) : _bufp(bufp), _hdrs(hdrs), _url(url) {} + ~TsApi() {} + const char * + getMethod(int *len) + { + return TSHttpHdrMethodGet(_bufp, _hdrs, len); + } + const char * + getHost(int *len) + { + return TSHttpHdrHostGet(_bufp, _hdrs, len); + } + const char * + getPath(int *len) + { + return TSUrlPathGet(_bufp, _url, len); + } + const char * + getQuery(int *len) + { + return TSUrlHttpQueryGet(_bufp, _url, len); + } + HeaderIterator + headerBegin() + { + return HeaderIterator(_bufp, _hdrs, TSMimeHdrFieldGet(_bufp, _hdrs, 0)); + } + HeaderIterator + headerEnd() + { + return HeaderIterator(_bufp, _hdrs, TS_NULL_MLOC); + } + TSMBuffer _bufp; + TSMLoc _hdrs; + TSMLoc _url; +}; + +/* S3 auth v4 utility API */ + +static const String X_AMZ_CONTENT_SHA256 = "x-amz-content-sha256"; +static const String X_AMX_DATE = "x-amz-date"; +static const String X_AMZ = "x-amz-"; +static const String CONTENT_TYPE = "content-type"; +static const String HOST = "host"; + +String trimWhiteSpaces(const String &s); + +template +void +commaSeparateString(ContainerType &ss, const String &input, bool trim = true, bool lowerCase = true) +{ + std::istringstream istr(input); + String token; + + while (std::getline(istr, token, ',')) { + token = trim ? 
trimWhiteSpaces(token) : token; + if (lowerCase) { + std::transform(token.begin(), token.end(), token.begin(), ::tolower); + } + ss.insert(ss.end(), token); + } +} + +class AwsAuthV4 +{ +public: + AwsAuthV4(TsInterface &api, time_t *now, bool signPayload, const char *awsAccessKeyId, size_t awsAccessKeyIdLen, + const char *awsSecretAccessKey, size_t awsSecretAccessKeyLen, const char *awsService, size_t awsServiceLen, + const StringSet &includedHeaders, const StringSet &excludedHeaders, const StringMap ®ionMap); + const char *getDateTime(size_t *dateTimeLen); + String getPayloadHash(); + String getAuthorizationHeader(); + +private: + TsInterface &_api; + char _dateTime[sizeof "20170428T010203Z"]; + bool _signPayload = false; + const char *_awsAccessKeyId = nullptr; + size_t _awsAccessKeyIdLen = 0; + const char *_awsSecretAccessKey = nullptr; + size_t _awsSecretAccessKeyLen = 0; + const char *_awsService = nullptr; + size_t _awsServiceLen = 0; + + const StringSet &_includedHeaders; + const StringSet &_excludedHeaders; + const StringMap &_regionMap; +}; +#endif /* PLUGINS_S3_AUTH_AWS_AUTH_V4_CC_ */ diff --git a/plugins/s3_auth/s3_auth.cc b/plugins/s3_auth/s3_auth.cc index 033b13af04d..57f6d5b1f8f 100644 --- a/plugins/s3_auth/s3_auth.cc +++ b/plugins/s3_auth/s3_auth.cc @@ -20,19 +20,29 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include -#include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include + +#include /* std::ifstream */ +#include +#include #include #include #include #include +#include + +// Special snowflake here, only availbale when building inside the ATS source tree. +#include "ts/ink_atomic.h" +#include "aws_auth_v4.h" /////////////////////////////////////////////////////////////////////////////// // Some constants. 
@@ -40,6 +50,109 @@ static const char PLUGIN_NAME[] = "s3_auth"; static const char DATE_FMT[] = "%a, %d %b %Y %H:%M:%S %z"; +/** + * @brief Rebase a relative path onto the configuration directory. + */ +static String +makeConfigPath(const String &path) +{ + if (path.empty() || path[0] == '/') { + return path; + } + + return String(TSConfigDirGet()) + "/" + path; +} + +/** + * @brief a helper function which loads the entry-point to region from files. + * @param args classname + filename in ':' format. + * @return true if successful, false otherwise. + */ +static bool +loadRegionMap(StringMap &m, const String &filename) +{ + static const char *EXPECTED_FORMAT = ":"; + + String path(makeConfigPath(filename)); + + std::ifstream ifstr; + String line; + unsigned lineno = 0; + + ifstr.open(path.c_str()); + if (!ifstr) { + TSError("[%s] failed to load s3-region map from '%s'", PLUGIN_NAME, path.c_str()); + return false; + } + + TSDebug(PLUGIN_NAME, "loading region mapping from '%s'", path.c_str()); + + m[""] = ""; /* set a default just in case if the user does not specify it */ + + while (std::getline(ifstr, line)) { + String::size_type pos; + + ++lineno; + + // Allow #-prefixed comments. 
+ pos = line.find_first_of('#'); + if (pos != String::npos) { + line.resize(pos); + } + + if (line.empty()) { + continue; + } + + std::size_t d = line.find(':'); + if (String::npos == d) { + TSError("[%s] failed to parse region map string '%s', expected format: '%s'", PLUGIN_NAME, line.c_str(), EXPECTED_FORMAT); + return false; + } + + String entrypoint(trimWhiteSpaces(String(line, 0, d))); + String region(trimWhiteSpaces(String(line, d + 1, String::npos))); + + if (region.empty()) { + TSDebug(PLUGIN_NAME, " in '%s' cannot be empty (skipped), expected format: '%s'", line.c_str(), EXPECTED_FORMAT); + continue; + } + + if (entrypoint.empty()) { + TSDebug(PLUGIN_NAME, "added default region %s", region.c_str()); + } else { + TSDebug(PLUGIN_NAME, "added entry-point:%s, region:%s", entrypoint.c_str(), region.c_str()); + } + + m[entrypoint] = region; + } + + if (m.at("").empty()) { + TSDebug(PLUGIN_NAME, "default region was not defined"); + } + + ifstr.close(); + return true; +} + +/////////////////////////////////////////////////////////////////////////////// +// Cache for the secrets file, to avoid reading / loding them repeatedly on +// a reload of remap.config. This gets cached for 60s (not configurable). 
+// +class S3Config; + +class ConfigCache +{ +public: + S3Config *get(const char *fname); + +private: + std::unordered_map> _cache; + static const int _ttl = 60; +}; + +ConfigCache gConfCache; + /////////////////////////////////////////////////////////////////////////////// // One configuration setup // @@ -48,10 +161,12 @@ int event_handler(TSCont, TSEvent, void *); // Forward declaration class S3Config { public: - S3Config() : _secret(nullptr), _secret_len(0), _keyid(nullptr), _keyid_len(0), _virt_host(false), _version(2), _cont(nullptr) + S3Config(bool get_cont = true) { - _cont = TSContCreate(event_handler, nullptr); - TSContDataSet(_cont, static_cast(this)); + if (get_cont) { + _cont = TSContCreate(event_handler, nullptr); + TSContDataSet(_cont, static_cast(this)); + } } ~S3Config() @@ -59,14 +174,95 @@ class S3Config _secret_len = _keyid_len = 0; TSfree(_secret); TSfree(_keyid); - TSContDestroy(_cont); + if (_cont) { + TSContDestroy(_cont); + } } // Is this configuration usable? bool valid() const { - return _secret && (_secret_len > 0) && _keyid && (_keyid_len > 0) && (2 == _version); + /* Check mandatory parameters first */ + if (!_secret || !(_secret_len > 0) || !_keyid || !(_keyid_len > 0) || (2 != _version && 4 != _version)) { + return false; + } + + /* Optional parameters, issue warning if v2 parameters are used with v4 and vice-versa (wrong parameters are ignored anyways) */ + if (2 == _version) { + if (_v4includeHeaders_modified && !_v4includeHeaders.empty()) { + TSError("[%s] headers are not being signed with AWS auth v2, included headers parameter ignored", PLUGIN_NAME); + } + if (_v4excludeHeaders_modified && !_v4excludeHeaders.empty()) { + TSError("[%s] headers are not being signed with AWS auth v2, excluded headers parameter ignored", PLUGIN_NAME); + } + if (_region_map_modified && !_region_map.empty()) { + TSError("[%s] region map is not used with AWS auth v2, parameter ignored", PLUGIN_NAME); + } + } else { + /* 4 == _version */ + if 
(_virt_host_modified) { + TSError("[%s] virtual host not used with AWS auth v4, parameter ignored", PLUGIN_NAME); + } + } + return true; + } + + void + acquire() + { + ink_atomic_increment(&_ref_count, 1); + } + + void + release() + { + TSDebug(PLUGIN_NAME, "ref_count is %d", _ref_count); + if (1 >= ink_atomic_decrement(&_ref_count, 1)) { + TSDebug(PLUGIN_NAME, "configuration deleted, due to ref-counting"); + delete this; + } + } + + // Used to copy relevant configurations that can be configured in a config file. Note: we intentionally + // don't override/use the assignment operator, since we only copy things IF they have been modified. + void + copy_changes_from(const S3Config *src) + { + if (src->_secret) { + _secret = TSstrdup(src->_secret); + _secret_len = src->_secret_len; + } + + if (src->_keyid) { + _keyid = TSstrdup(src->_keyid); + _keyid_len = src->_keyid_len; + } + + if (src->_version_modified) { + _version = src->_version; + _version_modified = true; + } + + if (src->_virt_host_modified) { + _virt_host = src->_virt_host; + _virt_host_modified = true; + } + + if (src->_v4includeHeaders_modified) { + _v4includeHeaders = src->_v4includeHeaders; + _v4includeHeaders_modified = true; + } + + if (src->_v4excludeHeaders_modified) { + _v4excludeHeaders = src->_v4excludeHeaders; + _v4excludeHeaders_modified = true; + } + + if (src->_region_map_modified) { + _region_map = src->_region_map; + _region_map_modified = true; + } } // Getters @@ -75,27 +271,55 @@ class S3Config { return _virt_host; } + const char * secret() const { return _secret; } + const char * keyid() const { return _keyid; } + int secret_len() const { return _secret_len; } + int keyid_len() const { return _keyid_len; } + int + version() const + { + return _version; + } + + const StringSet & + v4includeHeaders() + { + return _v4includeHeaders; + } + + const StringSet & + v4excludeHeaders() + { + return _v4excludeHeaders; + } + + const StringMap & + v4RegionMap() + { + return _region_map; + } + // 
Setters void set_secret(const char *s) @@ -114,16 +338,43 @@ class S3Config void set_virt_host(bool f = true) { - _virt_host = f; + _virt_host = f; + _virt_host_modified = true; } void set_version(const char *s) { - _version = strtol(s, nullptr, 10); + _version = strtol(s, nullptr, 10); + _version_modified = true; + } + + void + set_include_headers(const char *s) + { + ::commaSeparateString(_v4includeHeaders, s); + _v4includeHeaders_modified = true; + } + + void + set_exclude_headers(const char *s) + { + ::commaSeparateString(_v4excludeHeaders, s); + _v4excludeHeaders_modified = true; + + /* Exclude headers that are meant to be changed */ + _v4excludeHeaders.insert("x-forwarded-for"); + _v4excludeHeaders.insert("via"); + } + + void + set_region_map(const char *s) + { + loadRegionMap(_region_map, s); + _region_map_modified = true; } // Parse configs from an external file - bool parse_config(const char *config); + bool parse_config(const std::string &filename); // This should be called from the remap plugin, to setup the TXN hook for // SEND_REQUEST_HDR, such that we always attach the appropriate S3 auth. 
@@ -131,37 +382,40 @@ class S3Config schedule(TSHttpTxn txnp) const { TSHttpTxnHookAdd(txnp, TS_HTTP_SEND_REQUEST_HDR_HOOK, _cont); + TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, _cont); // To release the config lease } private: - char *_secret; - size_t _secret_len; - char *_keyid; - size_t _keyid_len; - bool _virt_host; - int _version; - TSCont _cont; + char *_secret = nullptr; + size_t _secret_len = 0; + char *_keyid = nullptr; + size_t _keyid_len = 0; + bool _virt_host = false; + int _version = 2; + bool _version_modified = false; + bool _virt_host_modified = false; + TSCont _cont = nullptr; + volatile int _ref_count = 1; + StringSet _v4includeHeaders; + bool _v4includeHeaders_modified = false; + StringSet _v4excludeHeaders; + bool _v4excludeHeaders_modified = false; + StringMap _region_map; + bool _region_map_modified = false; }; bool -S3Config::parse_config(const char *config) +S3Config::parse_config(const std::string &config_fname) { - if (!config) { + if (0 == config_fname.size()) { TSError("[%s] called without a config file, this is broken", PLUGIN_NAME); return false; } else { - char filename[PATH_MAX + 1]; - - if (*config != '/') { - snprintf(filename, sizeof(filename) - 1, "%s/%s", TSConfigDirGet(), config); - config = filename; - } - char line[512]; // These are long lines ... 
- FILE *file = fopen(config, "r"); + FILE *file = fopen(config_fname.c_str(), "r"); if (nullptr == file) { - TSError("[%s] unable to open %s", PLUGIN_NAME, config); + TSError("[%s] unable to open %s", PLUGIN_NAME, config_fname.c_str()); return false; } @@ -196,6 +450,12 @@ S3Config::parse_config(const char *config) set_version(pos2 + 8); } else if (0 == strncasecmp(pos2, "virtual_host", 12)) { set_virt_host(); + } else if (0 == strncasecmp(pos2, "v4-include-headers=", 19)) { + set_include_headers(pos2 + 19); + } else if (0 == strncasecmp(pos2, "v4-exclude-headers=", 19)) { + set_exclude_headers(pos2 + 19); + } else if (0 == strncasecmp(pos2, "v4-region-map=", 14)) { + set_region_map(pos2 + 14); } else { // ToDo: warnings? } @@ -207,6 +467,63 @@ S3Config::parse_config(const char *config) return true; } +/////////////////////////////////////////////////////////////////////////////// +// Implementation for the ConfigCache, it has to go here since we have a sort +// of circular dependency. Note that we always parse / get the configuration +// for the file, either from cache or by making one. The user of this just +// has to copy the relevant portions, but should not use the returned object +// directly (i.e. it must be copied). +// +S3Config * +ConfigCache::get(const char *fname) +{ + struct timeval tv; + + gettimeofday(&tv, nullptr); + + // Make sure the filename is an absolute path, prepending the config dir if needed + std::string config_fname = makeConfigPath(fname); + + auto it = _cache.find(config_fname); + + if (it != _cache.end()) { + if (tv.tv_sec > (it->second.second + _ttl)) { + // Update the cached configuration file. 
+ S3Config *s3 = new S3Config(false); // false == this config does not get the continuation + + TSDebug(PLUGIN_NAME, "Configuration from %s is stale, reloading", config_fname.c_str()); + it->second.second = tv.tv_sec; + it->second.first->release(); + if (s3->parse_config(config_fname)) { + it->second.first = s3; + } else { + // Failed the configuration parse... Set the cache response to nullptr + s3->release(); + it->second.first = nullptr; + } + } else { + TSDebug(PLUGIN_NAME, "Configuration from %s is fresh, reusing", config_fname.c_str()); + } + return it->second.first; + } else { + // Create a new cached file. + S3Config *s3 = new S3Config(false); // false == this config does not get the continuation + + if (s3->parse_config(config_fname)) { + _cache[config_fname] = std::make_pair(s3, tv.tv_sec); + TSDebug(PLUGIN_NAME, "Parsing and caching configuration from %s, version:%d", config_fname.c_str(), s3->version()); + } else { + s3->release(); + return nullptr; + } + + return s3; + } + + TSAssert(!"Configuration parsing / caching failed"); + return nullptr; +} + /////////////////////////////////////////////////////////////////////////////// // This class is used to perform the S3 auth generation. 
// @@ -233,6 +550,8 @@ class S3Request return true; } + TSHttpStatus authorizeV2(S3Config *s3); + TSHttpStatus authorizeV4(S3Config *s3); TSHttpStatus authorize(S3Config *s3); bool set_header(const char *header, int header_len, const char *val, int val_len); @@ -305,6 +624,55 @@ str_concat(char *dst, size_t dst_len, const char *src, size_t src_len) return to_copy; } +TSHttpStatus +S3Request::authorize(S3Config *s3) +{ + TSHttpStatus status = TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + switch (s3->version()) { + case 2: + status = authorizeV2(s3); + break; + case 4: + status = authorizeV4(s3); + break; + default: + break; + } + return status; +} + +TSHttpStatus +S3Request::authorizeV4(S3Config *s3) +{ + TsApi api(_bufp, _hdr_loc, _url_loc); + time_t now = time(nullptr); + + AwsAuthV4 util(api, &now, /* signPayload */ false, s3->keyid(), s3->keyid_len(), s3->secret(), s3->secret_len(), "s3", 2, + s3->v4includeHeaders(), s3->v4excludeHeaders(), s3->v4RegionMap()); + String payloadHash = util.getPayloadHash(); + if (!set_header(X_AMZ_CONTENT_SHA256.c_str(), X_AMZ_CONTENT_SHA256.length(), payloadHash.c_str(), payloadHash.length())) { + return TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + } + + /* set x-amz-date header */ + size_t dateTimeLen = 0; + const char *dateTime = util.getDateTime(&dateTimeLen); + if (!set_header(X_AMX_DATE.c_str(), X_AMX_DATE.length(), dateTime, dateTimeLen)) { + return TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + } + + String auth = util.getAuthorizationHeader(); + if (auth.empty()) { + return TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + } + + if (!set_header(TS_MIME_FIELD_AUTHORIZATION, TS_MIME_LEN_AUTHORIZATION, auth.c_str(), auth.length())) { + return TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + } + + return TS_HTTP_STATUS_OK; +} + // Method to authorize the S3 request: // // StringToSign = HTTP-VERB + "\n" + @@ -323,7 +691,7 @@ str_concat(char *dst, size_t dst_len, const char *src, size_t src_len) // Note: This assumes that the URI path has been appropriately 
canonicalized by remapping // TSHttpStatus -S3Request::authorize(S3Config *s3) +S3Request::authorizeV2(S3Config *s3) { TSHttpStatus status = TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; TSMLoc host_loc = TS_NULL_MLOC, md5_loc = TS_NULL_MLOC, contype_loc = TS_NULL_MLOC; @@ -417,7 +785,7 @@ S3Request::authorize(S3Config *s3) } // Produce the SHA1 MAC digest -#if OPENSSL_VERSION_NUMBER < 0x10100000L +#ifndef HAVE_HMAC_CTX_NEW HMAC_CTX ctx[1]; #else HMAC_CTX *ctx; @@ -427,7 +795,7 @@ S3Request::authorize(S3Config *s3) unsigned char hmac[SHA_DIGEST_LENGTH]; char hmac_b64[SHA_DIGEST_LENGTH * 2]; -#if OPENSSL_VERSION_NUMBER < 0x10100000L +#ifndef HAVE_HMAC_CTX_NEW HMAC_CTX_init(ctx); #else ctx = HMAC_CTX_new(); @@ -454,7 +822,7 @@ S3Request::authorize(S3Config *s3) } HMAC_Final(ctx, hmac, &hmac_len); -#if OPENSSL_VERSION_NUMBER < 0x10100000L +#ifndef HAVE_HMAC_CTX_NEW HMAC_CTX_cleanup(ctx); #else HMAC_CTX_free(ctx); @@ -482,25 +850,39 @@ S3Request::authorize(S3Config *s3) /////////////////////////////////////////////////////////////////////////////// // This is the main continuation. 
int -event_handler(TSCont cont, TSEvent /* event */, void *edata) +event_handler(TSCont cont, TSEvent event, void *edata) { TSHttpTxn txnp = static_cast(edata); + S3Config *s3 = static_cast(TSContDataGet(cont)); + S3Request request(txnp); - TSHttpStatus status = TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + TSHttpStatus status = TS_HTTP_STATUS_INTERNAL_SERVER_ERROR; + TSEvent enable_event = TS_EVENT_HTTP_CONTINUE; - if (request.initialize()) { - status = request.authorize(static_cast(TSContDataGet(cont))); - } + switch (event) { + case TS_EVENT_HTTP_SEND_REQUEST_HDR: + if (request.initialize()) { + status = request.authorize(s3); + } - if (TS_HTTP_STATUS_OK == status) { - TSDebug(PLUGIN_NAME, "Succesfully signed the AWS S3 URL"); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); - } else { - TSDebug(PLUGIN_NAME, "Failed to sign the AWS S3 URL, status = %d", status); - TSHttpTxnSetHttpRetStatus(txnp, status); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_ERROR); + if (TS_HTTP_STATUS_OK == status) { + TSDebug(PLUGIN_NAME, "Succesfully signed the AWS S3 URL"); + } else { + TSDebug(PLUGIN_NAME, "Failed to sign the AWS S3 URL, status = %d", status); + TSHttpTxnSetHttpRetStatus(txnp, status); + enable_event = TS_EVENT_HTTP_ERROR; + } + break; + case TS_EVENT_HTTP_TXN_CLOSE: + s3->release(); // Release the configuration lease when txn closes + break; + default: + TSError("[%s] Unknown event for this plugin", PLUGIN_NAME); + TSDebug(PLUGIN_NAME, "unknown event for this plugin"); + break; } + TSHttpTxnReenable(txnp, enable_event); return 0; } @@ -537,10 +919,14 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE {const_cast("secret_key"), required_argument, nullptr, 's'}, {const_cast("version"), required_argument, nullptr, 'v'}, {const_cast("virtual_host"), no_argument, nullptr, 'h'}, + {const_cast("v4-include-headers"), required_argument, nullptr, 'i'}, + {const_cast("v4-exclude-headers"), required_argument, nullptr, 'e'}, + 
{const_cast("v4-region-map"), required_argument, nullptr, 'm'}, {nullptr, no_argument, nullptr, '\0'}, }; - S3Config *s3 = new S3Config(); + S3Config *s3 = new S3Config(true); // true == this config gets the continuation + S3Config *file_config = nullptr; // argv contains the "to" and "from" URLs. Skip the first so that the // second one poses as the program name. @@ -552,9 +938,15 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE switch (opt) { case 'c': - s3->parse_config(optarg); + file_config = gConfCache.get(optarg); // Get cached, or new, config object, from a file + if (!file_config) { + TSError("[%s] invalid configuration file, %s", PLUGIN_NAME, optarg); + *ih = nullptr; + s3->release(); + return TS_ERROR; + } break; - case 'k': + case 'a': s3->set_keyid(optarg); break; case 's': @@ -566,6 +958,15 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE case 'v': s3->set_version(optarg); break; + case 'i': + s3->set_include_headers(optarg); + break; + case 'e': + s3->set_exclude_headers(optarg); + break; + case 'm': + s3->set_region_map(optarg); + break; } if (opt == -1) { @@ -573,17 +974,23 @@ TSRemapNewInstance(int argc, char *argv[], void **ih, char * /* errbuf ATS_UNUSE } } + // Copy the config file secret into our instance of the configuration. + if (file_config) { + s3->copy_changes_from(file_config); + } + // Make sure we got both the shared secret and the AWS secret if (!s3->valid()) { TSError("[%s] requires both shared and AWS secret configuration", PLUGIN_NAME); - delete s3; + s3->release(); *ih = nullptr; return TS_ERROR; } + // Note that we don't acquire() the s3 config, it's implicit that we hold at least one ref *ih = static_cast(s3); - TSDebug(PLUGIN_NAME, "New rule: secret_key=%s, access_key=%s, virtual_host=%s", s3->secret(), s3->keyid(), - s3->virt_host() ? 
"yes" : "no"); + TSDebug(PLUGIN_NAME, "New rule: secret_key=%s, access_key=%s, virtual_host=%s, version=%d", s3->secret(), s3->keyid(), + s3->virt_host() ? "yes" : "no", s3->version()); return TS_SUCCESS; } @@ -593,7 +1000,7 @@ TSRemapDeleteInstance(void *ih) { S3Config *s3 = static_cast(ih); - delete s3; + s3->release(); } /////////////////////////////////////////////////////////////////////////////// @@ -606,6 +1013,7 @@ TSRemapDoRemap(void *ih, TSHttpTxn txnp, TSRemapRequestInfo * /* rri */) if (s3) { TSAssert(s3->valid()); + s3->acquire(); // Increasement ref-count // Now schedule the continuation to update the URL when going to origin. // Note that in most cases, this is a No-Op, assuming you have reasonable // cache hit ratio. However, the scheduling is next to free (very cheap). diff --git a/plugins/tcpinfo/tcpinfo.cc b/plugins/tcpinfo/tcpinfo.cc index 013a41154dc..b5e0a12c86b 100644 --- a/plugins/tcpinfo/tcpinfo.cc +++ b/plugins/tcpinfo/tcpinfo.cc @@ -21,8 +21,8 @@ limitations under the License. 
*/ -#include -#include +#include +#include #include #include #include @@ -31,9 +31,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include #include @@ -104,13 +104,13 @@ log_tcp_info(Config *config, const char *event_name, TSHttpSsn ssnp) socklen_t tcp_info_len = sizeof(info); int fd; - TSReleaseAssert(config->log != NULL); + TSReleaseAssert(config->log != nullptr); - if (ssnp != NULL && (TSHttpSsnClientFdGet(ssnp, &fd) != TS_SUCCESS || fd <= 0)) { + if (ssnp != nullptr && (TSHttpSsnClientFdGet(ssnp, &fd) != TS_SUCCESS || fd <= 0)) { TSDebug("tcpinfo", "error getting the client socket fd from ssn"); return; } - if (ssnp == NULL) { + if (ssnp == nullptr) { TSDebug("tcpinfo", "ssn is not specified"); return; } @@ -123,7 +123,7 @@ log_tcp_info(Config *config, const char *event_name, TSHttpSsn ssnp) client_addr.sa = TSHttpSsnClientAddrGet(ssnp); server_addr.sa = TSHttpSsnIncomingAddrGet(ssnp); - if (client_addr.sa == NULL || server_addr.sa == NULL) { + if (client_addr.sa == nullptr || server_addr.sa == nullptr) { return; } diff --git a/plugins/xdebug/xdebug.cc b/plugins/xdebug/xdebug.cc index 001041c448c..c5a48e8893e 100644 --- a/plugins/xdebug/xdebug.cc +++ b/plugins/xdebug/xdebug.cc @@ -16,10 +16,10 @@ * limitations under the License. 
*/ -#include -#include +#include +#include #include -#include +#include #include #include "ts/ts.h" diff --git a/proxy/AbstractBuffer.cc b/proxy/AbstractBuffer.cc index cbcf824dc0a..6b3ba838e4b 100644 --- a/proxy/AbstractBuffer.cc +++ b/proxy/AbstractBuffer.cc @@ -22,8 +22,8 @@ */ #include "ts/ink_config.h" -#include -#include +#include +#include #include "AbstractBuffer.h" /* #include "CacheAtomic.h" */ #include "ts/ink_align.h" diff --git a/proxy/CacheControl.cc b/proxy/CacheControl.cc index 947aabbe703..7962ff70639 100644 --- a/proxy/CacheControl.cc +++ b/proxy/CacheControl.cc @@ -68,7 +68,7 @@ CC_delete_table() // a timeout // struct CC_FreerContinuation; -typedef int (CC_FreerContinuation::*CC_FreerContHandler)(int, void *); +using CC_FreerContHandler = int (CC_FreerContinuation::*)(int, void *); struct CC_FreerContinuation : public Continuation { CC_table *p; int diff --git a/proxy/ControlBase.cc b/proxy/ControlBase.cc index 7f93f4f46ee..16df04aa1ac 100644 --- a/proxy/ControlBase.cc +++ b/proxy/ControlBase.cc @@ -74,10 +74,10 @@ struct TimeMod : public ControlBase::Modifier { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; - virtual void print(FILE *f) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; + void print(FILE *f) const override; static TimeMod *make(char *value, const char **error); static const char *timeOfDayToSeconds(const char *time_str, time_t *seconds); }; @@ -180,9 +180,9 @@ struct PortMod : public ControlBase::Modifier { static const char *const NAME; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; - virtual void print(FILE *f) const; + const char *name() const override; + bool check(HttpRequestData *req) const override; + void print(FILE *f) const override; static PortMod *make(char *value, const char **error); }; @@ -244,9 +244,9 @@ 
struct IPortMod : public ControlBase::Modifier { IPortMod(int port); - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; - virtual void print(FILE *f) const; + const char *name() const override; + bool check(HttpRequestData *req) const override; + void print(FILE *f) const override; static IPortMod *make(char *value, const char **error); }; @@ -292,10 +292,10 @@ struct SrcIPMod : public ControlBase::Modifier { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; - virtual void print(FILE *f) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; + void print(FILE *f) const override; static SrcIPMod *make(char *value, const char **error); }; @@ -343,10 +343,10 @@ struct SchemeMod : public ControlBase::Modifier { SchemeMod(int scheme); - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; - virtual void print(FILE *f) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; + void print(FILE *f) const override; const char *getWksText() const; @@ -405,10 +405,10 @@ struct TextMod : public ControlBase::Modifier { ts::Buffer text; TextMod(); - ~TextMod(); + ~TextMod() override; // Calls name() which the subclass must provide. - virtual void print(FILE *f) const; + void print(FILE *f) const override; // Copy the given NUL-terminated string to the text buffer. void set(const char *value); @@ -438,13 +438,13 @@ TextMod::set(const char *value) struct MultiTextMod : public ControlBase::Modifier { Vec text_vec; MultiTextMod(); - ~MultiTextMod(); + ~MultiTextMod() override; // Copy the value to the MultiTextMod buffer. void set(char *value); // Calls name() which the subclass must provide. 
- virtual void print(FILE *f) const; + void print(FILE *f) const override; }; MultiTextMod::MultiTextMod() @@ -479,9 +479,9 @@ MultiTextMod::set(char *value) struct MethodMod : public TextMod { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; static MethodMod *make(char *value, const char **error); }; @@ -515,9 +515,9 @@ MethodMod::make(char *value, const char **) struct PrefixMod : public TextMod { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; static PrefixMod *make(char *value, const char **error); }; @@ -563,9 +563,9 @@ PrefixMod::make(char *value, const char ** /* error ATS_UNUSED */) struct SuffixMod : public MultiTextMod { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; static SuffixMod *make(char *value, const char **error); }; const char *const SuffixMod::NAME = "Suffix"; @@ -611,9 +611,9 @@ SuffixMod::make(char *value, const char ** /* error ATS_UNUSED */) struct TagMod : public TextMod { static const char *const NAME; - virtual Type type() const; - virtual const char *name() const; - virtual bool check(HttpRequestData *req) const; + Type type() const override; + const char *name() const override; + bool check(HttpRequestData *req) const override; static TagMod *make(char *value, const char **error); }; const char *const TagMod::NAME = "Tag"; @@ -645,23 +645,23 @@ struct InternalMod : public ControlBase::Modifier { bool 
flag; static const char *const NAME; - virtual Type - type() const + Type + type() const override { return MOD_INTERNAL; } - virtual const char * - name() const + const char * + name() const override { return NAME; } - virtual bool - check(HttpRequestData *req) const + bool + check(HttpRequestData *req) const override { return req->internal_txn == flag; } - virtual void - print(FILE *f) const + void + print(FILE *f) const override { fprintf(f, "%s=%s ", this->name(), flag ? "true" : "false"); } diff --git a/proxy/ControlMatcher.cc b/proxy/ControlMatcher.cc index dba507b1fee..5908c5ecb2e 100644 --- a/proxy/ControlMatcher.cc +++ b/proxy/ControlMatcher.cc @@ -219,7 +219,7 @@ HostMatcher::NewEntry(matcher_line *line_info) error = cur_d->Init(line_info); if (error) { // There was a problem so undo the effects this function - memset(cur_d, 0, sizeof(Data)); + new (cur_d) Data(); // reconstruct } else { // Fill in the matching info host_lookup->NewEntry(match_data, (line_info->type == MATCH_DOMAIN) ? 
true : false, cur_d); diff --git a/proxy/CoreUtils.cc b/proxy/CoreUtils.cc index 95bd9e45733..10711382752 100644 --- a/proxy/CoreUtils.cc +++ b/proxy/CoreUtils.cc @@ -103,7 +103,6 @@ int program_counter = 0; #include #include #include -#include #include "ts/ink_platform.h" #include "CoreUtils.h" #endif /* darwin || freebsd || solaris */ @@ -112,6 +111,7 @@ int program_counter = 0; #include "http/HttpSM.h" #include +#include bool inTable; FILE *fp; @@ -307,7 +307,7 @@ CoreUtils::get_next_frame(core_stack_state *coress) if ((frameoff = (void **)ats_malloc(sizeof(long)))) { if (fread(frameoff, 4, 1, fp) == 1) { coress->framep = (intptr_t)*frameoff; - if (*frameoff == NULL) { + if (*frameoff == nullptr) { ats_free(frameoff); return 0; } diff --git a/proxy/CoreUtils.h b/proxy/CoreUtils.h index cd7e0dae3d5..dd6ed9389db 100644 --- a/proxy/CoreUtils.h +++ b/proxy/CoreUtils.h @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include "ts/DynArray.h" @@ -62,7 +61,6 @@ struct core_stack_state { #include #include #include -#include #include #define NO_OF_ARGS \ diff --git a/proxy/EventName.cc b/proxy/EventName.cc index b5e5412d938..341b44bb910 100644 --- a/proxy/EventName.cc +++ b/proxy/EventName.cc @@ -22,8 +22,8 @@ */ #include "ts/ink_config.h" -#include -#include +#include +#include #include "P_EventSystem.h" // #include "I_Disk.h" unused diff --git a/proxy/FetchSM.cc b/proxy/FetchSM.cc index 7b7332a5236..d0dfcace2f9 100644 --- a/proxy/FetchSM.cc +++ b/proxy/FetchSM.cc @@ -23,7 +23,7 @@ #include "ts/ink_config.h" #include "FetchSM.h" -#include +#include #include "HTTP.h" #include "PluginVC.h" @@ -467,8 +467,8 @@ FetchSM::process_fetch_read(int event) } if (header_done == 0 && ((fetch_flags & TS_FETCH_FLAGS_STREAM) || callback_options == AFTER_HEADER)) { - if (client_response_hdr.parse_resp(&http_parser, resp_reader, &bytes_used, 0) == PARSE_RESULT_DONE) { - header_done = 1; + if (client_response_hdr.parse_resp(&http_parser, resp_reader, &bytes_used, 
false) == PARSE_RESULT_DONE) { + header_done = true; if (fetch_flags & TS_FETCH_FLAGS_STREAM) { return InvokePluginExt(); } else { diff --git a/proxy/ICP.cc b/proxy/ICP.cc index 1a1b04ffbd6..0fec8e12472 100644 --- a/proxy/ICP.cc +++ b/proxy/ICP.cc @@ -144,10 +144,10 @@ HTTPHdr gclient_request; //**************************************************************************** // VC++ 5.0 is rather picky -typedef int (ICPPeerReadCont::*ICPPeerReadContHandler)(int, void *); -typedef int (ICPPeriodicCont::*ICPPeriodicContHandler)(int, void *); -typedef int (ICPHandlerCont::*ICPHandlerContHandler)(int, void *); -typedef int (ICPRequestCont::*ICPRequestContHandler)(int, void *); +using ICPPeerReadContHandler = int (ICPPeerReadCont::*)(int, void *); +using ICPPeriodicContHandler = int (ICPPeriodicCont::*)(int, void *); +using ICPHandlerContHandler = int (ICPHandlerCont::*)(int, void *); +using ICPRequestContHandler = int (ICPRequestCont::*)(int, void *); // Plugin freshness function PluginFreshnessCalcFunc pluginFreshnessCalcFunc = (PluginFreshnessCalcFunc) nullptr; @@ -519,7 +519,7 @@ ICPPeerReadCont::PeerReadStateMachine(PeerReadData *s, Event *e) return EVENT_CONT; // try again later } - while (1) { // loop forever + while (true) { // loop forever switch (s->_next_state) { case READ_ACTIVE: { @@ -1128,7 +1128,7 @@ ICPRequestCont::ICPStateMachine(int event, void *d) ICPConfiguration *ICPcf = _ICPpr->GetConfig(); ip_port_text_buffer ipb; - while (1) { // loop forever + while (true) { // loop forever switch (_next_state) { case ICP_START: { @@ -2182,7 +2182,7 @@ ICPProcessor::ReconfigureStateMachine(ReconfigState_t s, int gconfig_changed, in ink_assert(_ICPConfig->HaveLock()); int reconfig_status; - while (1) { + while (true) { switch (s) { case RC_RECONFIG: { if (!Lock()) { diff --git a/proxy/ICPConfig.cc b/proxy/ICPConfig.cc index 95487210526..a511089f6e4 100644 --- a/proxy/ICPConfig.cc +++ b/proxy/ICPConfig.cc @@ -397,7 +397,7 @@ 
ICPConfigUpdateCont::RetryICPconfigUpdate(int /* event ATS_UNUSED */, Event * /* // Class ICPConfiguration member functions // Overall manager of ICP configuration data from TS configuration. //-------------------------------------------------------------------------- -typedef int (ICPConfigUpdateCont::*ICPCfgContHandler)(int, void *); +using ICPCfgContHandler = int (ICPConfigUpdateCont::*)(int, void *); ICPConfiguration::ICPConfiguration() : _icp_config_callouts(0) { //********************************************************* @@ -1188,7 +1188,7 @@ MultiCastPeer::FindMultiCastChild(IpAddr const &addr, uint16_t port) // Class PeriodicCont member functions (abstract base class) // Look for TS ICP configuration changes by periodically looking. //------------------------------------------------------------------------- -typedef int (ICPPeriodicCont::*ICPPeriodicContHandler)(int, void *); +using ICPPeriodicContHandler = int (ICPPeriodicCont::*)(int, void *); PeriodicCont::PeriodicCont(ICPProcessor *icpP) : Continuation(nullptr), _ICPpr(icpP) { mutex = new_ProxyMutex(); diff --git a/proxy/IPAllow.cc b/proxy/IPAllow.cc index bece005b0cf..b5edc17e34f 100644 --- a/proxy/IPAllow.cc +++ b/proxy/IPAllow.cc @@ -113,13 +113,13 @@ IpAllow::PrintMap(IpMap *map) { std::ostringstream s; s << map->getCount() << " ACL entries."; - for (IpMap::iterator spot(map->begin()), limit(map->end()); spot != limit; ++spot) { + for (auto &spot : *map) { char text[INET6_ADDRSTRLEN]; - AclRecord const *ar = static_cast(spot->data()); + AclRecord const *ar = static_cast(spot.data()); - s << std::endl << " Line " << ar->_src_line << ": " << ats_ip_ntop(spot->min(), text, sizeof text); - if (0 != ats_ip_addr_cmp(spot->min(), spot->max())) { - s << " - " << ats_ip_ntop(spot->max(), text, sizeof text); + s << std::endl << " Line " << ar->_src_line << ": " << ats_ip_ntop(spot.min(), text, sizeof text); + if (0 != ats_ip_addr_cmp(spot.min(), spot.max())) { + s << " - " << ats_ip_ntop(spot.max(), text, 
sizeof text); } s << " method="; uint32_t mask = AclRecord::ALL_METHOD_MASK & ar->_method_mask; @@ -143,12 +143,11 @@ IpAllow::PrintMap(IpMap *map) if (!ar->_nonstandard_methods.empty()) { s << " nonstandard method="; bool leader = false; // need leading vbar? - for (AclRecord::MethodSet::iterator iter = ar->_nonstandard_methods.begin(), end = ar->_nonstandard_methods.end(); - iter != end; ++iter) { + for (const auto &_nonstandard_method : ar->_nonstandard_methods) { if (leader) { s << '|'; } - s << *iter; + s << _nonstandard_method; leader = true; } } diff --git a/proxy/InkAPI.cc b/proxy/InkAPI.cc index 6cc2770ad72..e937e769cff 100644 --- a/proxy/InkAPI.cc +++ b/proxy/InkAPI.cc @@ -21,7 +21,7 @@ limitations under the License. */ -#include +#include #include "ts/ink_platform.h" #include "ts/ink_base64.h" @@ -39,6 +39,7 @@ #include "HttpSM.h" #include "HttpConfig.h" #include "P_Net.h" +#include "P_SSLNextProtocolAccept.h" #include "P_UDPNet.h" #include "P_HostDB.h" #include "P_Cache.h" @@ -60,6 +61,7 @@ #include "I_RecCore.h" #include "I_Machine.h" #include "HttpProxyServerMain.h" +#include /**************************************************************** * IMPORTANT - READ ME @@ -2123,8 +2125,8 @@ TSUrlStringGet(TSMBuffer bufp, TSMLoc obj, int *length) return url_string_get(url_impl, nullptr, length, nullptr); } -typedef const char *(URL::*URLPartGetF)(int *length); -typedef void (URL::*URLPartSetF)(const char *value, int length); +using URLPartGetF = const char *(URL::*)(int *); +using URLPartSetF = void (URL::*)(const char *, int); static const char * URLPartGet(TSMBuffer bufp, TSMLoc obj, int *length, URLPartGetF url_f) @@ -2384,9 +2386,12 @@ TSStringPercentDecode(const char *str, size_t str_len, char *dst, size_t dst_siz // TODO: We should check for "failures" here? 
unescape_str(buffer, buffer + dst_size, src, src + str_len, s); - *buffer = '\0'; + + size_t data_written = std::min(buffer - dst, dst_size - 1); + *(dst + data_written) = '\0'; + if (length) { - *length = (buffer - dst); + *length = (data_written); } return TS_SUCCESS; @@ -3042,7 +3047,7 @@ TSMimeHdrFieldNext(TSMBuffer bufp, TSMLoc hdr, TSMLoc field) return TS_NULL_MLOC; } - while (1) { + while (true) { ++slotnum; MIMEField *f = mime_hdr_field_get_slotnum(handle->mh, slotnum); @@ -3161,7 +3166,7 @@ TSMimeHdrFieldValuesClear(TSMBuffer bufp, TSMLoc hdr, TSMLoc field) * An empty string is also considered to be a token. The correct value of * the field after this function should be NULL. */ - mime_field_value_set(heap, handle->mh, handle->field_ptr, nullptr, 0, 1); + mime_field_value_set(heap, handle->mh, handle->field_ptr, nullptr, 0, true); return TS_SUCCESS; } @@ -4612,7 +4617,16 @@ TSHttpTxnHookAdd(TSHttpTxn txnp, TSHttpHookID id, TSCont contp) sdk_assert(sdk_sanity_check_continuation(contp) == TS_SUCCESS); sdk_assert(sdk_sanity_check_hook_id(id) == TS_SUCCESS); - HttpSM *sm = (HttpSM *)txnp; + HttpSM *sm = (HttpSM *)txnp; + APIHook *hook = sm->txn_hook_get(id); + + // Traverse list of hooks and add a particular hook only once + while (hook != nullptr) { + if (hook->m_cont == (INKContInternal *)contp) { + return; + } + hook = hook->m_link.next; + } sm->txn_hook_append(id, (INKContInternal *)contp); } @@ -4703,6 +4717,8 @@ TSHttpTxnPristineUrlGet(TSHttpTxn txnp, TSMBuffer *bufp, TSMLoc *url_loc) } // Shortcut to just get the URL. +// The caller is responsible to free memory that is allocated for the string +// that is returned. 
char * TSHttpTxnEffectiveUrlStringGet(TSHttpTxn txnp, int *length) { @@ -8088,7 +8104,6 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr ret = &overridableHttpConfig->body_factory_template_base; break; case TS_CONFIG_HTTP_CACHE_OPEN_WRITE_FAIL_ACTION: - typ = OVERRIDABLE_TYPE_INT; ret = &overridableHttpConfig->cache_open_write_fail_action; break; case TS_CONFIG_HTTP_ENABLE_REDIRECTION: @@ -8109,6 +8124,10 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr typ = OVERRIDABLE_TYPE_INT; ret = &overridableHttpConfig->attach_server_session_to_client; break; + case TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE: + typ = OVERRIDABLE_TYPE_INT; + ret = &overridableHttpConfig->safe_requests_retryable; + break; case TS_CONFIG_HTTP_ORIGIN_MAX_CONNECTIONS_QUEUE: typ = OVERRIDABLE_TYPE_INT; ret = &overridableHttpConfig->origin_max_connections_queue; @@ -8133,11 +8152,9 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr ret = &overridableHttpConfig->transaction_active_timeout_in; break; case TS_CONFIG_SRV_ENABLED: - typ = OVERRIDABLE_TYPE_INT; ret = &overridableHttpConfig->srv_enabled; break; case TS_CONFIG_HTTP_FORWARD_CONNECT_METHOD: - typ = OVERRIDABLE_TYPE_INT; ret = &overridableHttpConfig->forward_connect_method; break; case TS_CONFIG_SSL_CERT_FILENAME: @@ -8148,6 +8165,25 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr typ = OVERRIDABLE_TYPE_STRING; ret = &overridableHttpConfig->client_cert_filepath; break; + case TS_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB: + ret = &overridableHttpConfig->parent_failures_update_hostdb; + break; + case TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD: + typ = OVERRIDABLE_TYPE_INT; + ret = &overridableHttpConfig->parent_fail_threshold; + break; + case TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME: + typ = OVERRIDABLE_TYPE_INT; + ret = &overridableHttpConfig->parent_retry_time; + break; + case 
TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS: + typ = OVERRIDABLE_TYPE_INT; + ret = &overridableHttpConfig->per_parent_connect_attempts; + break; + case TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT: + typ = OVERRIDABLE_TYPE_INT; + ret = &overridableHttpConfig->parent_connect_timeout; + break; // This helps avoiding compiler warnings, yet detect unhandled enum members. case TS_CONFIG_NULL: case TS_CONFIG_LAST_ENTRY: @@ -8590,6 +8626,10 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf, cnf = TS_CONFIG_HTTP_ANONYMIZE_REMOVE_COOKIE; } else if (!strncmp(name, "proxy.config.http.request_header_max_size", length)) { cnf = TS_CONFIG_HTTP_REQUEST_HEADER_MAX_SIZE; + } else if (!strncmp(name, "proxy.config.http.safe_requests_retryable", length)) { + cnf = TS_CONFIG_HTTP_SAFE_REQUESTS_RETRYABLE; + } else if (!strncmp(name, "proxy.config.http.parent_proxy.retry_time", length)) { + cnf = TS_CONFIG_HTTP_PARENT_PROXY_RETRY_TIME; } break; case 'r': @@ -8684,6 +8724,8 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf, case 'd': if (!strncmp(name, "proxy.config.http.down_server.abort_threshold", length)) { cnf = TS_CONFIG_HTTP_DOWN_SERVER_ABORT_THRESHOLD; + } else if (!strncmp(name, "proxy.config.http.parent_proxy.fail_threshold", length)) { + cnf = TS_CONFIG_HTTP_PARENT_PROXY_FAIL_THRESHOLD; } break; case 'n': @@ -8765,6 +8807,11 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf, case 47: switch (name[length - 1]) { + case 'b': + if (!strncmp(name, "proxy.config.http.parent_proxy.mark_down_hostdb", length)) { + cnf = TS_CONFIG_PARENT_FAILURES_UPDATE_HOSTDB; + } + break; case 'd': if (!strncmp(name, "proxy.config.http.negative_revalidating_enabled", length)) { cnf = TS_CONFIG_HTTP_NEGATIVE_REVALIDATING_ENABLED; @@ -8886,9 +8933,17 @@ TSHttpTxnConfigFind(const char *name, int length, TSOverridableConfigKey *conf, } break; + case 55: + if (!strncmp(name, 
"proxy.config.http.parent_proxy.connect_attempts_timeout", length)) { + cnf = TS_CONFIG_HTTP_PARENT_CONNECT_ATTEMPT_TIMEOUT; + } + break; + case 58: if (!strncmp(name, "proxy.config.http.connect_attempts_max_retries_dead_server", length)) { cnf = TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER; + } else if (!strncmp(name, "proxy.config.http.parent_proxy.per_parent_connect_attempts", length)) { + cnf = TS_CONFIG_HTTP_PER_PARENT_CONNECT_ATTEMPTS; } break; } @@ -9162,6 +9217,65 @@ TSSslContextDestroy(TSSslContext ctx) SSLReleaseContext(reinterpret_cast(ctx)); } +void +TSRegisterProtocolSet(TSVConn sslp, TSNextProtocolSet ps) +{ + NetVConnection *vc = reinterpret_cast(sslp); + SSLNetVConnection *ssl_vc = dynamic_cast(vc); + if (ssl_vc) { + ssl_vc->registerNextProtocolSet(reinterpret_cast(ps)); + } +} + +TSNextProtocolSet +TSUnregisterProtocol(TSNextProtocolSet protoset, const char *protocol) +{ + SSLNextProtocolSet *snps = reinterpret_cast(protoset); + if (snps) { + snps->unregisterEndpoint(protocol, nullptr); + return reinterpret_cast(snps); + } + return nullptr; +} + +TSAcceptor +TSAcceptorGet(TSVConn sslp) +{ + NetVConnection *vc = reinterpret_cast(sslp); + SSLNetVConnection *ssl_vc = dynamic_cast(vc); + return ssl_vc ? reinterpret_cast(ssl_vc->accept_object) : nullptr; +} + +extern std::vector naVec; +TSAcceptor +TSAcceptorGetbyID(int ID) +{ + Debug("ssl", "getNetAccept in INK API.cc %p", naVec.at(ID)); + return reinterpret_cast(naVec.at(ID)); +} + +int +TSAcceptorIDGet(TSAcceptor acceptor) +{ + NetAccept *na = reinterpret_cast(acceptor); + return na ? na->id : -1; +} + +int +TSAcceptorCount() +{ + return naVec.size(); +} + +// clones the protoset associated with netAccept +TSNextProtocolSet +TSGetcloneProtoSet(TSAcceptor tna) +{ + NetAccept *na = reinterpret_cast(tna); + // clone protoset + return (na && na->snpa) ? 
reinterpret_cast(na->snpa->cloneProtoSet()) : nullptr; +} + tsapi int TSVConnIsSsl(TSVConn sslp) { @@ -9239,7 +9353,7 @@ TSUuidInitialize(TSUuid uuid, TSUuidVersion v) return u->valid() ? TS_SUCCESS : TS_ERROR; } -const TSUuid +TSUuid TSProcessUuidGet(void) { Machine *machine = Machine::instance(); @@ -9315,10 +9429,13 @@ TSHttpTxnClientProtocolStackGet(TSHttpTxn txnp, int n, const char **result, int { sdk_assert(sdk_sanity_check_txn(txnp) == TS_SUCCESS); sdk_assert(n == 0 || result != nullptr); - HttpSM *sm = (HttpSM *)txnp; + HttpSM *sm = reinterpret_cast(txnp); int count = 0; - if (sm) { - count = sm->populate_client_protocol(result, n); + if (sm && n > 0) { + auto mem = static_cast(alloca(sizeof(ts::StringView) * n)); + count = sm->populate_client_protocol(mem, n); + for (int i = 0; i < count; ++i) + result[i] = mem[i].ptr(); } if (actual) { *actual = count; @@ -9333,8 +9450,11 @@ TSHttpSsnClientProtocolStackGet(TSHttpSsn ssnp, int n, const char **result, int sdk_assert(n == 0 || result != nullptr); ProxyClientSession *cs = reinterpret_cast(ssnp); int count = 0; - if (cs) { - count = cs->populate_protocol(result, n); + if (cs && n > 0) { + auto mem = static_cast(alloca(sizeof(ts::StringView) * n)); + count = cs->populate_protocol(mem, n); + for (int i = 0; i < count; ++i) + result[i] = mem[i].ptr(); } if (actual) { *actual = count; @@ -9353,7 +9473,7 @@ TSHttpTxnClientProtocolStackContains(TSHttpTxn txnp, const char *tag) { sdk_assert(sdk_sanity_check_txn(txnp) == TS_SUCCESS); HttpSM *sm = (HttpSM *)txnp; - return sm->client_protocol_contains(tag); + return sm->client_protocol_contains(ts::StringView(tag)); } const char * @@ -9361,7 +9481,7 @@ TSHttpSsnClientProtocolStackContains(TSHttpSsn ssnp, const char *tag) { sdk_assert(sdk_sanity_check_http_ssn(ssnp) == TS_SUCCESS); ProxyClientSession *cs = reinterpret_cast(ssnp); - return cs->protocol_contains(tag); + return cs->protocol_contains(ts::StringView(tag)); } const char * diff --git a/proxy/InkAPIInternal.h 
b/proxy/InkAPIInternal.h index a3248b8aad6..5e364a9b7b4 100644 --- a/proxy/InkAPIInternal.h +++ b/proxy/InkAPIInternal.h @@ -285,6 +285,7 @@ typedef enum { TS_SSL_INTERNAL_FIRST_HOOK, TS_VCONN_PRE_ACCEPT_INTERNAL_HOOK = TS_SSL_INTERNAL_FIRST_HOOK, TS_SSL_CERT_INTERNAL_HOOK, + TS_SSL_SERVERNAME_INTERNAL_HOOK, TS_SSL_INTERNAL_LAST_HOOK } TSSslHookInternalID; diff --git a/proxy/InkAPITest.cc b/proxy/InkAPITest.cc index b00bd9543ab..df95f25d22d 100644 --- a/proxy/InkAPITest.cc +++ b/proxy/InkAPITest.cc @@ -32,13 +32,13 @@ #include "ts/ink_file.h" #include -#include +#include // extern int errno; #include #include -#include -#include +#include +#include #include "ts/Regression.h" #include "api/ts/ts.h" @@ -5439,7 +5439,7 @@ REGRESSION_TEST(SDK_API_TSMgmtGet)(RegressionTest *test, int /* atype ATS_UNUSED err = 1; } else if (strcmp(svalue, CONFIG_PARAM_STRING_VALUE) != 0) { SDK_RPRINT(test, "TSMgmtStringGet", "TestCase1.4", TC_FAIL, - "got incorrect value of param %s, should have been \"%s\", found \"%s\"", CONFIG_PARAM_STRING_NAME, + R"(got incorrect value of param %s, should have been "%s", found "%s")", CONFIG_PARAM_STRING_NAME, CONFIG_PARAM_STRING_VALUE, svalue); err = 1; } else { @@ -5550,7 +5550,8 @@ typedef enum { ORIG_TS_SSL_FIRST_HOOK, ORIG_TS_VCONN_PRE_ACCEPT_HOOK = ORIG_TS_SSL_FIRST_HOOK, ORIG_TS_SSL_SNI_HOOK, - ORIG_TS_SSL_LAST_HOOK = ORIG_TS_SSL_SNI_HOOK, + ORIG_TS_SSL_SERVERNAME_HOOK, + ORIG_TS_SSL_LAST_HOOK = TS_SSL_SERVERNAME_HOOK, ORIG_TS_HTTP_LAST_HOOK } ORIG_TSHttpHookID; @@ -5909,7 +5910,7 @@ ssn_handler(TSCont contp, TSEvent event, void *edata) SDK_RPRINT(data->test, "TSHttpTxnHookAdd", "TestCase2", TC_PASS, "ok"); data->test_passed_txn_hook_add++; txnp = (TSHttpTxn)edata; - if (1) { + if (true) { char *temp = TSstrdup(ERROR_BODY); TSHttpTxnErrorBodySet(txnp, temp, strlen(temp), nullptr); } @@ -6510,13 +6511,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnCache)(RegressionTest *test, int /* aty // TSHttpTxnUntransformedRespCache 
/////////////////////////////////////////////////////// -/** Append Transform Data Structure **/ -typedef struct { - TSVIO output_vio; - TSIOBuffer output_buffer; - TSIOBufferReader output_reader; - int append_needed; -} MyTransformData; /** Append Transform Data Structure Ends **/ typedef struct { @@ -6534,48 +6528,35 @@ typedef struct { bool test_passed_txn_untransformed_resp_cache; bool test_passed_transform_create; int req_no; - MyTransformData *transformData; - int magic; + uint32_t magic; } TransformTestData; +/** Append Transform Data Structure **/ +struct AppendTransformTestData { + TSVIO output_vio = nullptr; + TSIOBuffer output_buffer = nullptr; + TSIOBufferReader output_reader = nullptr; + TransformTestData *test_data = nullptr; + int append_needed = 1; + + ~AppendTransformTestData() + { + if (output_buffer) + TSIOBufferDestroy(output_buffer); + } +}; + /**** Append Transform Code (Tailored to needs)****/ static TSIOBuffer append_buffer; static TSIOBufferReader append_buffer_reader; static int64_t append_buffer_length; -static MyTransformData * -my_data_alloc() -{ - MyTransformData *data; - - data = (MyTransformData *)TSmalloc(sizeof(MyTransformData)); - data->output_vio = nullptr; - data->output_buffer = nullptr; - data->output_reader = nullptr; - data->append_needed = 1; - - return data; -} - -static void -my_data_destroy(MyTransformData *data) -{ - if (data) { - if (data->output_buffer) { - TSIOBufferDestroy(data->output_buffer); - } - TSfree(data); - } -} - static void handle_transform(TSCont contp) { TSVConn output_conn; TSVIO write_vio; - TransformTestData *contData; - MyTransformData *data; int64_t towrite; int64_t avail; @@ -6589,25 +6570,19 @@ handle_transform(TSCont contp) write_vio = TSVConnWriteVIOGet(contp); /* Get our data structure for this operation. The private data - structure contains the output VIO and output buffer. If the - private data structure pointer is NULL, then we'll create it - and initialize its internals. 
*/ - contData = (TransformTestData *)TSContDataGet(contp); - data = contData->transformData; - if (!data) { + structure contains the output VIO and output buffer. + */ + auto *data = static_cast(TSContDataGet(contp)); + if (!data->output_buffer) { towrite = TSVIONBytesGet(write_vio); if (towrite != INT64_MAX) { towrite += append_buffer_length; } - contData->transformData = my_data_alloc(); - data = contData->transformData; - data->output_buffer = TSIOBufferCreate(); - data->output_reader = TSIOBufferReaderAlloc(data->output_buffer); - data->output_vio = TSVConnWrite(output_conn, contp, data->output_reader, towrite); - // Don't need this as the structure is encapsulated in another structure - // which is set to be Continuation's Data. - // TSContDataSet (contp, data); + data->output_buffer = TSIOBufferCreate(); + data->output_reader = TSIOBufferReaderAlloc(data->output_buffer); + data->output_vio = TSVConnWrite(output_conn, contp, data->output_reader, towrite); } + ink_assert(data->output_vio); /* We also check to see if the write VIO's buffer is non-NULL. A NULL buffer indicates that the write operation has been @@ -6690,16 +6665,15 @@ handle_transform(TSCont contp) static int transformtest_transform(TSCont contp, TSEvent event, void * /* edata ATS_UNUSED */) { - TransformTestData *contData = (TransformTestData *)TSContDataGet(contp); - if (contData->test_passed_transform_create == false) { - contData->test_passed_transform_create = true; - SDK_RPRINT(contData->test, "TSTransformCreate", "TestCase1", TC_PASS, "ok"); + auto *data = static_cast(TSContDataGet(contp)); + if (data->test_data->test_passed_transform_create == false) { + data->test_data->test_passed_transform_create = true; + SDK_RPRINT(data->test_data->test, "TSTransformCreate", "TestCase1", TC_PASS, "ok"); } /* Check to see if the transformation has been closed by a call to TSVConnClose. 
*/ if (TSVConnClosedGet(contp)) { - my_data_destroy(contData->transformData); - contData->transformData = nullptr; + delete data; TSContDestroy(contp); return 0; } else { @@ -6763,14 +6737,16 @@ transformable(TSHttpTxn txnp, TransformTestData *data) } static void -transform_add(TSHttpTxn txnp, TransformTestData *data) +transform_add(TSHttpTxn txnp, TransformTestData *test_data) { TSVConn connp; + auto *data = new AppendTransformTestData; - connp = TSTransformCreate(transformtest_transform, txnp); + data->test_data = test_data; + connp = TSTransformCreate(transformtest_transform, txnp); TSContDataSet(connp, data); if (connp == nullptr) { - SDK_RPRINT(data->test, "TSHttpTxnTransform", "", TC_FAIL, "Unable to create Transformation."); + SDK_RPRINT(data->test_data->test, "TSHttpTxnTransform", "", TC_FAIL, "Unable to create Transformation."); return; } @@ -7033,7 +7009,6 @@ EXCLUSIVE_REGRESSION_TEST(SDK_API_HttpTxnTransform)(RegressionTest *test, int /* socktest->test_passed_txn_transformed_resp_cache = false; socktest->test_passed_txn_transformed_resp_cache = false; socktest->test_passed_transform_create = false; - socktest->transformData = nullptr; socktest->req_no = 1; socktest->magic = MAGIC_ALIVE; TSContDataSet(cont, socktest); @@ -7610,6 +7585,7 @@ const char *SDK_Overridable_Configs[TS_CONFIG_LAST_ENTRY] = { "proxy.config.http.cache.max_open_write_retries", "proxy.config.http.redirect_use_orig_cache_key", "proxy.config.http.attach_server_session_to_client", + "proxy.config.http.safe_requests_retryable", "proxy.config.http.origin_max_connections_queue", "proxy.config.websocket.no_activity_timeout", "proxy.config.websocket.active_timeout", @@ -7620,6 +7596,11 @@ const char *SDK_Overridable_Configs[TS_CONFIG_LAST_ENTRY] = { "proxy.config.http.forward_connect_method", "proxy.config.ssl.client.cert.filename", "proxy.config.ssl.client.cert.path", + "proxy.config.http.parent_proxy.mark_down_hostdb", + "proxy.config.http.parent_proxy.fail_threshold", + 
"proxy.config.http.parent_proxy.retry_time", + "proxy.config.http.parent_proxy.per_parent_connect_attempts", + "proxy.config.http.parent_proxy.connect_attempts_timeout", }; REGRESSION_TEST(SDK_API_OVERRIDABLE_CONFIGS)(RegressionTest *test, int /* atype ATS_UNUSED */, int *pstatus) @@ -7777,6 +7758,7 @@ REGRESSION_TEST(SDK_API_ENCODING)(RegressionTest *test, int /* atype ATS_UNUSED const char *url_base64 = "aHR0cDovL3d3dy5leGFtcGxlLmNvbS9mb28/ZmllPSAiIyU8PltdXF5ge31+JmJhcj17dGVzdH0mZnVtPUFwYWNoZSBUcmFmZmljIFNlcnZlcg=="; const char *url2 = "http://www.example.com/"; // No Percent encoding necessary + const char *url3 = "https://www.thisisoneexampleofastringoflengtheightyasciilowercasecharacters.com/"; char buf[1024]; size_t length; bool success = true; @@ -7822,10 +7804,33 @@ REGRESSION_TEST(SDK_API_ENCODING)(RegressionTest *test, int /* atype ATS_UNUSED success = false; } else { if (length != strlen(url2) || strcmp(buf, url2)) { - SDK_RPRINT(test, "TSStringPercentDecode", "TestCase1", TC_FAIL, "Failed on %s != %s", buf, url2); + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase2", TC_FAIL, "Failed on %s != %s", buf, url2); success = false; } else { - SDK_RPRINT(test, "TSStringPercentDecode", "TestCase1", TC_PASS, "ok"); + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase2", TC_PASS, "ok"); + } + } + + // test to verify TSStringPercentDecode does not write past the end of the + // buffer + const size_t buf_len = strlen(url3) + 1; // 81 + strncpy(buf, url3, buf_len - 1); + const char canary = 0xFF; + buf[buf_len - 1] = canary; + + const char *url3_clipped = "https://www.thisisoneexampleofastringoflengtheightyasciilowercasecharacters.com"; + if (TS_SUCCESS != TSStringPercentDecode(buf, buf_len - 1, buf, buf_len - 1, &length)) { + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase3", TC_FAIL, "Failed on %s", url3); + success = false; + } else { + if (memcmp(buf + buf_len - 1, &canary, 1)) { // Overwrite + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase3", 
TC_FAIL, "Failed on %s overwrites buffer", url3); + success = false; + } else if (length != strlen(url3_clipped) || strcmp(buf, url3_clipped)) { + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase3", TC_FAIL, "Failed on %s != %s", buf, url3_clipped); + success = false; + } else { + SDK_RPRINT(test, "TSStringPercentDecode", "TestCase3", TC_PASS, "ok"); } } diff --git a/proxy/Main.cc b/proxy/Main.cc index c4e859addee..b6ff339e0d9 100644 --- a/proxy/Main.cc +++ b/proxy/Main.cc @@ -213,7 +213,7 @@ static const ArgumentDescription argument_descriptions[] = { {"bind_stdout", '-', "Regular file to bind stdout to", "S512", &bind_stdout, "PROXY_BIND_STDOUT", nullptr}, {"bind_stderr", '-', "Regular file to bind stderr to", "S512", &bind_stderr, "PROXY_BIND_STDERR", nullptr}, #if defined(linux) - {"read_core", 'c', "Read Core file", "S255", &core_file, NULL, NULL}, + {"read_core", 'c', "Read Core file", "S255", &core_file, nullptr, nullptr}, #endif {"accept_mss", '-', "MSS for client connections", "I", &accept_mss, nullptr, nullptr}, @@ -289,7 +289,7 @@ class TrackerContinuation : public Continuation baseline_taken = 0; } - ~TrackerContinuation() { mutex = nullptr; } + ~TrackerContinuation() override { mutex = nullptr; } int periodic(int event, Event * /* e ATS_UNUSED */) { @@ -349,8 +349,12 @@ class DiagsLogContinuation : public Continuation class MemoryLimit : public Continuation { public: - MemoryLimit() : Continuation(new_ProxyMutex()), _memory_limit(0) { SET_HANDLER(&MemoryLimit::periodic); } - ~MemoryLimit() { mutex = NULL; } + MemoryLimit() : Continuation(new_ProxyMutex()), _memory_limit(0) + { + memset(&_usage, 0, sizeof(_usage)); + SET_HANDLER(&MemoryLimit::periodic); + } + ~MemoryLimit() override { mutex = nullptr; } int periodic(int event, Event *e) { @@ -455,8 +459,6 @@ proxy_signal_handler(int signo, siginfo_t *info, void *ctx) shutdown_event_system = true; sleep(1); - - ::exit(signo); } // @@ -508,7 +510,7 @@ check_lockfile() } static void 
-check_config_directories(void) +check_config_directories() { ats_scoped_str rundir(RecConfigReadRuntimeDir()); ats_scoped_str sysconfdir(RecConfigReadConfigDir()); @@ -1051,7 +1053,7 @@ init_core_size() } static void -adjust_sys_settings(void) +adjust_sys_settings() { struct rlimit lim; int fds_throttle = -1; @@ -1781,7 +1783,7 @@ main(int /* argc ATS_UNUSED */, const char **argv) eventProcessor.schedule_every(new DiagsLogContinuation, HRTIME_SECOND, ET_TASK); eventProcessor.schedule_every(new MemoryLimit, HRTIME_SECOND, ET_TASK); REC_RegisterConfigUpdateFunc("proxy.config.dump_mem_info_frequency", init_memory_tracker, nullptr); - init_memory_tracker(NULL, RECD_NULL, RecData(), nullptr); + init_memory_tracker(nullptr, RECD_NULL, RecData(), nullptr); // log initialization moved down diff --git a/proxy/ParentConsistentHash.cc b/proxy/ParentConsistentHash.cc index 8eedfe10b51..75ccb26d0ed 100644 --- a/proxy/ParentConsistentHash.cc +++ b/proxy/ParentConsistentHash.cc @@ -103,7 +103,8 @@ ParentConsistentHash::getPathHash(HttpRequestData *hrdata, ATSHash64 *h) } void -ParentConsistentHash::selectParent(const ParentSelectionPolicy *policy, bool first_call, ParentResult *result, RequestData *rdata) +ParentConsistentHash::selectParent(bool first_call, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, + unsigned int retry_time) { ATSHash64Sip24 hash; ATSConsistentHash *fhash; @@ -170,8 +171,8 @@ ParentConsistentHash::selectParent(const ParentSelectionPolicy *policy, bool fir do { if (pRec && !pRec->available) { Debug("parent_select", "Parent.failedAt = %u, retry = %u, xact_start = %u", (unsigned int)pRec->failedAt, - (unsigned int)policy->ParentRetryTime, (unsigned int)request_info->xact_start); - if ((pRec->failedAt + policy->ParentRetryTime) < request_info->xact_start) { + (unsigned int)retry_time, (unsigned int)request_info->xact_start); + if ((pRec->failedAt + retry_time) < request_info->xact_start) { parentRetry = true; // make sure that the proper 
state is recorded in the result structure result->last_parent = pRec->idx; @@ -243,7 +244,7 @@ ParentConsistentHash::selectParent(const ParentSelectionPolicy *policy, bool fir } void -ParentConsistentHash::markParentDown(const ParentSelectionPolicy *policy, ParentResult *result) +ParentConsistentHash::markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) { time_t now; pRecord *pRec; @@ -279,7 +280,7 @@ ParentConsistentHash::markParentDown(const ParentSelectionPolicy *policy, Parent // it relates to how long the parent has been down. now = time(nullptr); - // Mark the parent as down + // Mark the parent failure time. ink_atomic_swap(&pRec->failedAt, now); // If this is clean mark down and not a failed retry, we @@ -291,15 +292,25 @@ ParentConsistentHash::markParentDown(const ParentSelectionPolicy *policy, Parent Note("Parent %s marked as down %s:%d", (result->retry) ? "retry" : "initially", pRec->hostname, pRec->port); } else { - int old_count = ink_atomic_increment(&pRec->failCount, 1); + int old_count = 0; + now = time(nullptr); + + // if the last failure was outside the retry window, set the failcount to 1 + // and failedAt to now. 
+ if ((pRec->failedAt + retry_time) < now) { + ink_atomic_swap(&pRec->failCount, 1); + ink_atomic_swap(&pRec->failedAt, now); + } else { + old_count = ink_atomic_increment(&pRec->failCount, 1); + } Debug("parent_select", "Parent fail count increased to %d for %s:%d", old_count + 1, pRec->hostname, pRec->port); new_fail_count = old_count + 1; } - if (new_fail_count > 0 && new_fail_count >= policy->FailThreshold) { - Note("Failure threshold met failcount:%d >= threshold:%d, http parent proxy %s:%d marked down", new_fail_count, - policy->FailThreshold, pRec->hostname, pRec->port); + if (new_fail_count > 0 && new_fail_count >= static_cast(fail_threshold)) { + Note("Failure threshold met failcount:%d >= threshold:%d, http parent proxy %s:%d marked down", new_fail_count, fail_threshold, + pRec->hostname, pRec->port); ink_atomic_swap(&pRec->available, false); Debug("parent_select", "Parent %s:%d marked unavailable, pRec->available=%d", pRec->hostname, pRec->port, pRec->available); } diff --git a/proxy/ParentConsistentHash.h b/proxy/ParentConsistentHash.h index 23b9c540ecd..567599fae04 100644 --- a/proxy/ParentConsistentHash.h +++ b/proxy/ParentConsistentHash.h @@ -53,8 +53,8 @@ class ParentConsistentHash : public ParentSelectionStrategy ParentConsistentHash(ParentRecord *_parent_record); ~ParentConsistentHash(); uint64_t getPathHash(HttpRequestData *hrdata, ATSHash64 *h); - void selectParent(const ParentSelectionPolicy *policy, bool firstCall, ParentResult *result, RequestData *rdata); - void markParentDown(const ParentSelectionPolicy *policy, ParentResult *result); + void selectParent(bool firstCall, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, unsigned int retry_time); + void markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time); uint32_t numParents(ParentResult *result) const; void markParentUp(ParentResult *result); }; diff --git a/proxy/ParentRoundRobin.cc b/proxy/ParentRoundRobin.cc index 
66e5647a5bc..377427c7be9 100644 --- a/proxy/ParentRoundRobin.cc +++ b/proxy/ParentRoundRobin.cc @@ -54,7 +54,8 @@ ParentRoundRobin::~ParentRoundRobin() } void -ParentRoundRobin::selectParent(const ParentSelectionPolicy *policy, bool first_call, ParentResult *result, RequestData *rdata) +ParentRoundRobin::selectParent(bool first_call, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, + unsigned int retry_time) { Debug("parent_select", "In ParentRoundRobin::selectParent(): Using a round robin parent selection strategy."); int cur_index = 0; @@ -133,16 +134,16 @@ ParentRoundRobin::selectParent(const ParentSelectionPolicy *policy, bool first_c do { Debug("parent_select", "cur_index: %d, result->start_parent: %d", cur_index, result->start_parent); // DNS ParentOnly inhibits bypassing the parent so always return that t - if ((result->rec->parents[cur_index].failedAt == 0) || (result->rec->parents[cur_index].failCount < policy->FailThreshold)) { - Debug("parent_select", "FailThreshold = %d", policy->FailThreshold); + if ((result->rec->parents[cur_index].failedAt == 0) || + (result->rec->parents[cur_index].failCount < static_cast(fail_threshold))) { + Debug("parent_select", "FailThreshold = %d", fail_threshold); Debug("parent_select", "Selecting a parent due to little failCount (faileAt: %u failCount: %d)", (unsigned)result->rec->parents[cur_index].failedAt, result->rec->parents[cur_index].failCount); parentUp = true; } else { - if ((result->wrap_around) || - ((result->rec->parents[cur_index].failedAt + policy->ParentRetryTime) < request_info->xact_start)) { + if ((result->wrap_around) || ((result->rec->parents[cur_index].failedAt + retry_time) < request_info->xact_start)) { Debug("parent_select", "Parent[%d].failedAt = %u, retry = %u,xact_start = %" PRId64 " but wrap = %d", cur_index, - (unsigned)result->rec->parents[cur_index].failedAt, policy->ParentRetryTime, (int64_t)request_info->xact_start, + (unsigned)result->rec->parents[cur_index].failedAt, 
retry_time, (int64_t)request_info->xact_start, result->wrap_around); // Reuse the parent parentUp = true; @@ -185,7 +186,7 @@ ParentRoundRobin::numParents(ParentResult *result) const } void -ParentRoundRobin::markParentDown(const ParentSelectionPolicy *policy, ParentResult *result) +ParentRoundRobin::markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) { time_t now; pRecord *pRec; @@ -220,7 +221,7 @@ ParentRoundRobin::markParentDown(const ParentSelectionPolicy *policy, ParentResu // it relates to how long the parent has been down. now = time(nullptr); - // Mark the parent as down + // Mark the parent failure time. ink_atomic_swap(&pRec->failedAt, now); // If this is clean mark down and not a failed retry, we @@ -232,15 +233,25 @@ ParentRoundRobin::markParentDown(const ParentSelectionPolicy *policy, ParentResu Note("Parent %s marked as down %s:%d", (result->retry) ? "retry" : "initially", pRec->hostname, pRec->port); } else { - int old_count = ink_atomic_increment(&pRec->failCount, 1); + int old_count = 0; + now = time(nullptr); + + // if the last failure was outside the retry window, set the failcount to 1 + // and failedAt to now. 
+ if ((pRec->failedAt + retry_time) < now) { + ink_atomic_swap(&pRec->failCount, 1); + ink_atomic_swap(&pRec->failedAt, now); + } else { + old_count = ink_atomic_increment(&pRec->failCount, 1); + } Debug("parent_select", "Parent fail count increased to %d for %s:%d", old_count + 1, pRec->hostname, pRec->port); new_fail_count = old_count + 1; } - if (new_fail_count > 0 && new_fail_count >= policy->FailThreshold) { - Note("Failure threshold met failcount:%d >= threshold:%d, http parent proxy %s:%d marked down", new_fail_count, - policy->FailThreshold, pRec->hostname, pRec->port); + if (new_fail_count > 0 && new_fail_count >= static_cast(fail_threshold)) { + Note("Failure threshold met failcount:%d >= threshold:%d, http parent proxy %s:%d marked down", new_fail_count, fail_threshold, + pRec->hostname, pRec->port); ink_atomic_swap(&pRec->available, false); Debug("parent_select", "Parent marked unavailable, pRec->available=%d", pRec->available); } diff --git a/proxy/ParentRoundRobin.h b/proxy/ParentRoundRobin.h index 818e26c0356..b7995ec594e 100644 --- a/proxy/ParentRoundRobin.h +++ b/proxy/ParentRoundRobin.h @@ -40,8 +40,8 @@ class ParentRoundRobin : public ParentSelectionStrategy public: ParentRoundRobin(ParentRecord *_parent_record, ParentRR_t _round_robin_type); ~ParentRoundRobin(); - void selectParent(const ParentSelectionPolicy *policy, bool firstCall, ParentResult *result, RequestData *rdata); - void markParentDown(const ParentSelectionPolicy *policy, ParentResult *result); + void selectParent(bool firstCall, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, unsigned int retry_time); + void markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time); uint32_t numParents(ParentResult *result) const; void markParentUp(ParentResult *result); }; diff --git a/proxy/ParentSelection.cc b/proxy/ParentSelection.cc index 3b08812c1a7..520f75472dd 100644 --- a/proxy/ParentSelection.cc +++ b/proxy/ParentSelection.cc @@ -96,7 
+96,7 @@ ParentConfigParams::apiParentExists(HttpRequestData *rdata) } void -ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result) +ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) { P_table *tablePtr = parent_table; ParentRecord *defaultPtr = DefaultParent; @@ -125,6 +125,7 @@ ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result) result->last_parent = 0; Debug("parent_select", "Result for %s was API set parent %s:%d", rdata->get_host(), result->hostname, result->port); + return; } tablePtr->Match(rdata, result); @@ -144,7 +145,7 @@ ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result) } if (rec != extApiRecord) { - selectParent(true, result, rdata); + selectParent(true, result, rdata, fail_threshold, retry_time); } const char *host = rdata->get_host(); @@ -173,7 +174,7 @@ ParentConfigParams::findParent(HttpRequestData *rdata, ParentResult *result) } void -ParentConfigParams::nextParent(HttpRequestData *rdata, ParentResult *result) +ParentConfigParams::nextParent(HttpRequestData *rdata, ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) { P_table *tablePtr = parent_table; @@ -197,7 +198,7 @@ ParentConfigParams::nextParent(HttpRequestData *rdata, ParentResult *result) // Find the next parent in the array Debug("parent_select", "Calling selectParent() from nextParent"); - selectParent(false, result, rdata); + selectParent(false, result, rdata, fail_threshold, retry_time); const char *host = rdata->get_host(); @@ -227,9 +228,11 @@ ParentConfigParams::nextParent(HttpRequestData *rdata, ParentResult *result) bool ParentConfigParams::parentExists(HttpRequestData *rdata) { + unsigned int fail_threshold = policy.FailThreshold; + unsigned int retry_time = policy.ParentRetryTime; ParentResult result; - findParent(rdata, &result); + findParent(rdata, &result, fail_threshold, retry_time); if 
(result.result == PARENT_SPECIFIED) { return true; @@ -910,8 +913,10 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, passes = fails = 0; config.startup(); char tbl[2048]; - HttpRequestData *request = nullptr; - ParentResult *result = nullptr; + HttpRequestData *request = nullptr; + ParentResult *result = nullptr; + unsigned int fail_threshold = 1; + unsigned int retry_time = 5; #define T(x) \ do { \ @@ -925,10 +930,10 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, ParentTable = new P_table("", "ParentSelection Unit Test Table", &http_dest_tags, \ ALLOW_HOST_TABLE | ALLOW_REGEX_TABLE | ALLOW_URL_TABLE | ALLOW_IP_TABLE | DONT_BUILD_TABLE); \ ParentTable->BuildTableFromString(tbl); \ - params = new ParentConfigParams(ParentTable); \ - params->policy.FailThreshold = 1; \ - params->policy.ParentEnable = true; \ - params->policy.ParentRetryTime = 5; \ + RecSetRecordInt("proxy.config.http.parent_proxy.fail_threshold", fail_threshold, REC_SOURCE_DEFAULT); \ + RecSetRecordInt("proxy.config.http.parent_proxy.retry_time", retry_time, REC_SOURCE_DEFAULT); \ + RecSetRecordInt("proxy.config.http.parent_proxy_routing_enable", 1, REC_SOURCE_DEFAULT); \ + params = new ParentConfigParams(ParentTable); \ } while (0) #define REINIT \ @@ -964,9 +969,9 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, } \ } while (0) -#define FP \ - do { \ - params->findParent(request, result); \ +#define FP \ + do { \ + params->findParent(request, result, fail_threshold, retry_time); \ } while (0) // Test 1 @@ -1092,7 +1097,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, br(request, "i.am.rabbit.net"); FP; RE(verify(result, PARENT_SPECIFIED, "fuzzy", 80), 7); - params->markParentDown(result); + params->markParentDown(result, fail_threshold, retry_time); // Test 9 ST(9); @@ -1137,7 +1142,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t 
ATS_UNUSED */, br(request, "i.am.rabbit.net"); FP; RE(verify(result, PARENT_SPECIFIED, "frisky", 80), 14); - params->markParentDown(result); + params->markParentDown(result, fail_threshold, retry_time); // restart the loop @@ -1184,7 +1189,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, br(request, "i.am.rabbit.net"); FP; RE(verify(result, PARENT_SPECIFIED, "furry", 80), 21); - params->markParentDown(result); + params->markParentDown(result, fail_threshold, retry_time); // Test 23 - 32 for (i = 23; i < 33; i++) { @@ -1195,7 +1200,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, RE(verify(result, PARENT_SPECIFIED, "fluffy", 80), i); } - params->markParentDown(result); // now they're all down + params->markParentDown(result, 1, 5); // now they're all down // Test 33 - 132 for (i = 33; i < 133; i++) { @@ -1248,7 +1253,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, FP; sleep(1); RE(verify(result, PARENT_SPECIFIED, "fuzzy", 80), 173); - params->markParentDown(result); // fuzzy is down. + params->markParentDown(result, fail_threshold, retry_time); // fuzzy is down. // Test 174 ST(174); @@ -1258,7 +1263,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "frisky", 80), 174); - params->markParentDown(result); // frisky is down. + params->markParentDown(result, fail_threshold, retry_time); // frisky is down. // Test 175 ST(175); @@ -1268,7 +1273,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "furry", 80), 175); - params->markParentDown(result); // frisky is down. + params->markParentDown(result, fail_threshold, retry_time); // frisky is down. 
// Test 176 ST(176); @@ -1278,7 +1283,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "fluffy", 80), 176); - params->markParentDown(result); // all are down now. + params->markParentDown(result, fail_threshold, retry_time); // all are down now. // Test 177 ST(177); @@ -1300,7 +1305,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "fuzzy", 80), 178); - params->markParentDown(result); // fuzzy is down + params->markParentDown(result, fail_threshold, retry_time); // fuzzy is down // Test 179 ST(179); @@ -1310,7 +1315,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "fluffy", 80), 179); - params->markParentDown(result); // fluffy is down + params->markParentDown(result, fail_threshold, retry_time); // fluffy is down // Test 180 ST(180); @@ -1320,7 +1325,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "furry", 80), 180); - params->markParentDown(result); // furry is down + params->markParentDown(result, fail_threshold, retry_time); // furry is down // Test 181 ST(181); @@ -1330,7 +1335,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, sleep(1); RE(verify(result, PARENT_SPECIFIED, "frisky", 80), 181); - params->markParentDown(result); // frisky is down and we should be back on fuzzy. + params->markParentDown(result, fail_threshold, retry_time); // frisky is down and we should be back on fuzzy. // Test 182 ST(182); @@ -1341,7 +1346,7 @@ EXCLUSIVE_REGRESSION_TEST(PARENTSELECTION)(RegressionTest * /* t ATS_UNUSED */, RE(verify(result, PARENT_FAIL, nullptr, 80), 182); // wait long enough so that fuzzy is retryable. 
- sleep(params->policy.ParentRetryTime - 1); + sleep(params->policy.ParentRetryTime - 2); // Test 183 ST(183); diff --git a/proxy/ParentSelection.h b/proxy/ParentSelection.h index 78c09423616..be56fa9bf03 100644 --- a/proxy/ParentSelection.h +++ b/proxy/ParentSelection.h @@ -48,6 +48,7 @@ struct RequestData; struct matcher_line; struct ParentResult; +struct OverridableHttpConfigParams; class ParentRecord; class ParentSelectionStrategy; @@ -289,17 +290,19 @@ struct ParentSelectionPolicy { class ParentSelectionStrategy { public: - // void selectParent(const ParentSelectionPolicy *policy, bool firstCall, ParentResult *result, RequestData *rdata) + // void selectParent(bool firstCall, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, unsigned int + // retry_time) // // The implementation parent lookup. // - virtual void selectParent(const ParentSelectionPolicy *policy, bool firstCall, ParentResult *result, RequestData *rdata) = 0; + virtual void selectParent(bool firstCall, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, + unsigned int retry_time) = 0; - // void markParentDown(const ParentSelectionPolicy *policy, ParentResult *result) + // void markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) // // Marks the parent pointed to by result as down // - virtual void markParentDown(const ParentSelectionPolicy *policy, ParentResult *result) = 0; + virtual void markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) = 0; // uint32_t numParents(ParentResult *result); // @@ -325,26 +328,26 @@ class ParentConfigParams : public ConfigInfo ~ParentConfigParams(){}; bool apiParentExists(HttpRequestData *rdata); - void findParent(HttpRequestData *rdata, ParentResult *result); - void nextParent(HttpRequestData *rdata, ParentResult *result); + void findParent(HttpRequestData *rdata, ParentResult *result, unsigned int fail_threshold, unsigned int retry_time); + void 
nextParent(HttpRequestData *rdata, ParentResult *result, unsigned int fail_threshold, unsigned int retry_time); bool parentExists(HttpRequestData *rdata); // implementation of functions from ParentSelectionStrategy. void - selectParent(bool firstCall, ParentResult *result, RequestData *rdata) + selectParent(bool firstCall, ParentResult *result, RequestData *rdata, unsigned int fail_threshold, unsigned int retry_time) { if (!result->is_api_result()) { ink_release_assert(result->rec->selection_strategy != NULL); - return result->rec->selection_strategy->selectParent(&policy, firstCall, result, rdata); + return result->rec->selection_strategy->selectParent(firstCall, result, rdata, fail_threshold, retry_time); } } void - markParentDown(ParentResult *result) + markParentDown(ParentResult *result, unsigned int fail_threshold, unsigned int retry_time) { if (!result->is_api_result()) { ink_release_assert(result->rec->selection_strategy != NULL); - result->rec->selection_strategy->markParentDown(&policy, result); + result->rec->selection_strategy->markParentDown(result, fail_threshold, retry_time); } } diff --git a/proxy/Plugin.cc b/proxy/Plugin.cc index 5ae13c9f0ee..c8b91073c81 100644 --- a/proxy/Plugin.cc +++ b/proxy/Plugin.cc @@ -21,7 +21,7 @@ limitations under the License. */ -#include +#include #include "ts/ink_platform.h" #include "ts/ink_file.h" #include "ts/ParseRules.h" @@ -36,7 +36,7 @@ static const char *plugin_dir = "."; -typedef void (*init_func_t)(int argc, char *argv[]); +using init_func_t = void (*)(int, char **); // Plugin registration vars // @@ -256,7 +256,7 @@ plugin_init(bool validateOnly) } // not comment or blank, so rip line into tokens - while (1) { + while (true) { if (argc >= MAX_PLUGIN_ARGS) { Warning("Exceeded max number of args (%d) for plugin: [%s]", MAX_PLUGIN_ARGS, argc > 0 ? 
argv[0] : "???"); break; diff --git a/proxy/PluginVC.cc b/proxy/PluginVC.cc index 1ad01d9403d..7a377e723bb 100644 --- a/proxy/PluginVC.cc +++ b/proxy/PluginVC.cc @@ -211,7 +211,9 @@ PluginVC::main_handler(int event, void *data) } else if (call_event == inactive_event) { if (inactive_timeout_at && inactive_timeout_at < Thread::get_hrtime()) { process_timeout(&inactive_event, VC_EVENT_INACTIVITY_TIMEOUT); - call_event->cancel(); + if (nullptr == inactive_event) { + call_event->cancel(); + } } } else { if (call_event == sm_lock_retry_event) { @@ -749,13 +751,17 @@ PluginVC::process_timeout(Event **e, int event_to_send) if (closed) { // already closed, ignore the timeout event // to avoid handle_event asserting use-after-free + *e = nullptr; return; } if (read_state.vio.op == VIO::READ && !read_state.shutdown && read_state.vio.ntodo() > 0) { MUTEX_TRY_LOCK(lock, read_state.vio.mutex, (*e)->ethread); if (!lock.is_locked()) { - (*e)->schedule_in(PVC_LOCK_RETRY_TIME); + if (*e == active_event) { + // Only reschedule active_event because inactive_event is a periodic event. 
+ (*e)->schedule_in(PVC_LOCK_RETRY_TIME); + } return; } *e = nullptr; @@ -928,7 +937,7 @@ PluginVC::set_tcp_init_cwnd(int /* init_cwnd ATS_UNUSED */) } int -PluginVC::set_tcp_congestion_control(const char *ATS_UNUSED, int ATS_UNUSED) +PluginVC::set_tcp_congestion_control(int ATS_UNUSED) { return -1; } @@ -1234,7 +1243,7 @@ class PVCTestDriver : public NetTestDriver { public: PVCTestDriver(); - ~PVCTestDriver(); + ~PVCTestDriver() override; void start_tests(RegressionTest *r_arg, int *pstatus_arg); void run_next_test(); diff --git a/proxy/PluginVC.h b/proxy/PluginVC.h index 9f10f5d958c..6137848f213 100644 --- a/proxy/PluginVC.h +++ b/proxy/PluginVC.h @@ -104,7 +104,7 @@ class PluginVC : public NetVConnection, public PluginIdentity virtual void set_local_addr(); virtual void set_remote_addr(); virtual int set_tcp_init_cwnd(int init_cwnd); - virtual int set_tcp_congestion_control(const char *name, int len); + virtual int set_tcp_congestion_control(int); virtual void apply_options(); diff --git a/proxy/ProxyClientSession.h b/proxy/ProxyClientSession.h index aa5d129f78b..9e8ff39b436 100644 --- a/proxy/ProxyClientSession.h +++ b/proxy/ProxyClientSession.h @@ -107,6 +107,12 @@ class ProxyClientSession : public VConnection return this->api_hooks.has_hooks() || http_global_hooks->has_hooks(); } + bool + is_active() const + { + return m_active; + } + // Initiate an API hook invocation. void do_api_callout(TSHttpHookID id); @@ -177,27 +183,17 @@ class ProxyClientSession : public VConnection } virtual int - populate_protocol(const char **result, int size) const + populate_protocol(ts::StringView *result, int size) const { - int retval = 0; - - if (get_netvc()) { - retval += this->get_netvc()->populate_protocol(result, size); - } - - return retval; + auto vc = this->get_netvc(); + return vc ? 
vc->populate_protocol(result, size) : 0; } virtual const char * - protocol_contains(const char *tag_prefix) const + protocol_contains(ts::StringView tag_prefix) const { - const char *retval = NULL; - - if (get_netvc()) { - retval = this->get_netvc()->protocol_contains(tag_prefix); - } - - return retval; + auto vc = this->get_netvc(); + return vc ? vc->protocol_contains(tag_prefix) : nullptr; } void set_session_active(); diff --git a/proxy/ProxyClientTransaction.h b/proxy/ProxyClientTransaction.h index d0c67f0b0db..b5735f8eae0 100644 --- a/proxy/ProxyClientTransaction.h +++ b/proxy/ProxyClientTransaction.h @@ -25,6 +25,7 @@ #define __PROXY_CLIENT_TRANSACTION_H__ #include "ProxyClientSession.h" +#include class HttpSM; class HttpServerSession; @@ -243,22 +244,15 @@ class ProxyClientTransaction : public VConnection } virtual int - populate_protocol(const char **result, int size) const + populate_protocol(ts::StringView *result, int size) const { - int retval = 0; - if (parent) { - retval = parent->populate_protocol(result, size); - } - return retval; + return parent ? parent->populate_protocol(result, size) : 0; } + virtual const char * - protocol_contains(const char *tag_prefix) const + protocol_contains(ts::StringView tag_prefix) const { - const char *retval = NULL; - if (parent) { - retval = parent->protocol_contains(tag_prefix); - } - return retval; + return parent ? 
parent->protocol_contains(tag_prefix) : nullptr; } protected: diff --git a/proxy/RegressionSM.cc b/proxy/RegressionSM.cc index 84d75d414c7..b31368e7867 100644 --- a/proxy/RegressionSM.cc +++ b/proxy/RegressionSM.cc @@ -252,8 +252,8 @@ RegressionSM::RegressionSM(const RegressionSM &ao) : Continuation(ao) } struct ReRegressionSM : public RegressionSM { - virtual void - run() + void + run() override { if (time(nullptr) < 1) { // example test rprintf(t, "impossible"); @@ -263,8 +263,8 @@ struct ReRegressionSM : public RegressionSM { } } ReRegressionSM(RegressionTest *at) : RegressionSM(at) {} - virtual RegressionSM * - clone() + RegressionSM * + clone() override { return new ReRegressionSM(*this); } diff --git a/proxy/ReverseProxy.cc b/proxy/ReverseProxy.cc index b9d7716178c..c21e41d6c31 100644 --- a/proxy/ReverseProxy.cc +++ b/proxy/ReverseProxy.cc @@ -119,7 +119,7 @@ response_url_remap(HTTPHdr *response_header) /** Used to read the remap.config file after the manager signals a change. */ struct UR_UpdateContinuation; -typedef int (UR_UpdateContinuation::*UR_UpdContHandler)(int, void *); +using UR_UpdContHandler = int (UR_UpdateContinuation::*)(int, void *); struct UR_UpdateContinuation : public Continuation { int file_update_handler(int /* etype ATS_UNUSED */, void * /* data ATS_UNUSED */) @@ -149,8 +149,10 @@ reloadUrlRewrite() newTable = new UrlRewrite(); if (newTable->is_valid()) { new_Deleter(rewrite_table, URL_REWRITE_TIMEOUT); - Debug("url_rewrite", "remap.config done reloading!"); + static const char *msg = "remap.config done reloading!"; ink_atomic_swap(&rewrite_table, newTable); + Debug("url_rewrite", "%s", msg); + Note("%s", msg); return true; } else { static const char *msg = "failed to reload remap.config, not replacing!"; diff --git a/proxy/SocksProxy.cc b/proxy/SocksProxy.cc index 2fbaf719b2d..e03e4fc8fe7 100644 --- a/proxy/SocksProxy.cc +++ b/proxy/SocksProxy.cc @@ -43,7 +43,7 @@ static RecRawStatBlock *socksproxy_stat_block; #define 
SOCKSPROXY_INC_STAT(x) RecIncrRawStat(socksproxy_stat_block, mutex->thread_holding, x) struct SocksProxy : public Continuation { - typedef int (SocksProxy::*EventHandler)(int event, void *data); + using EventHandler = int (SocksProxy::*)(int, void *); enum { SOCKS_INIT = 1, @@ -69,7 +69,7 @@ struct SocksProxy : public Continuation { pending_action(nullptr) { } - ~SocksProxy() {} + ~SocksProxy() override {} // int startEvent(int event, void * data); int mainEvent(int event, void *data); int setupHttpRequest(unsigned char *p); @@ -279,7 +279,7 @@ SocksProxy::mainEvent(int event, void *data) buf->fill(n_bytes); - clientVC->do_io_write(this, n_bytes, reader, 0); + clientVC->do_io_write(this, n_bytes, reader, false); state = AUTH_DONE; } else { @@ -439,7 +439,7 @@ SocksProxy::sendResp(bool granted) } buf->fill(n_bytes); - clientVC->do_io_write(this, n_bytes, reader, 0); + clientVC->do_io_write(this, n_bytes, reader, false); return n_bytes; } @@ -494,7 +494,7 @@ new_SocksProxy(NetVConnection *netVC) } struct SocksAccepter : public Continuation { - typedef int (SocksAccepter::*SocksAccepterHandler)(int, void *); + using SocksAccepterHandler = int (SocksAccepter::*)(int, void *); int mainEvent(int event, NetVConnection *netVC) diff --git a/proxy/api/ts/ts.h b/proxy/api/ts/ts.h index 128a4e3260b..96f47a57a79 100644 --- a/proxy/api/ts/ts.h +++ b/proxy/api/ts/ts.h @@ -1235,6 +1235,13 @@ tsapi TSSslContext TSSslContextFindByAddr(struct sockaddr const *); // Create a new SSL context based on the settings in records.config tsapi TSSslContext TSSslServerContextCreate(void); tsapi void TSSslContextDestroy(TSSslContext ctx); +tsapi TSNextProtocolSet TSUnregisterProtocol(TSNextProtocolSet protoset, const char *protocol); +TSAcceptor TSAcceptorGet(TSVConn sslp); +TSNextProtocolSet TSGetcloneProtoSet(TSAcceptor tna); +TSAcceptor TSAcceptorGetbyID(int ID); +void TSRegisterProtocolSet(TSVConn sslp, TSNextProtocolSet ps); +int TSAcceptorCount(); +int TSAcceptorIDGet(TSAcceptor acceptor); 
// Returns 1 if the sslp argument refers to a SSL connection tsapi int TSVConnIsSsl(TSVConn sslp); @@ -1711,7 +1718,6 @@ tsapi TSVConn TSTransformOutputVConnGet(TSVConn connp); /* -------------------------------------------------------------------------- Net VConnections */ - tsapi struct sockaddr const *TSNetVConnRemoteAddrGet(TSVConn vc); /** @@ -2412,7 +2418,7 @@ tsapi TSReturnCode TSUuidStringParse(TSUuid uuid, const char *uuid_str); tsapi TSReturnCode TSClientRequestUuidGet(TSHttpTxn txnp, char *uuid_str); /* Get the process global UUID, resets on every startup */ -tsapi const TSUuid TSProcessUuidGet(void); +tsapi TSUuid TSProcessUuidGet(void); /** Returns the plugin_tag. diff --git a/proxy/congest/CongestionTest.cc b/proxy/congest/CongestionTest.cc index b4ca3198555..98606048474 100644 --- a/proxy/congest/CongestionTest.cc +++ b/proxy/congest/CongestionTest.cc @@ -28,7 +28,6 @@ * ****************************************************************************/ #include "ts/ink_platform.h" -#include #include "Main.h" #include "CongestionDB.h" #include "Congestion.h" diff --git a/proxy/hdrs/HTTP.cc b/proxy/hdrs/HTTP.cc index 587f6fb3d2c..3be7064da90 100644 --- a/proxy/hdrs/HTTP.cc +++ b/proxy/hdrs/HTTP.cc @@ -24,9 +24,9 @@ #include "ts/ink_defs.h" #include "ts/ink_platform.h" #include "ts/ink_inet.h" -#include -#include -#include +#include +#include +#include #include "HTTP.h" #include "HdrToken.h" #include "ts/Diags.h" diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h index 4e9c1bffb56..5891834dcfe 100644 --- a/proxy/hdrs/HTTP.h +++ b/proxy/hdrs/HTTP.h @@ -59,6 +59,7 @@ enum HTTPStatus { HTTP_STATUS_NOT_MODIFIED = 304, HTTP_STATUS_USE_PROXY = 305, HTTP_STATUS_TEMPORARY_REDIRECT = 307, + HTTP_STATUS_PERMANENT_REDIRECT = 308, HTTP_STATUS_BAD_REQUEST = 400, HTTP_STATUS_UNAUTHORIZED = 401, @@ -495,6 +496,7 @@ class HTTPHdr : public MIMEHdr mutable int m_port; ///< Target port. mutable bool m_target_cached; ///< Whether host name and port are cached. 
mutable bool m_target_in_url; ///< Whether host name and port are in the URL. + mutable bool m_100_continue_required; /// Set if the port was effectively specified in the header. /// @c true if the target (in the URL or the HOST field) also specified /// a port. That is, @c true if whatever source had the target host @@ -760,7 +762,7 @@ HTTPVersion::operator<=(const HTTPVersion &hv) const /*------------------------------------------------------------------------- -------------------------------------------------------------------------*/ -inline HTTPHdr::HTTPHdr() : MIMEHdr(), m_http(NULL), m_url_cached(), m_target_cached(false) +inline HTTPHdr::HTTPHdr() : MIMEHdr(), m_http(NULL), m_url_cached(), m_target_cached(false), m_100_continue_required(false) { } diff --git a/proxy/hdrs/HdrHeap.cc b/proxy/hdrs/HdrHeap.cc index 7a356251350..87490dd11c1 100644 --- a/proxy/hdrs/HdrHeap.cc +++ b/proxy/hdrs/HdrHeap.cc @@ -100,11 +100,11 @@ HdrHeap::init() // garbage it is pointing to m_read_write_heap.detach(); - for (int i = 0; i < HDR_BUF_RONLY_HEAPS; i++) { - m_ronly_heap[i].m_heap_start = nullptr; - m_ronly_heap[i].m_ref_count_ptr.detach(); - m_ronly_heap[i].m_locked = false; - m_ronly_heap[i].m_heap_len = 0; + for (auto &i : m_ronly_heap) { + i.m_heap_start = nullptr; + i.m_ref_count_ptr.detach(); + i.m_locked = false; + i.m_heap_len = 0; } m_lost_string_space = 0; @@ -176,8 +176,8 @@ HdrHeap::destroy() } m_read_write_heap = nullptr; - for (int i = 0; i < HDR_BUF_RONLY_HEAPS; i++) { - m_ronly_heap[i].m_ref_count_ptr = nullptr; + for (auto &i : m_ronly_heap) { + i.m_ref_count_ptr = nullptr; } if (m_size == HDR_HEAP_DEFAULT_SIZE) { @@ -204,7 +204,7 @@ HdrHeap::allocate_obj(int nbytes, int type) HdrHeap *h = this; - while (1) { + while (true) { if ((unsigned)nbytes <= (h->m_free_size)) { new_space = h->m_free_start; h->m_free_start += nbytes; @@ -330,12 +330,12 @@ HdrHeap::demote_rw_str_heap() { // First, see if we have any open slots for read // only heaps - for (int i = 0; 
i < HDR_BUF_RONLY_HEAPS; i++) { - if (m_ronly_heap[i].m_heap_start == nullptr) { + for (auto &i : m_ronly_heap) { + if (i.m_heap_start == nullptr) { // We've found a slot - m_ronly_heap[i].m_ref_count_ptr = m_read_write_heap.object(); - m_ronly_heap[i].m_heap_start = (char *)m_read_write_heap.get(); - m_ronly_heap[i].m_heap_len = m_read_write_heap->m_heap_size - m_read_write_heap->m_free_size; + i.m_ref_count_ptr = m_read_write_heap.object(); + i.m_heap_start = (char *)m_read_write_heap.get(); + i.m_heap_len = m_read_write_heap->m_heap_size - m_read_write_heap->m_free_size; // Debug("hdrs", "Demoted rw heap of %d size", m_read_write_heap->m_heap_size); m_read_write_heap = nullptr; @@ -376,11 +376,11 @@ HdrHeap::coalesce_str_heaps(int incoming_size) m_read_write_heap = new_heap; int heaps_removed = 0; - for (int j = 0; j < HDR_BUF_RONLY_HEAPS; j++) { - if (m_ronly_heap[j].m_heap_start != nullptr && m_ronly_heap[j].m_locked == false) { - m_ronly_heap[j].m_ref_count_ptr = nullptr; - m_ronly_heap[j].m_heap_start = nullptr; - m_ronly_heap[j].m_heap_len = 0; + for (auto &j : m_ronly_heap) { + if (j.m_heap_start != nullptr && j.m_locked == false) { + j.m_ref_count_ptr = nullptr; + j.m_heap_start = nullptr; + j.m_heap_len = 0; heaps_removed++; } } @@ -488,10 +488,10 @@ HdrHeap::sanity_check_strs() num_heaps++; } - for (int i = 0; i < HDR_BUF_RONLY_HEAPS; i++) { - if (m_ronly_heap[i].m_heap_start != nullptr) { - heaps[num_heaps].start = m_ronly_heap[i].m_heap_start; - heaps[num_heaps].end = m_ronly_heap[i].m_heap_start + m_ronly_heap[i].m_heap_len; + for (auto &i : m_ronly_heap) { + if (i.m_heap_start != nullptr) { + heaps[num_heaps].start = i.m_heap_start; + heaps[num_heaps].end = i.m_heap_start + i.m_heap_len; num_heaps++; } } @@ -562,9 +562,9 @@ HdrHeap::marshal_length() len += m_read_write_heap->m_heap_size - (sizeof(HdrStrHeap) + m_read_write_heap->m_free_size); } - for (int j = 0; j < HDR_BUF_RONLY_HEAPS; j++) { - if (m_ronly_heap[j].m_heap_start != nullptr) { - len 
+= m_ronly_heap[j].m_heap_len; + for (auto &j : m_ronly_heap) { + if (j.m_heap_start != nullptr) { + len += j.m_heap_len; } } @@ -861,7 +861,7 @@ HdrHeap::check_marshalled(uint32_t buf_length) int HdrHeap::unmarshal(int buf_length, int obj_type, HdrHeapObjImpl **found_obj, RefCountObj *block_ref) { - bool obj_found = false; + *found_obj = nullptr; // Check out this heap and make sure it is OK if (m_magic != HDR_BUF_MAGIC_MARSHALED) { @@ -929,7 +929,7 @@ HdrHeap::unmarshal(int buf_length, int obj_type, HdrHeapObjImpl **found_obj, Ref HdrHeapObjImpl *obj = (HdrHeapObjImpl *)obj_data; ink_assert(obj_is_aligned(obj)); - if (obj->m_type == (unsigned)obj_type && obj_found == false) { + if (obj->m_type == (unsigned)obj_type && *found_obj == nullptr) { *found_obj = obj; } @@ -1064,10 +1064,9 @@ HdrHeap::inherit_string_heaps(const HdrHeap *inherit_from) inherit_from->m_read_write_heap.get(), &first_free)); } // Copy over read only string heaps - for (int i = 0; i < HDR_BUF_RONLY_HEAPS; i++) { - if (inherit_from->m_ronly_heap[i].m_heap_start) { - ink_release_assert(attach_str_heap(inherit_from->m_ronly_heap[i].m_heap_start, inherit_from->m_ronly_heap[i].m_heap_len, - inherit_from->m_ronly_heap[i].m_ref_count_ptr.get(), &first_free)); + for (const auto &i : inherit_from->m_ronly_heap) { + if (i.m_heap_start) { + ink_release_assert(attach_str_heap(i.m_heap_start, i.m_heap_len, i.m_ref_count_ptr.get(), &first_free)); } } diff --git a/proxy/hdrs/HdrTest.cc b/proxy/hdrs/HdrTest.cc index f9629251882..eaa98e779b1 100644 --- a/proxy/hdrs/HdrTest.cc +++ b/proxy/hdrs/HdrTest.cc @@ -475,7 +475,9 @@ HdrTest::test_url() int HdrTest::test_mime() { - static const char mime[] = { + // This can not be a static string (any more) since we unfold the headers + // in place. 
+ char mime[] = { // "Date: Tuesday, 08-Dec-98 20:32:17 GMT\r\n" "Date: 6 Nov 1994 08:49:37 GMT\r\n" "Max-Forwards: 65535\r\n" @@ -514,7 +516,7 @@ HdrTest::test_mime() mime_parser_init(&parser); - bool must_copy_strs = 0; + bool must_copy_strs = false; hdr.create(nullptr); err = hdr.parse(&parser, &start, end, must_copy_strs, false); @@ -523,6 +525,26 @@ HdrTest::test_mime() return (failures_to_status("test_mime", 1)); } + // Test the (new) continuation line folding to be correct. This should replace the + // \r\n with two spaces (so a total of three between "part1" and "part2"). + int length; + const char *continuation = hdr.value_get("continuation", 12, &length); + + if ((13 != length)) { + printf("FAILED: continue header folded line was too short\n"); + return (failures_to_status("test_mime", 1)); + } + + if (strncmp(continuation + 5, " ", 3)) { + printf("FAILED: continue header unfolding did not produce correct WS's\n"); + return (failures_to_status("test_mime", 1)); + } + + if (strncmp(continuation, "part1 part2", 13)) { + printf("FAILED: continue header unfolding was not correct\n"); + return (failures_to_status("test_mime", 1)); + } + hdr.field_delete("not_there", 9); hdr.field_delete("accept", 6); hdr.field_delete("scooby", 6); @@ -539,7 +561,7 @@ HdrTest::test_mime() hdr.set_age(9999); - int length = hdr.length_get(); + length = hdr.length_get(); printf("hdr.length_get() = %d\n", length); time_t t0, t1, t2; @@ -558,7 +580,7 @@ HdrTest::test_mime() return (failures_to_status("test_mime", 1)); } - hdr.value_append("Cache-Control", 13, "no-cache", 8, 1); + hdr.value_append("Cache-Control", 13, "no-cache", 8, true); MIMEField *cc_field; StrList slist; @@ -713,7 +735,7 @@ HdrTest::test_http_aux(const char *request, const char *response) rsp_hdr.create(HTTP_TYPE_RESPONSE); printf("======== parsing\n\n"); - while (1) { + while (true) { err = req_hdr.parse_req(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -754,7 +776,7 @@ 
HdrTest::test_http_aux(const char *request, const char *response) http_parser_clear(&parser); http_parser_init(&parser); - while (1) { + while (true) { err = rsp_hdr.parse_resp(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -1064,7 +1086,7 @@ HdrTest::test_http_hdr_copy_over_aux(int testnum, const char *request, const cha http_parser_init(&parser); - while (1) { + while (true) { err = req_hdr.parse_req(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -1086,7 +1108,7 @@ HdrTest::test_http_hdr_copy_over_aux(int testnum, const char *request, const cha http_parser_init(&parser); - while (1) { + while (true) { err = resp_hdr.parse_resp(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -1187,7 +1209,7 @@ HdrTest::test_http_hdr_null_char(int testnum, const char *request, const char * cpy_buf[length / 2] = '\0'; http_parser_init(&parser); - while (1) { + while (true) { err = hdr.parse_req(&parser, &cpy_buf_ptr, cpy_buf_ptr + length, true); if (err != PARSE_RESULT_CONT) { break; @@ -1229,7 +1251,7 @@ HdrTest::test_http_hdr_ctl_char(int testnum, const char *request, const char * / http_parser_init(&parser); - while (1) { + while (true) { err = hdr.parse_req(&parser, &cpy_buf_ptr, cpy_buf_ptr + strlen(start), true); if (err != PARSE_RESULT_CONT) { break; @@ -1276,7 +1298,7 @@ HdrTest::test_http_hdr_print_and_copy_aux(int testnum, const char *request, cons http_parser_init(&parser); - while (1) { + while (true) { err = hdr.parse_req(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -1350,7 +1372,7 @@ HdrTest::test_http_hdr_print_and_copy_aux(int testnum, const char *request, cons http_parser_init(&parser); - while (1) { + while (true) { err = hdr.parse_resp(&parser, &start, end, true); if (err != PARSE_RESULT_CONT) { break; @@ -1656,7 +1678,7 @@ HdrTest::test_http_mutation() resp_hdr.create(HTTP_TYPE_RESPONSE); - while (1) { + while (true) { err = resp_hdr.parse_resp(&parser, &start, end, 
true); if (err != PARSE_RESULT_CONT) { break; diff --git a/proxy/hdrs/HdrToken.cc b/proxy/hdrs/HdrToken.cc index 6978315f481..9efbaf2b6c4 100644 --- a/proxy/hdrs/HdrToken.cc +++ b/proxy/hdrs/HdrToken.cc @@ -25,7 +25,7 @@ #include "ts/HashFNV.h" #include "ts/Diags.h" #include "ts/ink_memory.h" -#include +#include #include "ts/Allocator.h" #include "HTTP.h" #include "HdrToken.h" diff --git a/proxy/hdrs/MIME.cc b/proxy/hdrs/MIME.cc index 1023e6f1dc6..418c34a67af 100644 --- a/proxy/hdrs/MIME.cc +++ b/proxy/hdrs/MIME.cc @@ -25,9 +25,9 @@ #include "ts/ink_platform.h" #include "ts/ink_memory.h" #include "ts/TsBuffer.h" -#include -#include -#include +#include +#include +#include #include "MIME.h" #include "HdrHeap.h" #include "HdrToken.h" @@ -1029,7 +1029,7 @@ mime_hdr_cooked_stuff_init(MIMEHdrImpl *mh, MIMEField *changing_field_or_null) mh->m_cooked_stuff.m_cache_control.m_secs_min_fresh = 0; } if ((changing_field_or_null == nullptr) || (changing_field_or_null->m_wks_idx != MIME_WKSIDX_CACHE_CONTROL)) { - mh->m_cooked_stuff.m_pragma.m_no_cache = 0; + mh->m_cooked_stuff.m_pragma.m_no_cache = false; } } @@ -1449,7 +1449,7 @@ mime_field_create_named(HdrHeap *heap, MIMEHdrImpl *mh, const char *name, int le { MIMEField *field = mime_field_create(heap, mh); int field_name_wks_idx = hdrtoken_tokenize(name, length); - mime_field_name_set(heap, mh, field, field_name_wks_idx, name, length, 1); + mime_field_name_set(heap, mh, field, field_name_wks_idx, name, length, true); return field; } @@ -1637,7 +1637,7 @@ mime_hdr_field_delete(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, bool del heap->free_string(field->m_ptr_value, field->m_len_value); MIME_HDR_SANITY_CHECK(mh); - mime_hdr_field_detach(mh, field, 0); + mime_hdr_field_detach(mh, field, false); MIME_HDR_SANITY_CHECK(mh); mime_field_destroy(mh, field); @@ -1650,7 +1650,7 @@ mime_hdr_field_delete(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, bool del heap->free_string(field->m_ptr_value, field->m_len_value); 
MIME_HDR_SANITY_CHECK(mh); - mime_hdr_field_detach(mh, field, 0); + mime_hdr_field_detach(mh, field, false); MIME_HDR_SANITY_CHECK(mh); mime_field_destroy(mh, field); @@ -1699,7 +1699,7 @@ mime_hdr_prepare_for_value_set(HdrHeap *heap, MIMEHdrImpl *mh, const char *name, { wks_idx = hdrtoken_tokenize(name, name_length); field = mime_field_create(heap, mh); - mime_field_name_set(heap, mh, field, wks_idx, name, name_length, 1); + mime_field_name_set(heap, mh, field, wks_idx, name, name_length, true); mime_hdr_field_attach(mh, field, 0, nullptr); } else if (field->m_next_dup) // list of more than 1 field @@ -1707,7 +1707,7 @@ mime_hdr_prepare_for_value_set(HdrHeap *heap, MIMEHdrImpl *mh, const char *name, wks_idx = field->m_wks_idx; mime_hdr_field_delete(heap, mh, field, true); field = mime_field_create(heap, mh); - mime_field_name_set(heap, mh, field, wks_idx, name, name_length, 1); + mime_field_name_set(heap, mh, field, wks_idx, name, name_length, true); mime_hdr_field_attach(mh, field, 0, nullptr); } return field; @@ -2136,7 +2136,7 @@ mime_field_value_set_int(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, int32 { char buf[16]; int len = mime_format_int(buf, value, sizeof(buf)); - mime_field_value_set(heap, mh, field, buf, len, 1); + mime_field_value_set(heap, mh, field, buf, len, true); } void @@ -2144,7 +2144,7 @@ mime_field_value_set_uint(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, uint { char buf[16]; int len = mime_format_uint(buf, value, sizeof(buf)); - mime_field_value_set(heap, mh, field, buf, len, 1); + mime_field_value_set(heap, mh, field, buf, len, true); } void @@ -2152,7 +2152,7 @@ mime_field_value_set_int64(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, int { char buf[20]; int len = mime_format_int64(buf, value, sizeof(buf)); - mime_field_value_set(heap, mh, field, buf, len, 1); + mime_field_value_set(heap, mh, field, buf, len, true); } void @@ -2160,7 +2160,7 @@ mime_field_value_set_date(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, 
time { char buf[33]; int len = mime_format_date(buf, value); - mime_field_value_set(heap, mh, field, buf, len, 1); + mime_field_value_set(heap, mh, field, buf, len, true); } void @@ -2173,8 +2173,8 @@ mime_field_name_value_set(HdrHeap *heap, MIMEHdrImpl *mh, MIMEField *field, int1 ink_assert(field->m_readiness == MIME_FIELD_SLOT_READINESS_DETACHED); if (must_copy_strings) { - mime_field_name_set(heap, mh, field, name_wks_idx_or_neg1, name, name_length, 1); - mime_field_value_set(heap, mh, field, value, value_length, 1); + mime_field_name_set(heap, mh, field, name_wks_idx_or_neg1, name, name_length, true); + mime_field_value_set(heap, mh, field, value, value_length, true); } else { field->m_wks_idx = name_wks_idx_or_neg1; field->m_ptr_name = name; @@ -2435,6 +2435,12 @@ mime_scanner_get(MIMEScanner *S, const char **raw_input_s, const char *raw_input case MIME_PARSE_AFTER: // After a LF. Might be the end or a continuation. if (ParseRules::is_ws(*raw_input_c)) { + char *unfold = const_cast(raw_input_c - 1); + + *unfold-- = ' '; + if (ParseRules::is_cr(*unfold)) { + *unfold = ' '; + } S->m_state = MIME_PARSE_INSIDE; // back inside the field. } else { S->m_state = MIME_PARSE_BEFORE; // field terminated. 
@@ -2552,7 +2558,7 @@ mime_parser_parse(MIMEParser *parser, HdrHeap *heap, MIMEHdrImpl *mh, const char MIMEScanner *scanner = &parser->m_scanner; - while (1) { + while (true) { //////////////////////////////////////////////////////////////////////////// // get a name:value line, with all continuation lines glued into one line // //////////////////////////////////////////////////////////////////////////// @@ -2651,7 +2657,7 @@ mime_parser_parse(MIMEParser *parser, HdrHeap *heap, MIMEHdrImpl *mh, const char MIMEField *field = mime_field_create(heap, mh); mime_field_name_value_set(heap, mh, field, field_name_wks_idx, field_name_first, field_name_length, field_value_first, - field_value_length, true, total_line_length, 0); + field_value_length, true, total_line_length, false); mime_hdr_field_attach(mh, field, 1, nullptr); } } @@ -3884,7 +3890,7 @@ MIMEHdrImpl::recompute_cooked_stuff(MIMEField *changing_field_or_null) if (!field->has_dups()) { // try fastpath first s = field->value_get(&len); if (ptr_len_casecmp(s, len, "no-cache", 8) == 0) { - m_cooked_stuff.m_pragma.m_no_cache = 1; + m_cooked_stuff.m_pragma.m_no_cache = true; return; } } @@ -3901,7 +3907,7 @@ MIMEHdrImpl::recompute_cooked_stuff(MIMEField *changing_field_or_null) if (hdrtoken_tokenize(s, tlen, &token_wks) >= 0) { if (token_wks == MIME_VALUE_NO_CACHE) { - m_cooked_stuff.m_pragma.m_no_cache = 1; + m_cooked_stuff.m_pragma.m_no_cache = true; } } } diff --git a/proxy/hdrs/MIME.h b/proxy/hdrs/MIME.h index 1a5949809b2..babdbc1841d 100644 --- a/proxy/hdrs/MIME.h +++ b/proxy/hdrs/MIME.h @@ -67,7 +67,7 @@ enum MimeParseState { * * ***********************************************************************/ -#ifdef DEBUG +#ifdef ENABLE_MIME_SANITY_CHECK #define MIME_HDR_SANITY_CHECK mime_hdr_sanity_check #else #define MIME_HDR_SANITY_CHECK (void) diff --git a/proxy/hdrs/URL.cc b/proxy/hdrs/URL.cc index 8911c978ea5..372c2f3e529 100644 --- a/proxy/hdrs/URL.cc +++ b/proxy/hdrs/URL.cc @@ -21,7 +21,7 @@ limitations under 
the License. */ -#include +#include #include #include "ts/ink_platform.h" #include "ts/ink_memory.h" @@ -1854,12 +1854,11 @@ REGRESSION_TEST(VALIDATE_HDR_FIELD)(RegressionTest *t, int /* level ATS_UNUSED * TestBox box(t, pstatus); box = REGRESSION_TEST_PASSED; - for (unsigned int i = 0; i < sizeof(http_validate_hdr_field_test_case) / sizeof(http_validate_hdr_field_test_case[0]); ++i) { - const char *const txt = http_validate_hdr_field_test_case[i].text; + for (auto i : http_validate_hdr_field_test_case) { + const char *const txt = i.text; ts::ConstBuffer tmp = ts::ConstBuffer(txt, strlen(txt)); - box.check(validate_host_name(tmp) == http_validate_hdr_field_test_case[i].valid, - "Validation of FQDN (host) header: \"%s\", expected %s, but not", txt, - (http_validate_hdr_field_test_case[i].valid ? "true" : "false")); + box.check(validate_host_name(tmp) == i.valid, "Validation of FQDN (host) header: \"%s\", expected %s, but not", txt, + (i.valid ? "true" : "false")); } } @@ -1892,11 +1891,10 @@ REGRESSION_TEST(ParseRules_strict_URI)(RegressionTest *t, int /* level ATS_UNUSE TestBox box(t, pstatus); box = REGRESSION_TEST_PASSED; - for (unsigned int i = 0; i < sizeof(http_strict_uri_parsing_test_case) / sizeof(http_strict_uri_parsing_test_case[0]); ++i) { - const char *const uri = http_strict_uri_parsing_test_case[i].uri; - box.check(url_is_strictly_compliant(uri, uri + strlen(uri)) == http_strict_uri_parsing_test_case[i].valid, - "Strictly parse URI: \"%s\", expected %s, but not", uri, - (http_strict_uri_parsing_test_case[i].valid ? "true" : "false")); + for (auto i : http_strict_uri_parsing_test_case) { + const char *const uri = i.uri; + box.check(url_is_strictly_compliant(uri, uri + strlen(uri)) == i.valid, "Strictly parse URI: \"%s\", expected %s, but not", uri, + (i.valid ? 
"true" : "false")); } } diff --git a/proxy/http/Http1ClientSession.cc b/proxy/http/Http1ClientSession.cc index f1bb6aa2202..ba7cbf2ab0d 100644 --- a/proxy/http/Http1ClientSession.cc +++ b/proxy/http/Http1ClientSession.cc @@ -187,6 +187,8 @@ Http1ClientSession::new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOB DebugHttpSsn("[%" PRId64 "] session born, netvc %p", con_id, new_vc); + client_vc->set_tcp_congestion_control(CLIENT_SIDE); + read_buffer = iobuf ? iobuf : new_MIOBuffer(HTTP_HEADER_BUFFER_SIZE_INDEX); sm_reader = reader ? reader : read_buffer->alloc_reader(); trans.set_reader(sm_reader); diff --git a/proxy/http/HttpBodyFactory.cc b/proxy/http/HttpBodyFactory.cc index 57e7753336d..39f3950c696 100644 --- a/proxy/http/HttpBodyFactory.cc +++ b/proxy/http/HttpBodyFactory.cc @@ -107,7 +107,7 @@ HttpBodyFactory::fabricate_with_old_api(const char *type, HttpTransact::State *c Log::error("BODY_FACTORY: suppressing '%s' response for url '%s'", type, url); } unlock(); - return (nullptr); + return nullptr; } ////////////////////////////////////////////////////////////////////////////////// // if language-targeting activated, get client Accept-Language & Accept-Charset // @@ -146,6 +146,11 @@ HttpBodyFactory::fabricate_with_old_api(const char *type, HttpTransact::State *c // if failed, try to fabricate the default custom response // ///////////////////////////////////////////////////////////// if (buffer == nullptr) { + if (is_response_body_precluded(context->http_return_code)) { + *resulting_buffer_length = 0; + unlock(); + return nullptr; + } buffer = fabricate(&acpt_language_list, &acpt_charset_list, "default", context, resulting_buffer_length, &lang_ptr, &charset_ptr, &set); } @@ -400,7 +405,7 @@ HttpBodyFactory::fabricate(StrList *acpt_language_list, StrList *acpt_charset_li char *buffer; const char *pType = context->txn_conf->body_factory_template_base; const char *set; - HttpBodyTemplate *t; + HttpBodyTemplate *t = NULL; HttpBodySet *body_set; char 
template_base[PATH_NAME_MAX]; @@ -416,12 +421,12 @@ HttpBodyFactory::fabricate(StrList *acpt_language_list, StrList *acpt_charset_li // if error body suppressed, return NULL if (is_response_suppressed(context)) { Debug("body_factory", " error suppression enabled, returning NULL template"); - return (nullptr); + return nullptr; } // if custom error pages are disabled, return NULL if (!enable_customizations) { Debug("body_factory", " customization disabled, returning NULL template"); - return (nullptr); + return nullptr; } // what set should we use (language target if enable_customizations == 2) @@ -429,6 +434,8 @@ HttpBodyFactory::fabricate(StrList *acpt_language_list, StrList *acpt_charset_li set = determine_set_by_language(acpt_language_list, acpt_charset_list); } else if (enable_customizations == 3) { set = determine_set_by_host(context); + } else if (is_response_body_precluded(context->http_return_code)) { + return nullptr; } else { set = "default"; } @@ -437,17 +444,25 @@ HttpBodyFactory::fabricate(StrList *acpt_language_list, StrList *acpt_charset_li } if (pType != nullptr && 0 != *pType && 0 != strncmp(pType, "NONE", 4)) { sprintf(template_base, "%s_%s", pType, type); - } else { - sprintf(template_base, "%s", type); + t = find_template(set, template_base, &body_set); + // Check for default alternate. + if (t == nullptr) { + sprintf(template_base, "%s_default", pType); + t = find_template(set, template_base, &body_set); + } } - // see if we have a custom error page template - t = find_template(set, template_base, &body_set); + + // Check for base customizations if specializations didn't work. 
if (t == nullptr) { + if (is_response_body_precluded(context->http_return_code)) { + return nullptr; + } t = find_template(set, type, &body_set); // this executes if the template_base is wrong and doesn't exist } + if (t == nullptr) { Debug("body_factory", " can't find template, returning NULL template"); - return (nullptr); + return nullptr; } *content_language_return = body_set->content_language; @@ -455,7 +470,7 @@ HttpBodyFactory::fabricate(StrList *acpt_language_list, StrList *acpt_charset_li // build the custom error page buffer = t->build_instantiated_buffer(context, buffer_length_return); - return (buffer); + return buffer; } // LOCKING: must be called with lock taken @@ -692,8 +707,9 @@ HttpBodyFactory::load_body_set_from_directory(char *set_name, char *tmpl_dir) DIR *dir; int status; struct stat stat_buf; - char path[MAXPATHLEN + 1]; struct dirent *dirEntry; + char path[MAXPATHLEN + 1]; + static const char BASED_DEFAULT[] = "_default"; //////////////////////////////////////////////// // ensure we can open tmpl_dir as a directory // @@ -702,7 +718,7 @@ HttpBodyFactory::load_body_set_from_directory(char *set_name, char *tmpl_dir) Debug("body_factory", " load_body_set_from_directory(%s)", tmpl_dir); dir = opendir(tmpl_dir); if (dir == nullptr) { - return (nullptr); + return nullptr; } ///////////////////////////////////////////// @@ -713,7 +729,7 @@ HttpBodyFactory::load_body_set_from_directory(char *set_name, char *tmpl_dir) status = stat(path, &stat_buf); if ((status < 0) || !S_ISREG(stat_buf.st_mode)) { closedir(dir); - return (nullptr); + return nullptr; } Debug("body_factory", " found '%s'", path); @@ -729,14 +745,19 @@ HttpBodyFactory::load_body_set_from_directory(char *set_name, char *tmpl_dir) while ((dirEntry = readdir(dir))) { HttpBodyTemplate *tmpl; + size_t d_len = strlen(dirEntry->d_name); /////////////////////////////////////////////////////////////// - // all template files have name of the form # // + // all template files must have a file 
name of the form // + // - # // + // - _# // + // - _default [based default] // + // - default [global default] // /////////////////////////////////////////////////////////////// - if ((strchr(dirEntry->d_name, '#') == nullptr) && (strcmp(dirEntry->d_name, "default") != 0)) { + if (!(nullptr != strchr(dirEntry->d_name, '#') || (0 == strcmp(dirEntry->d_name, "default")) || + (d_len >= sizeof(BASED_DEFAULT) && 0 == strcmp(dirEntry->d_name + d_len - (sizeof(BASED_DEFAULT) - 1), BASED_DEFAULT)))) continue; - } snprintf(path, sizeof(path), "%s/%s", tmpl_dir, dirEntry->d_name); status = stat(path, &stat_buf); @@ -815,7 +836,7 @@ HttpBodySet::init(char *set, char *dir) lineno = 0; - while (1) { + while (true) { char *name_s, *name_e, *value_s, *value_e, *hash; ++lineno; diff --git a/proxy/http/HttpConfig.cc b/proxy/http/HttpConfig.cc index 763f6bae6f7..130fd9aa693 100644 --- a/proxy/http/HttpConfig.cc +++ b/proxy/http/HttpConfig.cc @@ -22,8 +22,8 @@ */ #include "ts/ink_config.h" -#include -#include +#include +#include #include "HttpConfig.h" #include "HTTP.h" #include "ProcessManager.h" @@ -898,6 +898,7 @@ HttpConfig::startup() HttpEstablishStaticConfigLongLong(c.oride.origin_max_connections_queue, "proxy.config.http.origin_max_connections_queue"); HttpEstablishStaticConfigLongLong(c.origin_min_keep_alive_connections, "proxy.config.http.origin_min_keep_alive_connections"); HttpEstablishStaticConfigByte(c.oride.attach_server_session_to_client, "proxy.config.http.attach_server_session_to_client"); + HttpEstablishStaticConfigByte(c.oride.safe_requests_retryable, "proxy.config.http.safe_requests_retryable"); HttpEstablishStaticConfigByte(c.disable_ssl_parenting, "proxy.local.http.parent_proxy.disable_connect_tunneling"); HttpEstablishStaticConfigByte(c.oride.forward_connect_method, "proxy.config.http.forward_connect_method"); @@ -969,8 +970,12 @@ HttpConfig::startup() HttpEstablishStaticConfigLongLong(c.oride.connect_attempts_timeout, 
"proxy.config.http.connect_attempts_timeout"); HttpEstablishStaticConfigLongLong(c.oride.post_connect_attempts_timeout, "proxy.config.http.post_connect_attempts_timeout"); HttpEstablishStaticConfigLongLong(c.oride.parent_connect_attempts, "proxy.config.http.parent_proxy.total_connect_attempts"); - HttpEstablishStaticConfigLongLong(c.per_parent_connect_attempts, "proxy.config.http.parent_proxy.per_parent_connect_attempts"); - HttpEstablishStaticConfigLongLong(c.parent_connect_timeout, "proxy.config.http.parent_proxy.connect_attempts_timeout"); + HttpEstablishStaticConfigLongLong(c.oride.parent_retry_time, "proxy.config.http.parent_proxy.retry_time"); + HttpEstablishStaticConfigLongLong(c.oride.parent_fail_threshold, "proxy.config.http.parent_proxy.fail_threshold"); + HttpEstablishStaticConfigLongLong(c.oride.per_parent_connect_attempts, + "proxy.config.http.parent_proxy.per_parent_connect_attempts"); + HttpEstablishStaticConfigLongLong(c.oride.parent_connect_timeout, "proxy.config.http.parent_proxy.connect_attempts_timeout"); + HttpEstablishStaticConfigByte(c.oride.parent_failures_update_hostdb, "proxy.config.http.parent_proxy.mark_down_hostdb"); HttpEstablishStaticConfigLongLong(c.oride.sock_recv_buffer_size_out, "proxy.config.net.sock_recv_buffer_size_out"); HttpEstablishStaticConfigLongLong(c.oride.sock_send_buffer_size_out, "proxy.config.net.sock_send_buffer_size_out"); @@ -1181,6 +1186,7 @@ HttpConfig::reconfigure() } params->origin_min_keep_alive_connections = m_master.origin_min_keep_alive_connections; params->oride.attach_server_session_to_client = m_master.oride.attach_server_session_to_client; + params->oride.safe_requests_retryable = m_master.oride.safe_requests_retryable; if (params->oride.origin_max_connections && params->oride.origin_max_connections < params->origin_min_keep_alive_connections) { Warning("origin_max_connections < origin_min_keep_alive_connections, setting min=max , please correct your records.config"); @@ -1240,7 +1246,7 @@ 
HttpConfig::reconfigure() params->oride.connect_attempts_max_retries = m_master.oride.connect_attempts_max_retries; params->oride.connect_attempts_max_retries_dead_server = m_master.oride.connect_attempts_max_retries_dead_server; - if (m_master.oride.connect_attempts_rr_retries >= params->oride.connect_attempts_max_retries) { + if (m_master.oride.connect_attempts_rr_retries > params->oride.connect_attempts_max_retries) { Warning("connect_attempts_rr_retries (%" PRIu64 ") is greater than " "connect_attempts_max_retries (%" PRIu64 "), this means requests " "will never redispatch to another server", @@ -1250,8 +1256,11 @@ HttpConfig::reconfigure() params->oride.connect_attempts_timeout = m_master.oride.connect_attempts_timeout; params->oride.post_connect_attempts_timeout = m_master.oride.post_connect_attempts_timeout; params->oride.parent_connect_attempts = m_master.oride.parent_connect_attempts; - params->per_parent_connect_attempts = m_master.per_parent_connect_attempts; - params->parent_connect_timeout = m_master.parent_connect_timeout; + params->oride.parent_retry_time = m_master.oride.parent_retry_time; + params->oride.parent_fail_threshold = m_master.oride.parent_fail_threshold; + params->oride.per_parent_connect_attempts = m_master.oride.per_parent_connect_attempts; + params->oride.parent_connect_timeout = m_master.oride.parent_connect_timeout; + params->oride.parent_failures_update_hostdb = m_master.oride.parent_failures_update_hostdb; params->oride.sock_recv_buffer_size_out = m_master.oride.sock_recv_buffer_size_out; params->oride.sock_send_buffer_size_out = m_master.oride.sock_send_buffer_size_out; @@ -1470,7 +1479,7 @@ HttpConfig::parse_ports_list(char *ports_string) start = ports_string; - while (1) { // eat whitespace + while (true) { // eat whitespace while ((start[0] != '\0') && ParseRules::is_space(start[0])) { start++; } diff --git a/proxy/http/HttpConfig.h b/proxy/http/HttpConfig.h index cc75710384f..4ec49c12b55 100644 --- a/proxy/http/HttpConfig.h 
+++ b/proxy/http/HttpConfig.h @@ -379,6 +379,7 @@ struct OverridableHttpConfigParams { fwd_proxy_auth_to_parent(0), uncacheable_requests_bypass_parent(1), attach_server_session_to_client(0), + safe_requests_retryable(1), forward_connect_method(0), insert_age_in_response(1), anonymize_remove_from(0), @@ -409,6 +410,7 @@ struct OverridableHttpConfigParams { flow_control_enabled(0), normalize_ae_gzip(0), srv_enabled(0), + parent_failures_update_hostdb(0), cache_open_write_fail_action(0), post_check_content_length_enabled(1), redirection_enabled(0), @@ -446,6 +448,10 @@ struct OverridableHttpConfigParams { connect_attempts_timeout(30), post_connect_attempts_timeout(1800), parent_connect_attempts(4), + parent_retry_time(300), + parent_fail_threshold(10), + per_parent_connect_attempts(2), + parent_connect_timeout(30), down_server_timeout(300), client_abort_threshold(10), freshness_fuzz_time(240), @@ -499,6 +505,8 @@ struct OverridableHttpConfigParams { MgmtByte uncacheable_requests_bypass_parent; MgmtByte attach_server_session_to_client; + MgmtByte safe_requests_retryable; + MgmtByte forward_connect_method; MgmtByte insert_age_in_response; @@ -560,6 +568,7 @@ struct OverridableHttpConfigParams { // hostdb/dns variables // ////////////////////////// MgmtByte srv_enabled; + MgmtByte parent_failures_update_hostdb; MgmtByte cache_open_write_fail_action; @@ -640,7 +649,15 @@ struct OverridableHttpConfigParams { MgmtInt connect_attempts_rr_retries; MgmtInt connect_attempts_timeout; MgmtInt post_connect_attempts_timeout; + + //////////////////////////////////// + // parent proxy connect attempts // + /////////////////////////////////// MgmtInt parent_connect_attempts; + MgmtInt parent_retry_time; + MgmtInt parent_fail_threshold; + MgmtInt per_parent_connect_attempts; + MgmtInt parent_connect_timeout; MgmtInt down_server_timeout; MgmtInt client_abort_threshold; @@ -731,12 +748,6 @@ struct HttpConfigParams : public ConfigInfo { MgmtInt accept_no_activity_timeout; - 
//////////////////////////////////// - // origin server connect attempts // - //////////////////////////////////// - MgmtInt per_parent_connect_attempts; - MgmtInt parent_connect_timeout; - /////////////////////////////////////////////////////////////////// // Privacy: fields which are removed from the user agent request // /////////////////////////////////////////////////////////////////// @@ -874,8 +885,6 @@ inline HttpConfigParams::HttpConfigParams() proxy_response_via_string_len(0), cluster_time_delta(0), accept_no_activity_timeout(120), - per_parent_connect_attempts(2), - parent_connect_timeout(30), anonymize_other_header_list(NULL), cache_vary_default_text(NULL), cache_vary_default_images(NULL), diff --git a/proxy/http/HttpDebugNames.cc b/proxy/http/HttpDebugNames.cc index 7285787aa7b..b9e526337fc 100644 --- a/proxy/http/HttpDebugNames.cc +++ b/proxy/http/HttpDebugNames.cc @@ -482,6 +482,8 @@ HttpDebugNames::get_api_hook_name(TSHttpHookID t) return "TS_VCONN_PRE_ACCEPT_HOOK"; case TS_SSL_CERT_HOOK: return "TS_SSL_CERT_HOOK"; + case TS_SSL_SERVERNAME_HOOK: + return "TS_SSL_SERVERNAME_HOOK"; } return "unknown hook"; diff --git a/proxy/http/HttpPages.cc b/proxy/http/HttpPages.cc index 30c3d83c034..45145e99256 100644 --- a/proxy/http/HttpPages.cc +++ b/proxy/http/HttpPages.cc @@ -119,33 +119,33 @@ HttpPagesHandler::dump_tunnel_info(HttpSM *sm) resp_add("

Producers

"); resp_begin_table(1, 4, 60); - for (int i = 0; i < MAX_PRODUCERS; i++) { - if (t->producers[i].vc != nullptr) { + for (auto &producer : t->producers) { + if (producer.vc != nullptr) { resp_begin_row(); // Col 1 - name resp_begin_column(); - resp_add(t->producers[i].name); + resp_add(producer.name); resp_end_column(); // Col 2 - alive resp_begin_column(); - resp_add("%d", t->producers[i].alive); + resp_add("%d", producer.alive); resp_end_column(); // Col 3 - ndone resp_begin_column(); - if (t->producers[i].alive && t->producers[i].read_vio) { - resp_add("%d", t->producers[i].read_vio->ndone); + if (producer.alive && producer.read_vio) { + resp_add("%d", producer.read_vio->ndone); } else { - resp_add("%d", t->producers[i].bytes_read); + resp_add("%d", producer.bytes_read); } resp_end_column(); // Col 4 - nbytes resp_begin_column(); - if (t->producers[i].alive && t->producers[i].read_vio) { - resp_add("%d", t->producers[i].read_vio->nbytes); + if (producer.alive && producer.read_vio) { + resp_add("%d", producer.read_vio->nbytes); } else { resp_add("-"); } @@ -158,33 +158,33 @@ HttpPagesHandler::dump_tunnel_info(HttpSM *sm) resp_add("

Consumers

"); resp_begin_table(1, 5, 60); - for (int j = 0; j < MAX_CONSUMERS; j++) { - if (t->consumers[j].vc != nullptr) { + for (auto &consumer : t->consumers) { + if (consumer.vc != nullptr) { resp_begin_row(); // Col 1 - name resp_begin_column(); - resp_add(t->consumers[j].name); + resp_add(consumer.name); resp_end_column(); // Col 2 - alive resp_begin_column(); - resp_add("%d", t->consumers[j].alive); + resp_add("%d", consumer.alive); resp_end_column(); // Col 3 - ndone resp_begin_column(); - if (t->consumers[j].alive && t->consumers[j].write_vio) { - resp_add("%d", t->consumers[j].write_vio->ndone); + if (consumer.alive && consumer.write_vio) { + resp_add("%d", consumer.write_vio->ndone); } else { - resp_add("%d", t->consumers[j].bytes_written); + resp_add("%d", consumer.bytes_written); } resp_end_column(); // Col 4 - nbytes resp_begin_column(); - if (t->consumers[j].alive && t->consumers[j].write_vio) { - resp_add("%d", t->consumers[j].write_vio->nbytes); + if (consumer.alive && consumer.write_vio) { + resp_add("%d", consumer.write_vio->nbytes); } else { resp_add("-"); } @@ -192,8 +192,8 @@ HttpPagesHandler::dump_tunnel_info(HttpSM *sm) // Col 5 - read avail resp_begin_column(); - if (t->consumers[j].alive && t->consumers[j].buffer_reader) { - resp_add("%d", t->consumers[j].buffer_reader->read_avail()); + if (consumer.alive && consumer.buffer_reader) { + resp_add("%d", consumer.buffer_reader->read_avail()); } else { resp_add("-"); } @@ -462,7 +462,7 @@ http_pages_init() statPagesManager.register_http("http", http_pages_callback); // Create the mutexes for http list protection - for (int i = 0; i < HTTP_LIST_BUCKETS; i++) { - HttpSMList[i].mutex = new_ProxyMutex(); + for (auto &i : HttpSMList) { + i.mutex = new_ProxyMutex(); } } diff --git a/proxy/http/HttpProxyServerMain.cc b/proxy/http/HttpProxyServerMain.cc index a0ef4e4518b..99b0e2cfd5d 100644 --- a/proxy/http/HttpProxyServerMain.cc +++ b/proxy/http/HttpProxyServerMain.cc @@ -176,6 +176,7 @@ 
MakeHttpProxyAcceptor(HttpProxyAcceptor &acceptor, HttpProxyPort &port, unsigned ProtocolProbeSessionAccept *probe = new ProtocolProbeSessionAccept(); HttpSessionAccept *http = nullptr; // don't allocate this unless it will be used. + probe->proxyPort = &port; if (port.m_session_protocol_preference.intersects(HTTP_PROTOCOL_SET)) { http = new HttpSessionAccept(accept_opt); @@ -214,7 +215,7 @@ MakeHttpProxyAcceptor(HttpProxyAcceptor &acceptor, HttpProxyPort &port, unsigned SCOPED_MUTEX_LOCK(lock, ssl_plugin_mutex, this_ethread()); ssl_plugin_acceptors.push(ssl); - + ssl->proxyPort = &port; acceptor._accept = ssl; } else { acceptor._accept = probe; diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc index 7d0d067ca85..3c9da09edd9 100644 --- a/proxy/http/HttpSM.cc +++ b/proxy/http/HttpSM.cc @@ -24,6 +24,7 @@ #include "../ProxyClientTransaction.h" #include "HttpSM.h" +#include "HttpTransactHeaders.h" #include "ProxyConfig.h" #include "HttpServerSession.h" #include "HttpDebugNames.h" @@ -49,6 +50,8 @@ #include "congest/Congestion.h" #include "ts/I_Layout.h" +using ts::StringView; + #define DEFAULT_RESPONSE_BUFFER_SIZE_INDEX 6 // 8K #define DEFAULT_REQUEST_BUFFER_SIZE_INDEX 6 // 8K #define MIN_CONFIG_BUFFER_SIZE_INDEX 5 // 4K @@ -361,7 +364,6 @@ HttpSM::cleanup() void HttpSM::destroy() { - HTTP_DECREMENT_DYN_STAT(http_current_client_transactions_stat); cleanup(); httpSMAllocator.free(this); } @@ -795,14 +797,17 @@ HttpSM::state_read_client_request_header(int event, void *data) ua_entry->write_buffer = new_MIOBuffer(alloc_index); IOBufferReader *buf_start = ua_entry->write_buffer->alloc_reader(); + t_state.hdr_info.client_request.m_100_continue_required = true; + DebugSM("http_seq", "send 100 Continue response to client"); - int64_t nbytes = ua_entry->write_buffer->write(str_100_continue_response, len_100_continue_response); - ua_session->do_io_write(netvc, nbytes, buf_start); + int64_t nbytes = ua_entry->write_buffer->write(str_100_continue_response, 
len_100_continue_response); + ua_entry->write_vio = ua_session->do_io_write(this, nbytes, buf_start); } } if (t_state.hdr_info.client_request.method_get_wksidx() == HTTP_WKSIDX_TRACE || - (t_state.hdr_info.request_content_length <= 0 && t_state.client_info.transfer_encoding != HttpTransact::CHUNKED_ENCODING)) { + (t_state.hdr_info.client_request.get_content_length() == 0 && + t_state.client_info.transfer_encoding != HttpTransact::CHUNKED_ENCODING)) { // Enable further IO to watch for client aborts ua_entry->read_vio->reenable(); } else { @@ -891,7 +896,7 @@ HttpSM::state_watch_for_client_abort(int event, void *data) { STATE_ENTER(&HttpSM::state_watch_for_client_abort, event); - ink_assert(ua_entry->read_vio == (VIO *)data); + ink_assert(ua_entry->read_vio == (VIO *)data || ua_entry->write_vio == (VIO *)data); ink_assert(ua_entry->vc == ua_session); switch (event) { @@ -954,6 +959,20 @@ HttpSM::state_watch_for_client_abort(int event, void *data) // Ignore. Could be a pipelined request. We'll get to it // when we finish the current transaction break; + case VC_EVENT_WRITE_READY: + // 100-continue handler + ink_assert(t_state.hdr_info.client_request.m_100_continue_required); + ua_entry->write_vio->reenable(); + break; + case VC_EVENT_WRITE_COMPLETE: + // 100-continue handler + ink_assert(t_state.hdr_info.client_request.m_100_continue_required); + if (ua_entry->write_buffer) { + ink_assert(ua_entry->write_vio && !ua_entry->write_vio->ntodo()); + free_MIOBuffer(ua_entry->write_buffer); + ua_entry->write_buffer = nullptr; + } + break; default: ink_release_assert(0); break; @@ -1539,9 +1558,6 @@ HttpSM::state_api_callout(int event, void *data) api_timer = 0; switch (api_next) { case API_RETURN_CONTINUE: - if (t_state.api_next_action == HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR) { - do_redirect(); - } handle_api_return(); break; case API_RETURN_DEFERED_CLOSE: @@ -1614,6 +1630,16 @@ HttpSM::handle_api_return() if (ua_session) { 
ua_session->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_in)); } + + // We only follow 3xx when redirect_in_process == false. Otherwise the redirection has already been launched (in + // SM_ACTION_SERVE_FROM_CACHE or SM_ACTION_SERVER_READ).redirect_in_process is set before this logic if we need more direction. + // This redirection is only used with the build_error_reponse. Then, the redirection_tries will be increased by + // state_read_server_reponse_header and never get into this logic again. + if (enable_redirection && !t_state.redirect_info.redirect_in_process && is_redirect_required() && + (redirection_tries <= t_state.txn_conf->number_of_redirections)) { + ++redirection_tries; + do_redirect(); + } // we have further processing to do // based on what t_state.next_action is break; @@ -1953,7 +1979,7 @@ HttpSM::state_read_server_response_header(int event, void *data) t_state.api_next_action = HttpTransact::SM_ACTION_API_READ_RESPONSE_HDR; // if exceeded limit deallocate postdata buffers and disable redirection - if (enable_redirection && (redirection_tries < t_state.txn_conf->number_of_redirections)) { + if (enable_redirection && (redirection_tries <= t_state.txn_conf->number_of_redirections)) { ++redirection_tries; } else { tunnel.deallocate_redirect_postdata_buffers(); @@ -2751,25 +2777,39 @@ HttpSM::tunnel_handler_post(int event, void *data) return 0; // Cannot do anything if there is no producer } - if (event != HTTP_TUNNEL_EVENT_DONE) { - if ((event == VC_EVENT_WRITE_COMPLETE) || (event == VC_EVENT_EOS)) { - if (ua_entry->write_buffer) { - free_MIOBuffer(ua_entry->write_buffer); - ua_entry->write_buffer = nullptr; - } - } + switch (event) { + case HTTP_TUNNEL_EVENT_DONE: // Tunnel done. + break; + case VC_EVENT_WRITE_READY: // iocore may callback first before send. 
+ return 0; + case VC_EVENT_EOS: // SSLNetVC may callback EOS during write error (6.0.x or early) + case VC_EVENT_ERROR: // Send HTTP 408 error + case VC_EVENT_WRITE_COMPLETE: // tunnel_handler_post_ua has sent HTTP 408 response + case VC_EVENT_INACTIVITY_TIMEOUT: // ua_session timeout during sending the HTTP 408 response + case VC_EVENT_ACTIVE_TIMEOUT: // ua_session timeout + if (ua_entry->write_buffer) { + free_MIOBuffer(ua_entry->write_buffer); + ua_entry->write_buffer = nullptr; + ua_entry->vc->do_io_write(this, 0, nullptr); + } + // The if statement will always true since these codes are all for HTTP 408 response sending. - by oknet xu if (p->handler_state == HTTP_SM_POST_UA_FAIL) { Debug("http_tunnel", "cleanup tunnel in tunnel_handler_post"); hsm_release_assert(ua_entry->in_tunnel == true); - ink_assert((event == VC_EVENT_WRITE_COMPLETE) || (event == VC_EVENT_EOS)); + tunnel_handler_post_or_put(p); vc_table.cleanup_all(); tunnel.chain_abort_all(p); p->read_vio = nullptr; p->vc->do_io_close(EHTTP_ERROR); - tunnel_handler_post_or_put(p); tunnel.kill_tunnel(); return 0; } + break; + case VC_EVENT_READ_READY: + case VC_EVENT_READ_COMPLETE: + default: + ink_assert(!"not reached"); + return 0; } ink_assert(event == HTTP_TUNNEL_EVENT_DONE); @@ -3466,7 +3506,7 @@ HttpSM::tunnel_handler_post_ua(int event, HttpTunnelProducer *p) { STATE_ENTER(&HttpSM::tunnel_handler_post_ua, event); client_request_body_bytes = p->init_bytes_done + p->bytes_read; - int64_t alloc_index, nbytes; + int64_t nbytes, buf_size; IOBufferReader *buf_start; switch (event) { @@ -3488,20 +3528,43 @@ HttpSM::tunnel_handler_post_ua(int event, HttpTunnelProducer *p) } // send back 408 request timeout - alloc_index = buffer_size_to_index(len_408_request_timeout_response + t_state.internal_msg_buffer_size); + buf_size = index_to_buffer_size(HTTP_HEADER_BUFFER_SIZE_INDEX) + t_state.internal_msg_buffer_size; if (ua_entry->write_buffer) { + if (t_state.hdr_info.client_request.m_100_continue_required) { + 
ink_assert(ua_entry->write_vio && !ua_entry->write_vio->ntodo()); + } free_MIOBuffer(ua_entry->write_buffer); ua_entry->write_buffer = nullptr; } - ua_entry->write_buffer = new_MIOBuffer(alloc_index); + ua_entry->write_buffer = new_MIOBuffer(buffer_size_to_index(buf_size)); buf_start = ua_entry->write_buffer->alloc_reader(); DebugSM("http_tunnel", "send 408 response to client to vc %p, tunnel vc %p", ua_session->get_netvc(), p->vc); - nbytes = ua_entry->write_buffer->write(str_408_request_timeout_response, len_408_request_timeout_response); - nbytes += ua_entry->write_buffer->write(t_state.internal_msg_buffer, t_state.internal_msg_buffer_size); + if (t_state.internal_msg_buffer && t_state.internal_msg_buffer_size) { + client_response_hdr_bytes = write_response_header_into_buffer(&t_state.hdr_info.client_response, ua_entry->write_buffer); + nbytes = client_response_hdr_bytes + t_state.internal_msg_buffer_size; + if (t_state.internal_msg_buffer_fast_allocator_size < 0) { + ua_entry->write_buffer->append_xmalloced(t_state.internal_msg_buffer, t_state.internal_msg_buffer_size); + } else { + ua_entry->write_buffer->append_fast_allocated(t_state.internal_msg_buffer, t_state.internal_msg_buffer_size, + t_state.internal_msg_buffer_fast_allocator_size); + } + // The IOBufferBlock will free the msg buffer when necessary so + // eliminate our pointer to it + t_state.internal_msg_buffer = nullptr; + t_state.internal_msg_buffer_size = 0; + } else { + client_response_hdr_bytes = nbytes = + ua_entry->write_buffer->write(str_408_request_timeout_response, len_408_request_timeout_response); + } - p->vc->do_io_write(this, nbytes, buf_start); + // The HttpSM default handler still is HttpSM::state_request_wait_for_transform_read. + // However, WRITE_COMPLETE/TIMEOUT/ERROR event should be managed/handled by tunnel_handler_post. 
+ ua_entry->vc_handler = &HttpSM::tunnel_handler_post; + ua_entry->write_vio = p->vc->do_io_write(this, nbytes, buf_start); + // Reset the inactivity timeout, otherwise the InactivityCop will callback again in the next second. + ua_session->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_in)); p->vc->do_io_shutdown(IO_SHUTDOWN_READ); return 0; } @@ -3548,6 +3611,7 @@ HttpSM::tunnel_handler_post_ua(int event, HttpTunnelProducer *p) // timeouts ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort; ua_entry->read_vio = p->vc->do_io_read(this, INT64_MAX, ua_buffer_reader->mbuf); + ua_session->set_inactivity_timeout(0); break; default: ink_release_assert(0); @@ -3634,6 +3698,7 @@ HttpSM::tunnel_handler_post_server(int event, HttpTunnelConsumer *c) // on the user agent in order to get timeouts // coming to the state machine and not the tunnel ua_entry->vc_handler = &HttpSM::state_watch_for_client_abort; + ua_session->set_inactivity_timeout(0); // YTS Team, yamsat Plugin // When event is VC_EVENT_ERROR,and when redirection is enabled @@ -4020,7 +4085,6 @@ HttpSM::do_remap_request(bool run_inline) { DebugSM("http_seq", "[HttpSM::do_remap_request] Remapping request"); DebugSM("url_rewrite", "Starting a possible remapping for request [%" PRId64 "]", sm_id); - SSLConfig::scoped_config params; bool ret = false; if (t_state.cop_test_page == false) { ret = remapProcessor.setup_for_remap(&t_state); @@ -4061,16 +4125,6 @@ HttpSM::do_remap_request(bool run_inline) pending_action = remap_action_handle; } - // check if the overridden client cert filename is already attached to an existing ssl context - ats_scoped_str clientCert(Layout::relative_to(t_state.txn_conf->client_cert_filepath, t_state.txn_conf->client_cert_filename)); - auto tCTX = params->getCTX(clientCert); - - if (tCTX == nullptr) { - // make new client ctx and add it to the ctx list - auto tctx = params->getNewCTX(clientCert); - params->InsertCTX(clientCert, tctx); - } - 
return; } @@ -4709,7 +4763,7 @@ void HttpSM::do_http_server_open(bool raw) { int ip_family = t_state.current.server->dst_addr.sa.sa_family; - DebugSM("http_track", "entered inside do_http_server_open ][%s]", ats_ip_family_name(ip_family)); + DebugSM("http_track", "entered inside do_http_server_open ][%s]", ats_ip_family_name(ip_family).ptr()); // Make sure we are on the "right" thread if (ua_session) { @@ -4798,7 +4852,7 @@ HttpSM::do_http_server_open(bool raw) } // Congestion Check - if (t_state.pCongestionEntry != NULL) { + if (t_state.pCongestionEntry != nullptr) { if (t_state.pCongestionEntry->F_congested() && (!t_state.pCongestionEntry->proxy_retry(milestones[TS_MILESTONE_SERVER_CONNECT]))) { t_state.congestion_congested_or_failed = 1; @@ -5030,10 +5084,10 @@ HttpSM::do_http_server_open(bool raw) } } - // draft-stenberg-httpbis-tcp recommends only enabling TFO on safe, indempotent methods or + // draft-stenberg-httpbis-tcp recommends only enabling TFO on indempotent methods or // those with intervening protocol layers (eg. TLS). 
- if (scheme_to_use == URL_WKSIDX_HTTPS || t_state.method == HTTP_WKSIDX_CONNECT || t_state.method == HTTP_WKSIDX_DELETE || - t_state.method == HTTP_WKSIDX_GET || t_state.method == HTTP_WKSIDX_HEAD || t_state.method == HTTP_WKSIDX_PUT) { + + if (scheme_to_use == URL_WKSIDX_HTTPS || HttpTransactHeaders::is_method_idempotent(t_state.method)) { opt.f_tcp_fastopen = (t_state.txn_conf->sock_option_flag_out & NetVCOptions::SOCK_OPT_TCP_FAST_OPEN); } @@ -5046,28 +5100,48 @@ HttpSM::do_http_server_open(bool raw) opt.set_sni_servername(host, len); } - ats_scoped_str clientCert( - (Layout::relative_to(t_state.txn_conf->client_cert_filepath, t_state.txn_conf->client_cert_filename))); - opt.set_client_certname(clientCert); + SSLConfig::scoped_config params; + // check if the overridden client cert filename is already attached to an existing ssl context + if (t_state.txn_conf->client_cert_filepath && t_state.txn_conf->client_cert_filename) { + ats_scoped_str clientCert( + Layout::relative_to(t_state.txn_conf->client_cert_filepath, t_state.txn_conf->client_cert_filename)); + if (clientCert != nullptr) { + auto tCTX = params->getCTX(clientCert); + + if (tCTX == nullptr) { + // make new client ctx and add it to the ctx list + Debug("ssl", "adding new cert for client cert %s", (char *)clientCert); + auto tctx = params->getNewCTX(clientCert); + params->InsertCTX(clientCert, tctx); + } + opt.set_client_certname(clientCert); + } + } connect_action_handle = sslNetProcessor.connect_re(this, // state machine &t_state.current.server->dst_addr.sa, // addr + port &opt); - } else if (t_state.method != HTTP_WKSIDX_CONNECT) { + } else if (t_state.method != HTTP_WKSIDX_CONNECT && t_state.method != HTTP_WKSIDX_POST && t_state.method != HTTP_WKSIDX_PUT) { DebugSM("http", "calling netProcessor.connect_re"); connect_action_handle = netProcessor.connect_re(this, // state machine &t_state.current.server->dst_addr.sa, // addr + port &opt); } else { - // CONNECT method + // The request transform would 
be applied to POST and/or PUT request. + // The server_vc should be established (writeable) before request transform start. + // The CheckConnect is created by connect_s, + // It will callback NET_EVENT_OPEN to HttpSM if server_vc is WRITE_READY, + // Otherwise NET_EVENT_OPEN_FAILED is callbacked. MgmtInt connect_timeout; - ink_assert(t_state.method == HTTP_WKSIDX_CONNECT); + ink_assert(t_state.method == HTTP_WKSIDX_CONNECT || t_state.method == HTTP_WKSIDX_POST || t_state.method == HTTP_WKSIDX_PUT); // Set the inactivity timeout to the connect timeout so that we // we fail this server if it doesn't start sending the response // header - if (t_state.current.server == &t_state.parent_info) { - connect_timeout = t_state.http_config_param->parent_connect_timeout; + if (t_state.method == HTTP_WKSIDX_POST || t_state.method == HTTP_WKSIDX_PUT) { + connect_timeout = t_state.txn_conf->post_connect_attempts_timeout; + } else if (t_state.current.server == &t_state.parent_info) { + connect_timeout = t_state.txn_conf->parent_connect_timeout; } else if (t_state.pCongestionEntry != nullptr) { connect_timeout = t_state.pCongestionEntry->connect_timeout(); } else { @@ -5716,7 +5790,8 @@ HttpSM::do_setup_post_tunnel(HttpVC_t to_vc_type) // If we're half closed, we got a FIN from the client. Forward it on to the origin server // now that we have the tunnel operational. - if (ua_session->get_half_close_flag()) { + // HttpTunnel could broken due to bad chunked data and close all vc by chain_abort_all(). 
+ if (p->handler_state != HTTP_SM_POST_UA_FAIL && ua_session->get_half_close_flag()) { p->vc->do_io_shutdown(IO_SHUTDOWN_READ); } } @@ -5967,7 +6042,7 @@ HttpSM::attach_server_session(HttpServerSession *s) if (t_state.method == HTTP_WKSIDX_POST || t_state.method == HTTP_WKSIDX_PUT) { connect_timeout = t_state.txn_conf->post_connect_attempts_timeout; } else if (t_state.current.server == &t_state.parent_info) { - connect_timeout = t_state.http_config_param->parent_connect_timeout; + connect_timeout = t_state.txn_conf->parent_connect_timeout; } else { connect_timeout = t_state.txn_conf->connect_attempts_timeout; } @@ -6226,10 +6301,10 @@ HttpSM::setup_100_continue_transfer() void HttpSM::setup_error_transfer() { - if (t_state.internal_msg_buffer) { + if (t_state.internal_msg_buffer || is_response_body_precluded(t_state.http_return_code)) { // Since we need to send the error message, call the API // function - ink_assert(t_state.internal_msg_buffer_size > 0); + ink_assert(t_state.internal_msg_buffer_size > 0 || is_response_body_precluded(t_state.http_return_code)); t_state.api_next_action = HttpTransact::SM_ACTION_API_SEND_RESPONSE_HDR; do_api_callout(); } else { @@ -6324,7 +6399,7 @@ HttpSM::setup_internal_transfer(HttpSMHandler handler_arg) // Clear the decks before we setup the new producers // As things stand, we cannot have two static producers operating at // once - tunnel.kill_tunnel(); + tunnel.reset(); // Setup the tunnel to the client HttpTunnelProducer *p = @@ -6933,6 +7008,7 @@ HttpSM::kill_this() DebugSM("http", "[%" PRId64 "] deallocating sm", sm_id); // authAdapter.destroyState(); + HTTP_DECREMENT_DYN_STAT(http_current_client_transactions_stat); destroy(); } } @@ -7064,6 +7140,7 @@ HttpSM::update_stats() "dns_lookup_begin: %.3f " "dns_lookup_end: %.3f " "server_connect: %.3f " + "server_connect_end: %.3f " "server_first_read: %.3f " "server_read_header_done: %.3f " "server_close: %.3f " @@ -7082,6 +7159,7 @@ HttpSM::update_stats() 
milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_DNS_LOOKUP_BEGIN), milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_DNS_LOOKUP_END), milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_SERVER_CONNECT), + milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_SERVER_CONNECT_END), milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_SERVER_FIRST_READ), milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_SERVER_READ_HEADER_DONE), milestones.difference_sec(TS_MILESTONE_SM_START, TS_MILESTONE_SERVER_CLOSE), @@ -7236,7 +7314,14 @@ HttpSM::set_next_state() do_remap_request(true); /* run inline */ DebugSM("url_rewrite", "completed inline remapping request for [%" PRId64 "]", sm_id); t_state.url_remap_success = remapProcessor.finish_remap(&t_state); - call_transact_and_set_next_state(nullptr); + if (t_state.next_action == HttpTransact::SM_ACTION_SEND_ERROR_CACHE_NOOP && t_state.transact_return_point == nullptr) { + // It appears that we can now set the next_action to error and transact_return_point to nullptr when + // going through do_remap_request presumably due to a plugin setting an error. 
In that case, it seems + // that the error message has already been setup, so we can just return and avoid the further + // call_transact_and_set_next_state + } else { + call_transact_and_set_next_state(nullptr); + } } else { HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_remap_request); do_remap_request(false); /* dont run inline (iow on another thread) */ @@ -7265,7 +7350,7 @@ HttpSM::set_next_state() DebugSM("dns", "[HttpTransact::HandleRequest] Skipping DNS lookup for %s because it's loopback", t_state.dns_info.lookup_name); t_state.dns_info.lookup_success = true; - call_transact_and_set_next_state(NULL); + call_transact_and_set_next_state(nullptr); break; } else if (url_remap_mode == HttpTransact::URL_REMAP_FOR_OS && t_state.first_dns_lookup) { DebugSM("cdn", "Skipping DNS Lookup"); @@ -7325,6 +7410,32 @@ HttpSM::set_next_state() HTTP_SM_SET_DEFAULT_HANDLER(&HttpSM::state_hostdb_lookup); + // We need to close the previous attempt + // Because it could be a server side retry by DNS rr + if (server_entry) { + ink_assert(server_entry->vc_type == HTTP_SERVER_VC); + vc_table.cleanup_entry(server_entry); + server_entry = nullptr; + server_session = nullptr; + } else { + // Now that we have gotten the user agent request, we can cancel + // the inactivity timeout associated with it. Note, however, that + // we must not cancel the inactivity timeout if the message + // contains a body (as indicated by the non-zero request_content_length + // field). This indicates that a POST operation is taking place and + // that the client is still sending data to the origin server. The + // origin server cannot reply until the entire request is received. In + // light of this dependency, TS must ensure that the client finishes + // sending its request and for this reason, the inactivity timeout + // cannot be cancelled. 
+ if (ua_session && !t_state.hdr_info.request_content_length) { + ua_session->cancel_inactivity_timeout(); + } else if (!ua_session) { + terminate_sm = true; + return; // Give up if there is no session + } + } + ink_assert(t_state.dns_info.looking_up != HttpTransact::UNDEFINED_LOOKUP); do_hostdb_lookup(); break; @@ -7677,7 +7788,7 @@ void HttpSM::do_redirect() { DebugSM("http_redirect", "[HttpSM::do_redirect]"); - if (!enable_redirection || redirection_tries >= t_state.txn_conf->number_of_redirections) { + if (!enable_redirection || redirection_tries > t_state.txn_conf->number_of_redirections) { tunnel.deallocate_redirect_postdata_buffers(); return; } @@ -7826,6 +7937,8 @@ HttpSM::redirect_request(const char *redirect_url, const int redirect_len) // we have a new OS and need to have DNS lookup the new OS t_state.dns_info.lookup_success = false; t_state.force_dns = false; + t_state.server_info.clear(); + t_state.parent_info.clear(); if (t_state.txn_conf->cache_http) { t_state.cache_info.object_read = nullptr; @@ -8021,14 +8134,13 @@ HttpSM::is_redirect_required() // Fill in the client protocols used. 
Return the number of entries returned int -HttpSM::populate_client_protocol(const char **result, int n) const +HttpSM::populate_client_protocol(ts::StringView *result, int n) const { int retval = 0; if (n > 0) { - const char *proto = HttpSM::find_proto_string(t_state.hdr_info.client_request.version_get()); + StringView proto = HttpSM::find_proto_string(t_state.hdr_info.client_request.version_get()); if (proto) { - result[0] = proto; - retval = 1; + result[retval++] = proto; if (n > retval && ua_session) { retval += ua_session->populate_protocol(result + retval, n - retval); } @@ -8039,29 +8151,28 @@ HttpSM::populate_client_protocol(const char **result, int n) const // Look for a specific protocol const char * -HttpSM::client_protocol_contains(const char *tag_prefix) const +HttpSM::client_protocol_contains(StringView tag_prefix) const { const char *retval = nullptr; - const char *proto = HttpSM::find_proto_string(t_state.hdr_info.client_request.version_get()); + StringView proto = HttpSM::find_proto_string(t_state.hdr_info.client_request.version_get()); if (proto) { - unsigned int tag_prefix_len = strlen(tag_prefix); - if (tag_prefix_len <= strlen(proto) && strncmp(tag_prefix, proto, tag_prefix_len) == 0) { - retval = proto; + StringView prefix(tag_prefix); + if (prefix.size() <= proto.size() && 0 == strncmp(proto.ptr(), prefix.ptr(), prefix.size())) { + retval = proto.ptr(); } else if (ua_session) { - retval = ua_session->protocol_contains(tag_prefix); + retval = ua_session->protocol_contains(prefix); } } return retval; } -const char * +StringView HttpSM::find_proto_string(HTTPVersion version) const { if (version == HTTPVersion(1, 1)) { - return TS_PROTO_TAG_HTTP_1_1; + return IP_PROTO_TAG_HTTP_1_1; } else if (version == HTTPVersion(1, 0)) { - return TS_PROTO_TAG_HTTP_1_0; - } else { - return nullptr; + return IP_PROTO_TAG_HTTP_1_0; } + return nullptr; } diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h index 7d7e1b83388..e9bd219b3cd 100644 --- 
a/proxy/http/HttpSM.h +++ b/proxy/http/HttpSM.h @@ -41,6 +41,7 @@ #include "InkAPIInternal.h" #include "../ProxyClientTransaction.h" #include "HdrUtils.h" +#include //#include "AuthHttpAdapter.h" /* Enable LAZY_BUF_ALLOC to delay allocation of buffers until they @@ -263,9 +264,12 @@ class HttpSM : public Continuation bool is_private(); bool is_redirect_required(); - int populate_client_protocol(const char **result, int n) const; - const char *client_protocol_contains(const char *tag_prefix) const; - const char *find_proto_string(HTTPVersion version) const; + /// Get the protocol stack for the inbound (client, user agent) connection. + /// @arg result [out] Array to store the results + /// @arg n [in] Size of the array @a result. + int populate_client_protocol(ts::StringView *result, int n) const; + const char *client_protocol_contains(ts::StringView tag_prefix) const; + ts::StringView find_proto_string(HTTPVersion version) const; int64_t sm_id; unsigned int magic; diff --git a/proxy/http/HttpServerSession.cc b/proxy/http/HttpServerSession.cc index 3bae75d3d5e..e7db34d79a1 100644 --- a/proxy/http/HttpServerSession.cc +++ b/proxy/http/HttpServerSession.cc @@ -92,14 +92,8 @@ HttpServerSession::new_connection(NetVConnection *new_vc) buf_reader = read_buffer->alloc_reader(); Debug("http_ss", "[%" PRId64 "] session born, netvc %p", con_id, new_vc); state = HSS_INIT; - RecString congestion_control_out; - if (REC_ReadConfigStringAlloc(congestion_control_out, "proxy.config.net.tcp_congestion_control_out") == REC_ERR_OKAY) { - int len = strlen(congestion_control_out); - if (len > 0) { - new_vc->set_tcp_congestion_control(congestion_control_out, len); - } - ats_free(congestion_control_out); - } + + new_vc->set_tcp_congestion_control(SERVER_SIDE); } VIO * diff --git a/proxy/http/HttpServerSession.h b/proxy/http/HttpServerSession.h index 5400d4eefa1..f110ed3e7fb 100644 --- a/proxy/http/HttpServerSession.h +++ b/proxy/http/HttpServerSession.h @@ -180,6 +180,20 @@ class 
HttpServerSession : public VConnection // an asyncronous cancel on NT MIOBuffer *read_buffer; + virtual int + populate_protocol(ts::StringView *result, int size) const + { + auto vc = this->get_netvc(); + return vc ? vc->populate_protocol(result, size) : 0; + } + + virtual const char * + protocol_contains(ts::StringView tag_prefix) const + { + auto vc = this->get_netvc(); + return vc ? vc->protocol_contains(tag_prefix) : nullptr; + } + private: HttpServerSession(HttpServerSession &); diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc index 6113b8badf6..9acb881ccc7 100644 --- a/proxy/http/HttpTransact.cc +++ b/proxy/http/HttpTransact.cc @@ -24,14 +24,14 @@ #include "ts/ink_platform.h" #include -#include +#include #include "HttpTransact.h" #include "HttpTransactHeaders.h" #include "HttpSM.h" #include "HttpCacheSM.h" //Added to get the scope of HttpCacheSM object - YTS Team, yamsat #include "HttpDebugNames.h" -#include "time.h" +#include #include "ts/ParseRules.h" #include "HTTP.h" #include "HdrUtils.h" @@ -260,7 +260,8 @@ find_server_and_update_current_info(HttpTransact::State *s) // wanted it for all requests to local_host. s->parent_result.result = PARENT_DIRECT; } else if (s->method == HTTP_WKSIDX_CONNECT && s->http_config_param->disable_ssl_parenting) { - s->parent_params->findParent(&s->request_data, &s->parent_result); + s->parent_params->findParent(&s->request_data, &s->parent_result, s->txn_conf->parent_fail_threshold, + s->txn_conf->parent_retry_time); if (!s->parent_result.is_some() || s->parent_result.is_api_result() || s->parent_result.parent_is_proxy()) { DebugTxn("http_trans", "request not cacheable, so bypass parent"); s->parent_result.result = PARENT_DIRECT; @@ -273,7 +274,8 @@ find_server_and_update_current_info(HttpTransact::State *s) // we are assuming both child and parent have similar configuration // with respect to whether a request is cacheable or not. // For example, the cache_urls_that_look_dynamic variable. 
- s->parent_params->findParent(&s->request_data, &s->parent_result); + s->parent_params->findParent(&s->request_data, &s->parent_result, s->txn_conf->parent_fail_threshold, + s->txn_conf->parent_retry_time); if (!s->parent_result.is_some() || s->parent_result.is_api_result() || s->parent_result.parent_is_proxy()) { DebugTxn("http_trans", "request not cacheable, so bypass parent"); s->parent_result.result = PARENT_DIRECT; @@ -281,10 +283,12 @@ find_server_and_update_current_info(HttpTransact::State *s) } else { switch (s->parent_result.result) { case PARENT_UNDEFINED: - s->parent_params->findParent(&s->request_data, &s->parent_result); + s->parent_params->findParent(&s->request_data, &s->parent_result, s->txn_conf->parent_fail_threshold, + s->txn_conf->parent_retry_time); break; case PARENT_SPECIFIED: - s->parent_params->nextParent(&s->request_data, &s->parent_result); + s->parent_params->nextParent(&s->request_data, &s->parent_result, s->txn_conf->parent_fail_threshold, + s->txn_conf->parent_retry_time); // Hack! 
// We already have a parent that failed, if we are now told @@ -577,8 +581,8 @@ HttpTransact::Forbidden(State *s) DebugTxn("http_trans", "[Forbidden]" "IpAllow marked request forbidden"); bootstrap_state_variables_from_request(s, &s->hdr_info.client_request); - build_error_response(s, HTTP_STATUS_FORBIDDEN, "Access Denied", "access#denied", NULL); - TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, NULL); + build_error_response(s, HTTP_STATUS_FORBIDDEN, "Access Denied", "access#denied", nullptr); + TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, nullptr); } void @@ -719,7 +723,7 @@ HttpTransact::StartRemapRequest(State *s) if (is_debug_tag_set("http_chdr_describe") || is_debug_tag_set("http_trans")) { DebugTxn("http_trans", "Before Remapping:"); - obj_describe(s->hdr_info.client_request.m_http, 1); + obj_describe(s->hdr_info.client_request.m_http, true); } if (url_remap_mode == URL_REMAP_DEFAULT || url_remap_mode == URL_REMAP_ALL) { @@ -758,11 +762,21 @@ HttpTransact::EndRemapRequest(State *s) //////////////////////////////////////////////////////////////// if (s->remap_redirect != nullptr) { SET_VIA_STRING(VIA_DETAIL_TUNNEL, VIA_DETAIL_TUNNEL_NO_FORWARD); - if (s->http_return_code == HTTP_STATUS_MOVED_PERMANENTLY) { - build_error_response(s, HTTP_STATUS_MOVED_PERMANENTLY, "Redirect", "redirect#moved_permanently", nullptr); - } else { - build_error_response(s, HTTP_STATUS_MOVED_TEMPORARILY, "Redirect", "redirect#moved_temporarily", nullptr); + const char *error_body_type; + switch (s->http_return_code) { + case HTTP_STATUS_MOVED_PERMANENTLY: + case HTTP_STATUS_PERMANENT_REDIRECT: + error_body_type = "redirect#moved_permanently"; + break; + case HTTP_STATUS_MOVED_TEMPORARILY: + case HTTP_STATUS_TEMPORARY_REDIRECT: + error_body_type = "redirect#moved_temporarily"; + break; + default: + Warning("Invalid status code for redirect '%d'. 
Building a response for a temporary redirect.", s->http_return_code); + error_body_type = "redirect#moved_temporarily"; } + build_error_response(s, s->http_return_code, "Redirect", error_body_type, nullptr); ats_free(s->remap_redirect); s->reverse_proxy = false; goto done; @@ -869,7 +883,7 @@ HttpTransact::EndRemapRequest(State *s) if (is_debug_tag_set("http_chdr_describe") || is_debug_tag_set("http_trans") || is_debug_tag_set("url_rewrite")) { DebugTxn("http_trans", "After Remapping:"); - obj_describe(s->hdr_info.client_request.m_http, 1); + obj_describe(s->hdr_info.client_request.m_http, true); } /* @@ -1050,8 +1064,8 @@ void HttpTransact::ModifyRequest(State *s) { int scheme, hostname_len; - const char *hostname; - HTTPHdr &request = s->hdr_info.client_request; + HTTPHdr &request = s->hdr_info.client_request; + static const int PORT_PADDING = 8; DebugTxn("http_trans", "START HttpTransact::ModifyRequest"); @@ -1085,10 +1099,15 @@ HttpTransact::ModifyRequest(State *s) // The solution should be to move the scheme detecting logic in to // the header class, rather than doing it in a random bit of // external code. - hostname = request.host_get(&hostname_len); + const char *buf = request.host_get(&hostname_len); if (!request.is_target_in_url()) { s->hdr_info.client_req_is_server_style = true; } + // Copy out buf to a hostname just in case its heap header memory is freed during coalescing + // due to later HdrHeap operations + char *hostname = (char *)alloca(hostname_len + PORT_PADDING); + memcpy(hostname, buf, hostname_len); + // Make clang analyzer happy. hostname is non-null iff request.is_target_in_url(). 
ink_assert(hostname || s->hdr_info.client_req_is_server_style); @@ -1102,17 +1121,12 @@ HttpTransact::ModifyRequest(State *s) if ((max_forwards != 0) && !s->hdr_info.client_req_is_server_style && s->method != HTTP_WKSIDX_CONNECT) { MIMEField *host_field = request.field_find(MIME_FIELD_HOST, MIME_LEN_HOST); - int host_val_len = hostname_len; - const char *host_val = hostname; - int port = url->port_get_raw(); - char *buf = nullptr; + in_port_t port = url->port_get_raw(); // Form the host:port string if not a default port (e.g. 80) + // We allocated extra space for the port above if (port > 0) { - buf = static_cast(alloca(host_val_len + 15)); - memcpy(buf, hostname, host_val_len); - host_val_len += snprintf(buf + host_val_len, 15, ":%d", port); - host_val = buf; + hostname_len += snprintf(hostname + hostname_len, PORT_PADDING, ":%u", port); } // No host_field means not equal to host and will need to be set, so create it now. @@ -1121,8 +1135,8 @@ HttpTransact::ModifyRequest(State *s) request.field_attach(host_field); } - if (!mimefield_value_equal(host_field, host_val, host_val_len)) { - request.field_value_set(host_field, host_val, host_val_len); + if (mimefield_value_equal(host_field, hostname, hostname_len) == false) { + request.field_value_set(host_field, hostname, hostname_len); request.mark_target_dirty(); } } @@ -1212,7 +1226,7 @@ HttpTransact::HandleRequest(State *s) DebugTxn("http_seq", "[HttpTransact::HandleRequest] request valid."); if (is_debug_tag_set("http_chdr_describe")) { - obj_describe(s->hdr_info.client_request.m_http, 1); + obj_describe(s->hdr_info.client_request.m_http, true); } // at this point we are guaranteed that the request is good and acceptable. 
@@ -1349,7 +1363,7 @@ HttpTransact::HandleRequest(State *s) if (s->dns_info.lookup_name[0] <= '9' && s->dns_info.lookup_name[0] >= '0' && (!s->state_machine->enable_redirection || !s->redirect_info.redirect_in_process) && s->parent_params->parent_table->hostMatch) { - s->force_dns = 1; + s->force_dns = true; } /* A redirect means we need to check some things again. If the cache is enabled then we need to check the new (redirected) request against the cache. @@ -1490,12 +1504,15 @@ HttpTransact::PPDNSLookup(State *s) if (!s->dns_info.lookup_success) { // Mark parent as down due to resolving failure HTTP_INCREMENT_DYN_STAT(http_total_parent_marked_down_count); - s->parent_params->markParentDown(&s->parent_result); + s->parent_params->markParentDown(&s->parent_result, s->txn_conf->parent_fail_threshold, s->txn_conf->parent_retry_time); // DNS lookup of parent failed, find next parent or o.s. find_server_and_update_current_info(s); if (!s->current.server->dst_addr.isValid()) { if (s->current.request_to == PARENT_PROXY) { TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup); + } else if (s->parent_result.result == PARENT_DIRECT && s->http_config_param->no_dns_forward_to_parent != 1) { + // We ran out of parents but parent configuration allows us to go to Origin Server directly + TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); } else { // We could be out of parents here if all the parents failed DNS lookup ink_assert(s->current.request_to == HOST_NONE); @@ -1712,7 +1729,7 @@ HttpTransact::OSDNSLookup(State *s) // If the SRV response has a port number, we should honor it. Otherwise we do the port defined in remap if (s->dns_info.srv_lookup_success) { s->server_info.dst_addr.port() = htons(s->dns_info.srv_port); - } else { + } else if (!s->api_server_addr_set) { s->server_info.dst_addr.port() = htons(s->hdr_info.client_request.port_get()); // now we can set the port. 
} ats_ip_copy(&s->request_data.dest_ip, &s->server_info.dst_addr); @@ -1980,6 +1997,9 @@ HttpTransact::LookupSkipOpenServer(State *s) if (s->current.request_to == PARENT_PROXY) { TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup); + } else if (s->parent_result.result == PARENT_FAIL) { + handle_parent_died(s); + return; } ink_assert(s->current.request_to == ORIGIN_SERVER); @@ -2178,13 +2198,10 @@ HttpTransact::HandleCacheOpenRead(State *s) // cache miss DebugTxn("http_trans", "CacheOpenRead -- miss"); SET_VIA_STRING(VIA_DETAIL_CACHE_LOOKUP, VIA_DETAIL_MISS_NOT_CACHED); - // StartAccessControl(s); - if (s->force_dns) { - HandleCacheOpenReadMiss(s); - } else { - // Cache Lookup Unsuccessful ..calling dns lookup - TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); - } + // Perform DNS for the origin when it is required. + // 1. If parent configuration does not allow to go to origin there is no need of performing DNS + // 2. If parent satisfies the request there is no need to go to origin to perfrom DNS + HandleCacheOpenReadMiss(s); } else { // cache hit DebugTxn("http_trans", "CacheOpenRead -- hit"); @@ -2686,36 +2703,40 @@ HttpTransact::HandleCacheOpenReadHit(State *s) } if (server_up || s->stale_icp_lookup) { - bool check_hostdb = get_ka_info_from_config(s, s->current.server); - DebugTxn("http_trans", "CacheOpenReadHit - check_hostdb %d", check_hostdb); - if (!s->stale_icp_lookup && (check_hostdb || !s->current.server->dst_addr.isValid())) { - // ink_release_assert(s->current.request_to == PARENT_PROXY || - // s->http_config_param->no_dns_forward_to_parent != 0); - - // We must be going a PARENT PROXY since so did - // origin server DNS lookup right after state Start - // - // If we end up here in the release case just fall - // through. 
The request will fail because of the - // missing ip but we won't take down the system - // - if (s->current.request_to == PARENT_PROXY) { - // Set ourselves up to handle pending revalidate issues - // after the PP DNS lookup - ink_assert(s->pending_work == nullptr); - s->pending_work = issue_revalidate; - - TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup); - } else if (s->current.request_to == ORIGIN_SERVER) { - TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); - } else { - handle_parent_died(s); - return; + // set a default version for the outgoing request + HTTPVersion http_version; + + if (s->current.server != nullptr) { + bool check_hostdb = get_ka_info_from_config(s, s->current.server); + DebugTxn("http_trans", "CacheOpenReadHit - check_hostdb %d", check_hostdb); + if (!s->stale_icp_lookup && (check_hostdb || !s->current.server->dst_addr.isValid())) { + // We must be going a PARENT PROXY since so did + // origin server DNS lookup right after state Start + // + // If we end up here in the release case just fall + // through. 
The request will fail because of the + // missing ip but we won't take down the system + // + if (s->current.request_to == PARENT_PROXY) { + // Set ourselves up to handle pending revalidate issues + // after the PP DNS lookup + ink_assert(s->pending_work == nullptr); + s->pending_work = issue_revalidate; + + TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup); + } else if (s->current.request_to == ORIGIN_SERVER) { + TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); + } else { + handle_parent_died(s); + return; + } } + // override the default version with what the server has + http_version = s->current.server->http_version; } - DebugTxn("http_trans", "CacheOpenReadHit - version %d", s->current.server->http_version.m_version); - build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version); + DebugTxn("http_trans", "CacheOpenReadHit - version %d", http_version.m_version); + build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, http_version); issue_revalidate(s); @@ -3127,7 +3148,12 @@ HttpTransact::HandleCacheOpenReadMiss(State *s) return; } if (!s->current.server->dst_addr.isValid()) { - ink_release_assert(s->current.request_to == PARENT_PROXY || s->http_config_param->no_dns_forward_to_parent != 0); + ink_release_assert(s->parent_result.result == PARENT_DIRECT || s->current.request_to == PARENT_PROXY || + s->http_config_param->no_dns_forward_to_parent != 0); + if (s->parent_result.result == PARENT_DIRECT && s->http_config_param->no_dns_forward_to_parent != 1) { + TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); + return; + } if (s->current.request_to == PARENT_PROXY) { TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, HttpTransact::PPDNSLookup); } else { @@ -3136,8 +3162,11 @@ HttpTransact::HandleCacheOpenReadMiss(State *s) } } build_request(s, &s->hdr_info.client_request, &s->hdr_info.server_request, s->current.server->http_version); - - s->next_action = how_to_open_connection(s); + s->current.attempts = 0; 
+ s->next_action = how_to_open_connection(s); + if (s->current.server == &s->server_info && s->next_hop_scheme == URL_WKSIDX_HTTP) { + HttpTransactHeaders::remove_host_name_from_url(&s->hdr_info.server_request); + } } else { // miss, but only-if-cached is set build_error_response(s, HTTP_STATUS_GATEWAY_TIMEOUT, "Not Cached", "cache#not_in_cache", nullptr); s->next_action = SM_ACTION_SEND_ERROR_CACHE_NOOP; @@ -3560,7 +3589,7 @@ HttpTransact::handle_response_from_parent(State *s) DebugTxn("http_trans", "PARENT_RETRY_UNAVAILABLE_SERVER: marking parent down and trying another."); s->current.retry_type = PARENT_RETRY_NONE; HTTP_INCREMENT_DYN_STAT(http_total_parent_marked_down_count); - s->parent_params->markParentDown(&s->parent_result); + s->parent_params->markParentDown(&s->parent_result, s->txn_conf->parent_fail_threshold, s->txn_conf->parent_retry_time); next_lookup = find_server_and_update_current_info(s); } } @@ -3571,7 +3600,11 @@ HttpTransact::handle_response_from_parent(State *s) ink_assert(s->hdr_info.server_request.valid()); s->current.server->connect_result = ENOTCONN; - s->state_machine->do_hostdb_update_if_necessary(); + // only mark the parent down in hostdb if the configuration allows it, + // see proxy.config.http.parent_proxy.mark_down_hostdb in records.config. + if (s->txn_conf->parent_failures_update_hostdb) { + s->state_machine->do_hostdb_update_if_necessary(); + } char addrbuf[INET6_ADDRSTRLEN]; DebugTxn("http_trans", "[%d] failed to connect to parent %s", s->current.attempts, @@ -3580,7 +3613,7 @@ HttpTransact::handle_response_from_parent(State *s) // If the request is not retryable, just give up! 
if (!is_request_retryable(s)) { HTTP_INCREMENT_DYN_STAT(http_total_parent_marked_down_count); - s->parent_params->markParentDown(&s->parent_result); + s->parent_params->markParentDown(&s->parent_result, s->txn_conf->parent_fail_threshold, s->txn_conf->parent_retry_time); s->parent_result.result = PARENT_FAIL; handle_parent_died(s); return; @@ -3591,12 +3624,12 @@ HttpTransact::handle_response_from_parent(State *s) s->current.attempts++; // Are we done with this particular parent? - if ((s->current.attempts - 1) % s->http_config_param->per_parent_connect_attempts != 0) { + if ((s->current.attempts - 1) % s->txn_conf->per_parent_connect_attempts != 0) { // No we are not done with this parent so retry HTTP_INCREMENT_DYN_STAT(http_total_parent_switches_stat); s->next_action = how_to_open_connection(s); DebugTxn("http_trans", "%s Retrying parent for attempt %d, max %" PRId64, "[handle_response_from_parent]", - s->current.attempts, s->http_config_param->per_parent_connect_attempts); + s->current.attempts, s->txn_conf->per_parent_connect_attempts); return; } else { DebugTxn("http_trans", "%s %d per parent attempts exhausted", "[handle_response_from_parent]", s->current.attempts); @@ -3607,7 +3640,7 @@ HttpTransact::handle_response_from_parent(State *s) // us to mark the parent down if (s->current.state == CONNECTION_ERROR) { HTTP_INCREMENT_DYN_STAT(http_total_parent_marked_down_count); - s->parent_params->markParentDown(&s->parent_result); + s->parent_params->markParentDown(&s->parent_result, s->txn_conf->parent_fail_threshold, s->txn_conf->parent_retry_time); } // We are done so look for another parent if any next_lookup = find_server_and_update_current_info(s); @@ -3619,7 +3652,7 @@ HttpTransact::handle_response_from_parent(State *s) DebugTxn("http_trans", "[handle_response_from_parent] Error. 
No more retries."); if (s->current.state == CONNECTION_ERROR) { HTTP_INCREMENT_DYN_STAT(http_total_parent_marked_down_count); - s->parent_params->markParentDown(&s->parent_result); + s->parent_params->markParentDown(&s->parent_result, s->txn_conf->parent_fail_threshold, s->txn_conf->parent_retry_time); } s->parent_result.result = PARENT_FAIL; next_lookup = find_server_and_update_current_info(s); @@ -3634,11 +3667,8 @@ HttpTransact::handle_response_from_parent(State *s) TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, PPDNSLookup); break; case ORIGIN_SERVER: - s->current.attempts = 0; - s->next_action = how_to_open_connection(s); - if (s->current.server == &s->server_info && s->next_hop_scheme == URL_WKSIDX_HTTP) { - HttpTransactHeaders::remove_host_name_from_url(&s->hdr_info.server_request); - } + // Next lookup is Origin Server, try DNS for Origin Server + TRANSACT_RETURN(SM_ACTION_DNS_LOOKUP, OSDNSLookup); break; case HOST_NONE: handle_parent_died(s); @@ -6487,9 +6517,12 @@ HttpTransact::is_request_valid(State *s, HTTPHdr *incoming_request) bool HttpTransact::is_request_retryable(State *s) { + // If safe requests are retryable, it should be safe to retry safe requests irrespective of bytes sent or connection state + // according to RFC the following methods are safe (https://tools.ietf.org/html/rfc7231#section-4.2.1) // If there was no error establishing the connection (and we sent bytes)-- we cannot retry - if (s->current.state != CONNECTION_ERROR && s->state_machine->server_request_hdr_bytes > 0 && - s->state_machine->get_server_session()->get_netvc()->outstanding() != s->state_machine->server_request_hdr_bytes) { + if (!(s->txn_conf->safe_requests_retryable && HttpTransactHeaders::is_method_safe(s->method)) && + (s->current.state != CONNECTION_ERROR && s->state_machine->server_request_hdr_bytes > 0 && + s->state_machine->get_server_session()->get_netvc()->outstanding() != s->state_machine->server_request_hdr_bytes)) { return false; } @@ -7967,7 +8000,7 @@ 
HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing field = base_response->field_find(fields[i].name, fields[i].len); ink_assert(field != nullptr); value = field->value_get(&len); - outgoing_response->value_append(fields[i].name, fields[i].len, value, len, 0); + outgoing_response->value_append(fields[i].name, fields[i].len, value, len, false); } } } @@ -8025,11 +8058,6 @@ HttpTransact::build_response(State *s, HTTPHdr *base_response, HTTPHdr *outgoing HttpTransactHeaders::add_server_header_to_response(s->txn_conf, outgoing_response); - // auth-response update - // if (!s->state_machine->authAdapter.disabled()) { - // s->state_machine->authAdapter.UpdateResponseHeaders(outgoing_response); - // } - if (!s->cop_test_page && is_debug_tag_set("http_hdrs")) { if (base_response) { DUMP_HEADER("http_hdrs", base_response, s->state_machine_id, "Base Header for Building Response"); @@ -8185,8 +8213,8 @@ HttpTransact::build_error_response(State *s, HTTPStatus status_code, const char s->hdr_info.client_response.field_delete(MIME_FIELD_EXPIRES, MIME_LEN_EXPIRES); s->hdr_info.client_response.field_delete(MIME_FIELD_LAST_MODIFIED, MIME_LEN_LAST_MODIFIED); - if ((status_code == HTTP_STATUS_TEMPORARY_REDIRECT || status_code == HTTP_STATUS_MOVED_TEMPORARILY || - status_code == HTTP_STATUS_MOVED_PERMANENTLY) && + if ((status_code == HTTP_STATUS_PERMANENT_REDIRECT || status_code == HTTP_STATUS_TEMPORARY_REDIRECT || + status_code == HTTP_STATUS_MOVED_TEMPORARILY || status_code == HTTP_STATUS_MOVED_PERMANENTLY) && s->remap_redirect) { s->hdr_info.client_response.value_set(MIME_FIELD_LOCATION, MIME_LEN_LOCATION, s->remap_redirect, strlen(s->remap_redirect)); } @@ -8217,9 +8245,14 @@ HttpTransact::build_error_response(State *s, HTTPStatus status_code, const char s->internal_msg_buffer_size = len; s->internal_msg_buffer_fast_allocator_size = -1; - s->hdr_info.client_response.value_set(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE, body_type, strlen(body_type)); 
- s->hdr_info.client_response.value_set(MIME_FIELD_CONTENT_LANGUAGE, MIME_LEN_CONTENT_LANGUAGE, body_language, - strlen(body_language)); + if (len > 0) { + s->hdr_info.client_response.value_set(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE, body_type, strlen(body_type)); + s->hdr_info.client_response.value_set(MIME_FIELD_CONTENT_LANGUAGE, MIME_LEN_CONTENT_LANGUAGE, body_language, + strlen(body_language)); + } else { + s->hdr_info.client_response.field_delete(MIME_FIELD_CONTENT_TYPE, MIME_LEN_CONTENT_TYPE); + s->hdr_info.client_response.field_delete(MIME_FIELD_CONTENT_LANGUAGE, MIME_LEN_CONTENT_LANGUAGE); + } //////////////////////////////////////// // log a description in the error log // @@ -8344,6 +8377,8 @@ HttpTransact::get_error_string(int erno) // when HttpSM.cc::state_origin_server_read_response // receives an HTTP_EVENT_EOS. (line 1729 in HttpSM.cc, // version 1.145.2.13.2.57) + case ENET_CONNECT_FAILED: + return ("connect failed"); case UNKNOWN_INTERNAL_ERROR: return ("internal error - server connection terminated"); default: @@ -8353,7 +8388,7 @@ HttpTransact::get_error_string(int erno) } ink_time_t -ink_cluster_time(void) +ink_cluster_time() { int highest_delta; ink_time_t local_time; diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h index 9049891443a..a0b0eabdb5a 100644 --- a/proxy/http/HttpTransact.h +++ b/proxy/http/HttpTransact.h @@ -702,8 +702,14 @@ class HttpTransact port_attribute(HttpProxyPort::TRANSPORT_DEFAULT), is_transparent(false) { - memset(&src_addr, 0, sizeof(src_addr)); - memset(&dst_addr, 0, sizeof(dst_addr)); + clear(); + } + void + clear() + { + ink_zero(src_addr); + ink_zero(dst_addr); + connect_result = 0; } }; @@ -1342,23 +1348,31 @@ class HttpTransact typedef void (*TransactEntryFunc_t)(HttpTransact::State *s); +//////////////////////////////////////////////////////// +// the spec says about message body the following: // +// All responses to the HEAD request method MUST NOT // +// include a message-body, even 
though the presence // +// of entity-header fields might lead one to believe // +// they do. All 1xx (informational), 204 (no content),// +// and 304 (not modified) responses MUST NOT include // +// a message-body. // +//////////////////////////////////////////////////////// inline bool -is_response_body_precluded(HTTPStatus status_code, int method) +is_response_body_precluded(HTTPStatus status_code) { - //////////////////////////////////////////////////////// - // the spec says about message body the following: // - // All responses to the HEAD request method MUST NOT // - // include a message-body, even though the presence // - // of entity-header fields might lead one to believe // - // they do. All 1xx (informational), 204 (no content),// - // and 304 (not modified) responses MUST NOT include // - // a message-body. // - //////////////////////////////////////////////////////// - if (((status_code != HTTP_STATUS_OK) && ((status_code == HTTP_STATUS_NOT_MODIFIED) || ((status_code < HTTP_STATUS_OK) && (status_code >= HTTP_STATUS_CONTINUE)) || - (status_code == 204))) || - (method == HTTP_WKSIDX_HEAD)) { + (status_code == HTTP_STATUS_NO_CONTENT)))) { + return true; + } else { + return false; + } +} + +inline bool +is_response_body_precluded(HTTPStatus status_code, int method) +{ + if ((method == HTTP_WKSIDX_HEAD) || is_response_body_precluded(status_code)) { return true; } else { return false; diff --git a/proxy/http/HttpTransactCache.cc b/proxy/http/HttpTransactCache.cc index c6a801c7a47..af1adb5c0ed 100644 --- a/proxy/http/HttpTransactCache.cc +++ b/proxy/http/HttpTransactCache.cc @@ -26,7 +26,7 @@ #include "HttpTransact.h" #include "HttpTransactHeaders.h" #include "HttpTransactCache.h" -#include "time.h" +#include #include "HTTP.h" #include "HttpCompat.h" #include "ts/InkErrno.h" diff --git a/proxy/http/HttpTransactHeaders.cc b/proxy/http/HttpTransactHeaders.cc index 7046f9542cb..db49bee3dbb 100644 --- a/proxy/http/HttpTransactHeaders.cc +++ 
b/proxy/http/HttpTransactHeaders.cc @@ -31,6 +31,9 @@ #include "I_Machine.h" +#include +#include + bool HttpTransactHeaders::is_method_cacheable(const HttpConfigParams *http_config_param, const int method) { @@ -74,6 +77,19 @@ HttpTransactHeaders::is_this_method_supported(int the_scheme, int the_method) } } +bool +HttpTransactHeaders::is_method_safe(int method) +{ + return (method == HTTP_WKSIDX_GET || method == HTTP_WKSIDX_OPTIONS || method == HTTP_WKSIDX_HEAD || method == HTTP_WKSIDX_TRACE); +} + +bool +HttpTransactHeaders::is_method_idempotent(int method) +{ + return (method == HTTP_WKSIDX_CONNECT || method == HTTP_WKSIDX_DELETE || method == HTTP_WKSIDX_GET || + method == HTTP_WKSIDX_HEAD || method == HTTP_WKSIDX_PUT || method == HTTP_WKSIDX_OPTIONS || method == HTTP_WKSIDX_TRACE); +} + void HttpTransactHeaders::insert_supported_methods_in_response(HTTPHdr *response, int scheme) { @@ -672,6 +688,54 @@ HttpTransactHeaders::insert_server_header_in_response(const char *server_tag, in } } +/// write the protocol stack to the @a via_string. +/// If @a detailed then do the full stack, otherwise just the "top level" protocol. +size_t +write_via_protocol_stack(char *via_string, size_t len, bool detailed, ts::StringView *proto_buf, int n_proto) +{ + char *via = via_string; // keep original pointer for size computation later. 
+ char *limit = via_string + len; + static constexpr ts::StringView tls_prefix{"tls/", ts::StringView::literal}; + + if (n_proto <= 0 || via == nullptr || len <= 0) { + // nothing + } else if (detailed) { + for (ts::StringView *v = proto_buf, *v_limit = proto_buf + n_proto; v < v_limit && (via + v->size() + 1) < limit; ++v) { + if (v != proto_buf) { + *via++ = ' '; + } + memcpy(via, v->ptr(), v->size()); + via += v->size(); + } + } else { + ts::StringView *proto_end = proto_buf + n_proto; + bool http_1_0_p = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_1_0) != proto_end; + bool http_1_1_p = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_1_1) != proto_end; + + if ((http_1_0_p || http_1_1_p) && via + 10 < limit) { + bool tls_p = std::find_if(proto_buf, proto_end, [](ts::StringView tag) { return tls_prefix.isPrefixOf(tag); }) != proto_end; + bool http_2_p = std::find(proto_buf, proto_end, IP_PROTO_TAG_HTTP_2_0) != proto_end; + + memcpy(via, "http", 4); + via += 4; + if (tls_p) + *via++ = 's'; + *via++ = '/'; + if (http_2_p) { + *via++ = '2'; + } else if (http_1_0_p) { + memcpy(via, "1.0", 3); + via += 3; + } else if (http_1_1_p) { + memcpy(via, "1.1", 3); + via += 3; + } + *via++ = ' '; + } + } + return via - via_string; +} + /////////////////////////////////////////////////////////////////////////////// // Name : insert_via_header_in_request // Description: takes in existing via_string and inserts it in header @@ -721,6 +785,7 @@ HttpTransactHeaders::insert_via_header_in_request(HttpTransact::State *s, HTTPHd { char new_via_string[1024]; // 512-bytes for hostname+via string, 512-bytes for the debug info char *via_string = new_via_string; + char *via_limit = via_string + sizeof(new_via_string); if ((s->http_config_param->proxy_hostname_len + s->http_config_param->proxy_request_via_string_len) > 512) { header->value_append(MIME_FIELD_VIA, MIME_LEN_VIA, "TrafficServer", 13, true); @@ -728,26 +793,10 @@ 
HttpTransactHeaders::insert_via_header_in_request(HttpTransact::State *s, HTTPHd } char *incoming_via = s->via_string; - int scheme = s->orig_scheme; - ink_assert(scheme >= 0); - - int scheme_len = hdrtoken_index_to_length(scheme); - int32_t hversion = header->version_get().m_version; + std::array proto_buf; // 10 seems like a reasonable number of protos to print + int n_proto = s->state_machine->populate_client_protocol(proto_buf.data(), proto_buf.size()); - memcpy(via_string, hdrtoken_index_to_wks(scheme), scheme_len); - via_string += scheme_len; - - // Common case (I hope?) - if ((HTTP_MAJOR(hversion) == 1) && HTTP_MINOR(hversion) == 1) { - memcpy(via_string, "/1.1 ", 5); - via_string += 5; - } else { - *via_string++ = '/'; - *via_string++ = '0' + HTTP_MAJOR(hversion); - *via_string++ = '.'; - *via_string++ = '0' + HTTP_MINOR(hversion); - *via_string++ = ' '; - } + via_string += write_via_protocol_stack(via_string, via_limit - via_string, false, proto_buf.data(), n_proto); via_string += nstrcpy(via_string, s->http_config_param->proxy_hostname); *via_string++ = '['; @@ -772,6 +821,14 @@ HttpTransactHeaders::insert_via_header_in_request(HttpTransact::State *s, HTTPHd via_string += VIA_SERVER - VIA_CLIENT; } *via_string++ = ']'; + + // reserve 4 for " []" and 3 for "])". 
+ if (via_limit - via_string > 4 && s->txn_conf->insert_request_via_string > 3) { // Ultra highest verbosity + *via_string++ = ' '; + *via_string++ = '['; + via_string += write_via_protocol_stack(via_string, via_limit - via_string - 3, true, proto_buf.data(), n_proto); + *via_string++ = ']'; + } } *via_string++ = ')'; @@ -806,6 +863,7 @@ HttpTransactHeaders::insert_via_header_in_response(HttpTransact::State *s, HTTPH { char new_via_string[1024]; // 512-bytes for hostname+via string, 512-bytes for the debug info char *via_string = new_via_string; + char *via_limit = via_string + sizeof(new_via_string); if ((s->http_config_param->proxy_hostname_len + s->http_config_param->proxy_response_via_string_len) > 512) { header->value_append(MIME_FIELD_VIA, MIME_LEN_VIA, "TrafficServer", 13, true); @@ -813,14 +871,17 @@ HttpTransactHeaders::insert_via_header_in_response(HttpTransact::State *s, HTTPH } char *incoming_via = s->via_string; + std::array proto_buf; // 10 seems like a reasonable number of protos to print + int n_proto = 0; - const char *proto_buf[10]; // 10 seems like a reasonable number of protos to print - int retval = s->state_machine->populate_client_protocol(proto_buf, countof(proto_buf)); - for (int i = 0; i < retval; i++) { - memcpy(via_string, proto_buf[i], strlen(proto_buf[i])); - via_string += strlen(proto_buf[i]); - *via_string++ = ' '; + // Should suffice - if we're adding a response VIA, the connection is HTTP and only 1.0 and 1.1 are supported outbound. + proto_buf[n_proto++] = HTTP_MINOR(header->version_get().m_version) == 0 ? 
IP_PROTO_TAG_HTTP_1_0 : IP_PROTO_TAG_HTTP_1_1; + + auto ss = s->state_machine->get_server_session(); + if (ss) { + n_proto += ss->populate_protocol(proto_buf.data() + n_proto, proto_buf.size() - n_proto); } + via_string += write_via_protocol_stack(via_string, via_limit - via_string, false, proto_buf.data(), n_proto); via_string += nstrcpy(via_string, s->http_config_param->proxy_hostname); *via_string++ = ' '; @@ -841,6 +902,13 @@ HttpTransactHeaders::insert_via_header_in_response(HttpTransact::State *s, HTTPH via_string += VIA_PROXY - VIA_CACHE; } *via_string++ = ']'; + + if (via_limit - via_string > 4 && s->txn_conf->insert_response_via_string > 3) { // Ultra highest verbosity + *via_string++ = ' '; + *via_string++ = '['; + via_string += write_via_protocol_stack(via_string, via_limit - via_string - 3, true, proto_buf.data(), n_proto); + *via_string++ = ']'; + } } *via_string++ = ')'; diff --git a/proxy/http/HttpTransactHeaders.h b/proxy/http/HttpTransactHeaders.h index f6719ab224c..c27a6a36968 100644 --- a/proxy/http/HttpTransactHeaders.h +++ b/proxy/http/HttpTransactHeaders.h @@ -57,6 +57,8 @@ class HttpTransactHeaders ink_time_t base_response_date, ink_time_t now); static bool does_server_allow_response_to_be_stored(HTTPHdr *resp); static bool downgrade_request(bool *origin_server_keep_alive, HTTPHdr *outgoing_request); + static bool is_method_safe(int method); + static bool is_method_idempotent(int method); static void generate_and_set_squid_codes(HTTPHdr *header, char *via_string, HttpTransact::SquidLogInfo *squid_codes); diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc index 0f38783db96..eb4b1dd41d9 100644 --- a/proxy/http/HttpTunnel.cc +++ b/proxy/http/HttpTunnel.cc @@ -582,11 +582,11 @@ HttpTunnel::reset() void HttpTunnel::kill_tunnel() { - for (int i = 0; i < MAX_PRODUCERS; ++i) { - if (producers[i].vc != nullptr) { - chain_abort_all(&producers[i]); + for (auto &producer : producers) { + if (producer.vc != nullptr) { + 
chain_abort_all(&producer); } - ink_assert(producers[i].alive == false); + ink_assert(producer.alive == false); } active = false; this->deallocate_buffers(); @@ -627,29 +627,29 @@ HttpTunnel::deallocate_buffers() { int num = 0; ink_release_assert(active == false); - for (int i = 0; i < MAX_PRODUCERS; ++i) { - if (producers[i].read_buffer != nullptr) { - ink_assert(producers[i].vc != nullptr); - free_MIOBuffer(producers[i].read_buffer); - producers[i].read_buffer = nullptr; - producers[i].buffer_start = nullptr; + for (auto &producer : producers) { + if (producer.read_buffer != nullptr) { + ink_assert(producer.vc != nullptr); + free_MIOBuffer(producer.read_buffer); + producer.read_buffer = nullptr; + producer.buffer_start = nullptr; num++; } - if (producers[i].chunked_handler.dechunked_buffer != nullptr) { - ink_assert(producers[i].vc != nullptr); - free_MIOBuffer(producers[i].chunked_handler.dechunked_buffer); - producers[i].chunked_handler.dechunked_buffer = nullptr; + if (producer.chunked_handler.dechunked_buffer != nullptr) { + ink_assert(producer.vc != nullptr); + free_MIOBuffer(producer.chunked_handler.dechunked_buffer); + producer.chunked_handler.dechunked_buffer = nullptr; num++; } - if (producers[i].chunked_handler.chunked_buffer != nullptr) { - ink_assert(producers[i].vc != nullptr); - free_MIOBuffer(producers[i].chunked_handler.chunked_buffer); - producers[i].chunked_handler.chunked_buffer = nullptr; + if (producer.chunked_handler.chunked_buffer != nullptr) { + ink_assert(producer.vc != nullptr); + free_MIOBuffer(producer.chunked_handler.chunked_buffer); + producer.chunked_handler.chunked_buffer = nullptr; num++; } - producers[i].chunked_handler.max_chunk_header_len = 0; + producer.chunked_handler.max_chunk_header_len = 0; } return num; } diff --git a/proxy/http/HttpUpdateSM.cc b/proxy/http/HttpUpdateSM.cc index 16e3463c67f..91d6074cc1a 100644 --- a/proxy/http/HttpUpdateSM.cc +++ b/proxy/http/HttpUpdateSM.cc @@ -74,7 +74,7 @@ 
HttpUpdateSM::start_scheduled_update(Continuation *cont, HTTPHdr *request) // Fix ME: What should these be set to since there is not a // real client ats_ip4_set(&t_state.client_info.src_addr, htonl(INADDR_LOOPBACK), 0); - t_state.backdoor_request = 0; + t_state.backdoor_request = false; t_state.client_info.port_attribute = HttpProxyPort::TRANSPORT_DEFAULT; t_state.req_flavor = HttpTransact::REQ_FLAVOR_SCHEDULED_UPDATE; diff --git a/proxy/http/remap/AclFiltering.cc b/proxy/http/remap/AclFiltering.cc index 7f593c24b06..62ad0beade9 100644 --- a/proxy/http/remap/AclFiltering.cc +++ b/proxy/http/remap/AclFiltering.cc @@ -30,7 +30,7 @@ // =============================================================================== void -acl_filter_rule::reset(void) +acl_filter_rule::reset() { int i; for (i = (argc = 0); i < ACL_FILTER_MAX_ARGV; i++) { @@ -91,7 +91,7 @@ acl_filter_rule::name(const char *_name) } void -acl_filter_rule::print(void) +acl_filter_rule::print() { int i; printf("-----------------------------------------------------------------------------------------\n"); @@ -105,8 +105,8 @@ acl_filter_rule::print(void) } } printf("nonstandard methods="); - for (MethodMap::iterator iter = nonstandard_methods.begin(), end = nonstandard_methods.end(); iter != end; ++iter) { - printf("%s ", iter->c_str()); + for (const auto &nonstandard_method : nonstandard_methods) { + printf("%s ", nonstandard_method.c_str()); } printf("\n"); printf("src_ip_cnt=%d\n", src_ip_cnt); diff --git a/proxy/http/remap/RemapConfig.cc b/proxy/http/remap/RemapConfig.cc index 40f245bee62..1f43d4a6c86 100644 --- a/proxy/http/remap/RemapConfig.cc +++ b/proxy/http/remap/RemapConfig.cc @@ -223,7 +223,7 @@ parse_activate_directive(const char *directive, BUILD_TABLE_INFO *bti, char *err } if ((rp = acl_filter_rule::find_byname(bti->rules_list, (const char *)bti->paramv[1])) == nullptr) { - snprintf(errbuf, errbufsize, "Undefined filter \"%s\" in directive \"%s\"", bti->paramv[1], directive); + snprintf(errbuf, 
errbufsize, R"(Undefined filter "%s" in directive "%s")", bti->paramv[1], directive); Debug("url_rewrite", "[parse_directive] %s", errbuf); return (const char *)errbuf; } @@ -250,7 +250,7 @@ parse_deactivate_directive(const char *directive, BUILD_TABLE_INFO *bti, char *e } if ((rp = acl_filter_rule::find_byname(bti->rules_list, (const char *)bti->paramv[1])) == nullptr) { - snprintf(errbuf, errbufsize, "Undefined filter \"%s\" in directive \"%s\"", bti->paramv[1], directive); + snprintf(errbuf, errbufsize, R"(Undefined filter "%s" in directive "%s")", bti->paramv[1], directive); Debug("url_rewrite", "[parse_directive] %s", errbuf); return (const char *)errbuf; } @@ -832,19 +832,19 @@ remap_load_plugin(const char **argv, int argc, url_mapping *mp, char *errbuf, in pi->fp_tsremap_os_response = (remap_plugin_info::_tsremap_os_response *)dlsym(pi->dlh, TSREMAP_FUNCNAME_OS_RESPONSE); if (!pi->fp_tsremap_init) { - snprintf(errbuf, errbufsize, "Can't find \"%s\" function in remap plugin \"%s\"", TSREMAP_FUNCNAME_INIT, c); + snprintf(errbuf, errbufsize, R"(Can't find "%s" function in remap plugin "%s")", TSREMAP_FUNCNAME_INIT, c); retcode = -10; } else if (!pi->fp_tsremap_new_instance && pi->fp_tsremap_delete_instance) { snprintf(errbuf, errbufsize, - "Can't find \"%s\" function in remap plugin \"%s\" which is required if \"%s\" function exists", + R"(Can't find "%s" function in remap plugin "%s" which is required if "%s" function exists)", TSREMAP_FUNCNAME_NEW_INSTANCE, c, TSREMAP_FUNCNAME_DELETE_INSTANCE); retcode = -11; } else if (!pi->fp_tsremap_do_remap) { - snprintf(errbuf, errbufsize, "Can't find \"%s\" function in remap plugin \"%s\"", TSREMAP_FUNCNAME_DO_REMAP, c); + snprintf(errbuf, errbufsize, R"(Can't find "%s" function in remap plugin "%s")", TSREMAP_FUNCNAME_DO_REMAP, c); retcode = -12; } else if (pi->fp_tsremap_new_instance && !pi->fp_tsremap_delete_instance) { snprintf(errbuf, errbufsize, - "Can't find \"%s\" function in remap plugin \"%s\" which is 
required if \"%s\" function exists", + R"(Can't find "%s" function in remap plugin "%s" which is required if "%s" function exists)", TSREMAP_FUNCNAME_DELETE_INSTANCE, c, TSREMAP_FUNCNAME_NEW_INSTANCE); retcode = -13; } diff --git a/proxy/http/remap/UrlMappingPathIndex.cc b/proxy/http/remap/UrlMappingPathIndex.cc index 9276866ef3a..f7b502142c8 100644 --- a/proxy/http/remap/UrlMappingPathIndex.cc +++ b/proxy/http/remap/UrlMappingPathIndex.cc @@ -24,8 +24,8 @@ UrlMappingPathIndex::~UrlMappingPathIndex() { - for (UrlMappingGroup::iterator group_iter = m_tries.begin(); group_iter != m_tries.end(); ++group_iter) { - delete group_iter->second; // Delete the Trie + for (auto &m_trie : m_tries) { + delete m_trie.second; // Delete the Trie } m_tries.clear(); } @@ -86,7 +86,7 @@ UrlMappingPathIndex::Search(URL *request_url, int request_port, bool normal_sear void UrlMappingPathIndex::Print() { - for (UrlMappingGroup::iterator group_iter = m_tries.begin(); group_iter != m_tries.end(); ++group_iter) { - group_iter->second->Print(); + for (auto &m_trie : m_tries) { + m_trie.second->Print(); } } diff --git a/proxy/http2/HPACK.cc b/proxy/http2/HPACK.cc index bd021e4f533..9ce3e5f6065 100644 --- a/proxy/http2/HPACK.cc +++ b/proxy/http2/HPACK.cc @@ -295,6 +295,12 @@ HpackIndexingTable::add_header_field(const MIMEField *field) _dynamic_table->add_header_field(field); } +uint32_t +HpackIndexingTable::maximum_size() const +{ + return _dynamic_table->maximum_size(); +} + uint32_t HpackIndexingTable::size() const { @@ -351,6 +357,12 @@ HpackDynamicTable::add_header_field(const MIMEField *field) } } +uint32_t +HpackDynamicTable::maximum_size() const +{ + return _maximum_size; +} + uint32_t HpackDynamicTable::size() const { @@ -619,6 +631,18 @@ encode_literal_header_field_with_new_name(uint8_t *buf_start, const uint8_t *buf return p - buf_start; } +int64_t +encode_dynamic_table_size_update(uint8_t *buf_start, const uint8_t *buf_end, uint32_t size) +{ + const int64_t len = 
encode_integer(buf_start, buf_end, size, 5); + if (len == -1) { + return -1; + } + buf_start[0] |= 0x20; + + return len; +} + // // [RFC 7541] 5.1. Integer representation // @@ -684,7 +708,7 @@ decode_string(Arena &arena, char **str, uint32_t &str_length, const uint8_t *buf *str = arena.str_alloc(encoded_string_len * 2); len = huffman_decode(*str, p, encoded_string_len); - if (len == HPACK_ERROR_COMPRESSION_ERROR) { + if (len < 0) { return HPACK_ERROR_COMPRESSION_ERROR; } str_length = len; @@ -830,7 +854,8 @@ decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start, // [RFC 7541] 6.3. Dynamic Table Size Update // int64_t -update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, HpackIndexingTable &indexing_table) +update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, HpackIndexingTable &indexing_table, + uint32_t maximum_table_size) { if (buf_start == buf_end) { return HPACK_ERROR_COMPRESSION_ERROR; @@ -843,6 +868,10 @@ update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, Hpac return HPACK_ERROR_COMPRESSION_ERROR; } + if (size > maximum_table_size) { + return HPACK_ERROR_COMPRESSION_ERROR; + } + if (indexing_table.update_maximum_size(size) == false) { return HPACK_ERROR_COMPRESSION_ERROR; } @@ -852,7 +881,7 @@ update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, Hpac int64_t hpack_decode_header_block(HpackIndexingTable &indexing_table, HTTPHdr *hdr, const uint8_t *in_buf, const size_t in_buf_len, - uint32_t max_header_size) + uint32_t max_header_size, uint32_t maximum_table_size) { const uint8_t *cursor = in_buf; const uint8_t *const in_buf_end = in_buf + in_buf_len; @@ -897,7 +926,7 @@ hpack_decode_header_block(HpackIndexingTable &indexing_table, HTTPHdr *hdr, cons if (header_field_started) { return HPACK_ERROR_COMPRESSION_ERROR; } - read_bytes = update_dynamic_table_size(cursor, in_buf_end, indexing_table); + read_bytes = update_dynamic_table_size(cursor, 
in_buf_end, indexing_table, maximum_table_size); if (read_bytes == HPACK_ERROR_COMPRESSION_ERROR) { return HPACK_ERROR_COMPRESSION_ERROR; } @@ -928,7 +957,8 @@ hpack_decode_header_block(HpackIndexingTable &indexing_table, HTTPHdr *hdr, cons } int64_t -hpack_encode_header_block(HpackIndexingTable &indexing_table, uint8_t *out_buf, const size_t out_buf_len, HTTPHdr *hdr) +hpack_encode_header_block(HpackIndexingTable &indexing_table, uint8_t *out_buf, const size_t out_buf_len, HTTPHdr *hdr, + int32_t maximum_table_size) { uint8_t *cursor = out_buf; const uint8_t *const out_buf_end = out_buf + out_buf_len; @@ -936,6 +966,16 @@ hpack_encode_header_block(HpackIndexingTable &indexing_table, uint8_t *out_buf, ink_assert(http_hdr_type_get(hdr->m_http) != HTTP_TYPE_UNKNOWN); + // Update dynamic table size + if (maximum_table_size >= 0) { + indexing_table.update_maximum_size(maximum_table_size); + written = encode_dynamic_table_size_update(cursor, out_buf_end, maximum_table_size); + if (written == HPACK_ERROR_COMPRESSION_ERROR) { + return HPACK_ERROR_COMPRESSION_ERROR; + } + cursor += written; + } + MIMEFieldIter field_iter; for (MIMEField *field = hdr->iter_get_first(&field_iter); field != nullptr; field = hdr->iter_get_next(&field_iter)) { HpackFieldType field_type; @@ -977,3 +1017,9 @@ hpack_encode_header_block(HpackIndexingTable &indexing_table, uint8_t *out_buf, } return cursor - out_buf; } + +int32_t +hpack_get_maximum_table_size(HpackIndexingTable &indexing_table) +{ + return indexing_table.maximum_size(); +} diff --git a/proxy/http2/HPACK.h b/proxy/http2/HPACK.h index e0c60f98eab..38bee2cf19d 100644 --- a/proxy/http2/HPACK.h +++ b/proxy/http2/HPACK.h @@ -122,6 +122,7 @@ class HpackDynamicTable const MIMEField *get_header_field(uint32_t index) const; void add_header_field(const MIMEField *field); + uint32_t maximum_size() const; uint32_t size() const; bool update_maximum_size(uint32_t new_size); @@ -146,6 +147,7 @@ class HpackIndexingTable int get_header_field(uint32_t 
index, MIMEFieldWrapper &header_field) const; void add_header_field(const MIMEField *field); + uint32_t maximum_size() const; uint32_t size() const; bool update_maximum_size(uint32_t new_size); @@ -167,12 +169,15 @@ int64_t decode_indexed_header_field(MIMEFieldWrapper &header, const uint8_t *buf HpackIndexingTable &indexing_table); int64_t decode_literal_header_field(MIMEFieldWrapper &header, const uint8_t *buf_start, const uint8_t *buf_end, HpackIndexingTable &indexing_table); -int64_t update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, HpackIndexingTable &indexing_table); +int64_t update_dynamic_table_size(const uint8_t *buf_start, const uint8_t *buf_end, HpackIndexingTable &indexing_table, + uint32_t maximum_table_size); // High level interfaces typedef HpackIndexingTable HpackHandle; int64_t hpack_decode_header_block(HpackHandle &handle, HTTPHdr *hdr, const uint8_t *in_buf, const size_t in_buf_len, - uint32_t max_header_size); -int64_t hpack_encode_header_block(HpackHandle &handle, uint8_t *out_buf, const size_t out_buf_len, HTTPHdr *hdr); + uint32_t max_header_size, uint32_t maximum_table_size); +int64_t hpack_encode_header_block(HpackHandle &handle, uint8_t *out_buf, const size_t out_buf_len, HTTPHdr *hdr, + int32_t maximum_table_size = -1); +int32_t hpack_get_maximum_table_size(HpackHandle &handle); #endif /* __HPACK_H__ */ diff --git a/proxy/http2/HTTP2.cc b/proxy/http2/HTTP2.cc index 62df96b54a8..16b518b1dd3 100644 --- a/proxy/http2/HTTP2.cc +++ b/proxy/http2/HTTP2.cc @@ -43,7 +43,8 @@ const unsigned HTTP2_LEN_AUTHORITY = countof(":authority") - 1; const unsigned HTTP2_LEN_PATH = countof(":path") - 1; const unsigned HTTP2_LEN_STATUS = countof(":status") - 1; -static size_t HTTP2_LEN_STATUS_VALUE_STR = 3; +static size_t HTTP2_LEN_STATUS_VALUE_STR = 3; +static const int HTTP2_MAX_TABLE_SIZE_LIMIT = 64 * 1024; // Statistics RecRawStatBlock *http2_rsb; @@ -123,25 +124,9 @@ memcpy_and_advance(uint8_t(&dst), byte_pointer &src) ++src.u8; } 
-static bool -http2_frame_flags_are_valid(uint8_t ftype, uint8_t fflags) -{ - if (ftype >= HTTP2_FRAME_TYPE_MAX) { - // Skip validation for Unkown frame type - [RFC 7540] 5.5. Extending HTTP/2 - return true; - } - - // The frame flags are valid for this frame if nothing outside the defined bits is set. - return (fflags & ~HTTP2_FRAME_FLAGS_MASKS[ftype]) == 0; -} - bool http2_frame_header_is_valid(const Http2FrameHeader &hdr, unsigned max_frame_size) { - if (!http2_frame_flags_are_valid(hdr.type, hdr.flags)) { - return false; - } - // 6.1 If a DATA frame is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type PROTOCOL_ERROR. if (hdr.type == HTTP2_FRAME_TYPE_DATA && hdr.streamid == 0) { @@ -311,7 +296,7 @@ http2_write_goaway(const Http2Goaway &goaway, IOVec iov) } write_and_advance(ptr, goaway.last_streamid); - write_and_advance(ptr, goaway.error_code); + write_and_advance(ptr, static_cast(goaway.error_code)); return true; } @@ -405,7 +390,7 @@ http2_parse_goaway(IOVec iov, Http2Goaway &goaway) memcpy_and_advance(ec.bytes, ptr); goaway.last_streamid = ntohl(sid.value); - goaway.error_code = ntohl(ec.value); + goaway.error_code = static_cast(ntohl(ec.value)); return true; } @@ -462,7 +447,7 @@ http2_convert_header_from_2_to_1_1(HTTPHdr *headers) memcpy(url + scheme_len, "://", 3); memcpy(url + scheme_len + 3, authority, authority_len); memcpy(url + scheme_len + 3 + authority_len, path, path_len); - url_parse(headers->m_heap, headers->m_http->u.req.m_url_impl, &url_start, url + url_length, 1); + url_parse(headers->m_heap, headers->m_http->u.req.m_url_impl, &url_start, url + url_length, true); arena.str_free(url); // Get value of :method @@ -470,7 +455,7 @@ http2_convert_header_from_2_to_1_1(HTTPHdr *headers) method = field->value_get(&method_len); int method_wks_idx = hdrtoken_tokenize(method, method_len); - http_hdr_method_set(headers->m_heap, headers->m_http, method, method_wks_idx, method_len, 
0); + http_hdr_method_set(headers->m_heap, headers->m_http, method, method_wks_idx, method_len, false); } else { return PARSE_RESULT_ERROR; } @@ -597,17 +582,24 @@ http2_generate_h2_header_from_1_1(HTTPHdr *headers, HTTPHdr *h2_headers) } Http2ErrorCode -http2_encode_header_blocks(HTTPHdr *in, uint8_t *out, uint32_t out_len, uint32_t *len_written, HpackHandle &handle) -{ +http2_encode_header_blocks(HTTPHdr *in, uint8_t *out, uint32_t out_len, uint32_t *len_written, HpackHandle &handle, + int32_t maximum_table_size) +{ + // Limit the maximum table size to 64kB, which is the size advertised by major clients + maximum_table_size = min(maximum_table_size, HTTP2_MAX_TABLE_SIZE_LIMIT); + // Set maximum table size only if it is different from current maximum size + if (maximum_table_size == hpack_get_maximum_table_size(handle)) { + maximum_table_size = -1; + } // TODO: It would be better to split Cookie header value - int64_t result = hpack_encode_header_block(handle, out, out_len, in); + int64_t result = hpack_encode_header_block(handle, out, out_len, in, maximum_table_size); if (result < 0) { - return HTTP2_ERROR_COMPRESSION_ERROR; + return Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR; } if (len_written) { *len_written = result; } - return HTTP2_ERROR_NO_ERROR; + return Http2ErrorCode::HTTP2_ERROR_NO_ERROR; } /* @@ -615,22 +607,22 @@ http2_encode_header_blocks(HTTPHdr *in, uint8_t *out, uint32_t out_len, uint32_t */ Http2ErrorCode http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_t buf_len, uint32_t *len_read, HpackHandle &handle, - bool &trailing_header) + bool &trailing_header, uint32_t maximum_table_size) { const MIMEField *field; const char *value; int len; bool is_trailing_header = trailing_header; - int64_t result = hpack_decode_header_block(handle, hdr, buf_start, buf_len, Http2::max_request_header_size); + int64_t result = hpack_decode_header_block(handle, hdr, buf_start, buf_len, Http2::max_request_header_size, 
maximum_table_size); if (result < 0) { if (result == HPACK_ERROR_COMPRESSION_ERROR) { - return HTTP2_ERROR_COMPRESSION_ERROR; + return Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR; } else if (result == HPACK_ERROR_SIZE_EXCEEDED_ERROR) { - return HTTP2_ERROR_ENHANCE_YOUR_CALM; + return Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM; } - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } if (len_read) { *len_read = result; @@ -649,18 +641,20 @@ http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_ if (len && value[0] == ':') { ++pseudo_header_count; if (pseudo_header_count > expected_pseudo_header_count) { - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } + } else if (len <= 0) { + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } else { if (pseudo_header_count != expected_pseudo_header_count) { - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } } // Check whether header field name is lower case // This check should be here but it will fail because WKSs in MIMEField is old fashioned. 
// for (uint32_t i = 0; i < len; ++i) { // if (ParseRules::is_upalpha(value[i])) { - // return HTTP2_ERROR_PROTOCOL_ERROR; + // return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; // } // } } @@ -668,7 +662,7 @@ http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_ // rfc7540,sec8.1.2.2: Any message containing connection-specific header // fields MUST be treated as malformed if (hdr->field_find(MIME_FIELD_CONNECTION, MIME_LEN_CONNECTION) != nullptr) { - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } // :path pseudo header MUST NOT empty for http or https URIs @@ -676,7 +670,7 @@ http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_ if (field) { field->value_get(&len); if (len == 0) { - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } } @@ -693,7 +687,7 @@ http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_ if (field) { value = field->value_get(&len); if (!(len == 8 && memcmp(value, "trailers", 8) == 0)) { - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } } @@ -706,15 +700,15 @@ http2_decode_header_blocks(HTTPHdr *hdr, const uint8_t *buf_start, const uint32_ hdr->field_find(HTTP2_VALUE_AUTHORITY, HTTP2_LEN_AUTHORITY) == nullptr || hdr->field_find(HTTP2_VALUE_STATUS, HTTP2_LEN_STATUS) != nullptr) { // Decoded header field is invalid - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } } else { // Pseudo headers is insufficient - return HTTP2_ERROR_PROTOCOL_ERROR; + return Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; } } - return HTTP2_ERROR_NO_ERROR; + return Http2ErrorCode::HTTP2_ERROR_NO_ERROR; } // Initialize this subsystem with librecords configs (for now) @@ -756,12 +750,20 @@ Http2::init() ink_release_assert(http2_settings_parameter_is_valid({HTTP2_SETTINGS_HEADER_TABLE_SIZE, header_table_size})); 
ink_release_assert(http2_settings_parameter_is_valid({HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE, max_header_list_size})); +#define HTTP2_CLEAR_DYN_STAT(x) \ + do { \ + RecSetRawStatSum(http2_rsb, x, 0); \ + RecSetRawStatCount(http2_rsb, x, 0); \ + } while (0); + // Setup statistics http2_rsb = RecAllocateRawStatBlock(static_cast(HTTP2_N_STATS)); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_CURRENT_CLIENT_SESSION_NAME, RECD_INT, RECP_NON_PERSISTENT, static_cast(HTTP2_STAT_CURRENT_CLIENT_SESSION_COUNT), RecRawStatSyncSum); + HTTP2_CLEAR_DYN_STAT(HTTP2_STAT_CURRENT_CLIENT_SESSION_COUNT); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_CURRENT_CLIENT_STREAM_NAME, RECD_INT, RECP_NON_PERSISTENT, static_cast(HTTP2_STAT_CURRENT_CLIENT_STREAM_COUNT), RecRawStatSyncSum); + HTTP2_CLEAR_DYN_STAT(HTTP2_STAT_CURRENT_CLIENT_STREAM_COUNT); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_TOTAL_CLIENT_STREAM_NAME, RECD_INT, RECP_PERSISTENT, static_cast(HTTP2_STAT_TOTAL_CLIENT_STREAM_COUNT), RecRawStatSyncCount); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_TOTAL_TRANSACTIONS_TIME_NAME, RECD_INT, RECP_PERSISTENT, @@ -907,16 +909,20 @@ const static struct { {HTTP2_FRAME_TYPE_MAX, 0x40, true}, {HTTP2_FRAME_TYPE_MAX, 0x80, true}}; +static const uint8_t HTTP2_FRAME_FLAGS_MASKS[HTTP2_FRAME_TYPE_MAX] = { + HTTP2_FLAGS_DATA_MASK, HTTP2_FLAGS_HEADERS_MASK, HTTP2_FLAGS_PRIORITY_MASK, HTTP2_FLAGS_RST_STREAM_MASK, + HTTP2_FLAGS_SETTINGS_MASK, HTTP2_FLAGS_PUSH_PROMISE_MASK, HTTP2_FLAGS_PING_MASK, HTTP2_FLAGS_GOAWAY_MASK, + HTTP2_FLAGS_WINDOW_UPDATE_MASK, HTTP2_FLAGS_CONTINUATION_MASK, +}; + REGRESSION_TEST(HTTP2_FRAME_FLAGS)(RegressionTest *t, int, int *pstatus) { TestBox box(t, pstatus); box = REGRESSION_TEST_PASSED; - for (unsigned int i = 0; i < sizeof(http2_frame_flags_test_case) / sizeof(http2_frame_flags_test_case[0]); ++i) { - box.check(http2_frame_flags_are_valid(http2_frame_flags_test_case[i].ftype, http2_frame_flags_test_case[i].fflags) == - 
http2_frame_flags_test_case[i].valid, - "Validation of frame flags (type: %d, flags: %d) are expected %d, but not", http2_frame_flags_test_case[i].ftype, - http2_frame_flags_test_case[i].fflags, http2_frame_flags_test_case[i].valid); + for (auto i : http2_frame_flags_test_case) { + box.check((i.ftype >= HTTP2_FRAME_TYPE_MAX || (i.fflags & ~HTTP2_FRAME_FLAGS_MASKS[i.ftype]) == 0) == i.valid, + "Validation of frame flags (type: %d, flags: %d) are expected %d, but not", i.ftype, i.fflags, i.valid); } } diff --git a/proxy/http2/HTTP2.h b/proxy/http2/HTTP2.h index 5c982c421a5..19272a5c8a3 100644 --- a/proxy/http2/HTTP2.h +++ b/proxy/http2/HTTP2.h @@ -96,14 +96,14 @@ extern RecRawStatBlock *http2_rsb; // Container for statistics. static const Http2WindowSize HTTP2_MAX_WINDOW_SIZE = 0x7FFFFFFF; // [RFC 7540] 5.4. Error Handling -enum Http2ErrorClass { +enum class Http2ErrorClass { HTTP2_ERROR_CLASS_NONE, HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_CLASS_STREAM, }; // [RFC 7540] 7. Error Codes -enum Http2ErrorCode { +enum class Http2ErrorCode { HTTP2_ERROR_NO_ERROR = 0, HTTP2_ERROR_PROTOCOL_ERROR = 1, HTTP2_ERROR_INTERNAL_ERROR = 2, @@ -123,7 +123,7 @@ enum Http2ErrorCode { }; // [RFC 7540] 5.1. Stream States -enum Http2StreamState { +enum class Http2StreamState { HTTP2_STREAM_STATE_IDLE, HTTP2_STREAM_STATE_RESERVED_LOCAL, HTTP2_STREAM_STATE_RESERVED_REMOTE, @@ -215,12 +215,6 @@ enum Http2FrameFlagsContinuation { HTTP2_FLAGS_CONTINUATION_MASK = 0x04, }; -static const uint8_t HTTP2_FRAME_FLAGS_MASKS[HTTP2_FRAME_TYPE_MAX] = { - HTTP2_FLAGS_DATA_MASK, HTTP2_FLAGS_HEADERS_MASK, HTTP2_FLAGS_PRIORITY_MASK, HTTP2_FLAGS_RST_STREAM_MASK, - HTTP2_FLAGS_SETTINGS_MASK, HTTP2_FLAGS_PUSH_PROMISE_MASK, HTTP2_FLAGS_PING_MASK, HTTP2_FLAGS_GOAWAY_MASK, - HTTP2_FLAGS_WINDOW_UPDATE_MASK, HTTP2_FLAGS_CONTINUATION_MASK, -}; - // [RFC 7540] 6.5.2. 
Defined SETTINGS Parameters enum Http2SettingsIdentifier { HTTP2_SETTINGS_HEADER_TABLE_SIZE = 1, @@ -243,8 +237,8 @@ struct Http2FrameHeader { // [RFC 7540] 5.4. Error Handling struct Http2Error { - Http2Error(const Http2ErrorClass error_class = HTTP2_ERROR_CLASS_NONE, const Http2ErrorCode error_code = HTTP2_ERROR_NO_ERROR, - const char *err_msg = NULL) + Http2Error(const Http2ErrorClass error_class = Http2ErrorClass::HTTP2_ERROR_CLASS_NONE, + const Http2ErrorCode error_code = Http2ErrorCode::HTTP2_ERROR_NO_ERROR, const char *err_msg = NULL) { cls = error_class; code = error_code; @@ -283,9 +277,9 @@ struct Http2HeadersParameter { // [RFC 7540] 6.8 GOAWAY Format struct Http2Goaway { - Http2Goaway() : last_streamid(0), error_code(0) {} + Http2Goaway() : last_streamid(0), error_code(Http2ErrorCode::HTTP2_ERROR_NO_ERROR) {} Http2StreamId last_streamid; - uint32_t error_code; + Http2ErrorCode error_code; // NOTE: we don't (de)serialize the variable length debug data at this layer // because there's @@ -354,9 +348,9 @@ bool http2_parse_goaway(IOVec, Http2Goaway &); bool http2_parse_window_update(IOVec, uint32_t &); -Http2ErrorCode http2_decode_header_blocks(HTTPHdr *, const uint8_t *, const uint32_t, uint32_t *, HpackHandle &, bool &); +Http2ErrorCode http2_decode_header_blocks(HTTPHdr *, const uint8_t *, const uint32_t, uint32_t *, HpackHandle &, bool &, uint32_t); -Http2ErrorCode http2_encode_header_blocks(HTTPHdr *, uint8_t *, uint32_t, uint32_t *, HpackHandle &); +Http2ErrorCode http2_encode_header_blocks(HTTPHdr *, uint8_t *, uint32_t, uint32_t *, HpackHandle &, int32_t); ParseResult http2_convert_header_from_2_to_1_1(HTTPHdr *); void http2_generate_h2_header_from_1_1(HTTPHdr *headers, HTTPHdr *h2_headers); diff --git a/proxy/http2/Http2ClientSession.cc b/proxy/http2/Http2ClientSession.cc index 5c4506d6660..b7ee905cc68 100644 --- a/proxy/http2/Http2ClientSession.cc +++ b/proxy/http2/Http2ClientSession.cc @@ -67,6 +67,7 @@ Http2ClientSession::Http2ClientSession() 
sm_writer(nullptr), upgrade_context(), kill_me(false), + half_close_local(false), recursion(0) { } @@ -184,6 +185,8 @@ Http2ClientSession::new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOB DebugHttp2Ssn("session born, netvc %p", this->client_vc); + this->client_vc->set_tcp_congestion_control(CLIENT_SIDE); + this->read_buffer = iobuf ? iobuf : new_MIOBuffer(HTTP2_HEADER_BUFFER_SIZE_INDEX); this->read_buffer->water_mark = connection_state.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE); this->sm_reader = reader ? reader : this->read_buffer->alloc_reader(); @@ -279,6 +282,15 @@ Http2ClientSession::reenable(VIO *vio) this->client_vc->reenable(vio); } +void +Http2ClientSession::set_half_close_local_flag(bool flag) +{ + if (!half_close_local && flag) { + DebugHttp2Ssn("session half-close local"); + } + half_close_local = flag; +} + int Http2ClientSession::main_event_handler(int event, void *edata) { @@ -396,7 +408,7 @@ Http2ClientSession::state_start_frame_read(int event, void *edata) int Http2ClientSession::do_start_frame_read(Http2ErrorCode &ret_error) { - ret_error = HTTP2_ERROR_NO_ERROR; + ret_error = Http2ErrorCode::HTTP2_ERROR_NO_ERROR; ink_release_assert(this->sm_reader->read_avail() >= (int64_t)HTTP2_FRAME_HEADER_LEN); uint8_t buf[HTTP2_FRAME_HEADER_LEN]; @@ -417,19 +429,13 @@ Http2ClientSession::do_start_frame_read(Http2ErrorCode &ret_error) this->sm_reader->consume(nbytes); if (!http2_frame_header_is_valid(this->current_hdr, this->connection_state.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE))) { - ret_error = HTTP2_ERROR_PROTOCOL_ERROR; + ret_error = Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; return -1; } // If we know up front that the payload is too long, nuke this connection. if (this->current_hdr.length > this->connection_state.server_settings.get(HTTP2_SETTINGS_MAX_FRAME_SIZE)) { - ret_error = HTTP2_ERROR_FRAME_SIZE_ERROR; - return -1; - } - - // Allow only stream id = 0 or streams started by client. 
- if (this->current_hdr.streamid != 0 && !http2_is_client_streamid(this->current_hdr.streamid)) { - ret_error = HTTP2_ERROR_PROTOCOL_ERROR; + ret_error = Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR; return -1; } @@ -438,7 +444,7 @@ Http2ClientSession::do_start_frame_read(Http2ErrorCode &ret_error) if (continued_stream_id != 0 && (continued_stream_id != this->current_hdr.streamid || this->current_hdr.type != HTTP2_FRAME_TYPE_CONTINUATION)) { - ret_error = HTTP2_ERROR_PROTOCOL_ERROR; + ret_error = Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR; return -1; } return 0; @@ -487,10 +493,12 @@ Http2ClientSession::state_process_frame_read(int event, VIO *vio, bool inside_fr Http2ErrorCode err; if (do_start_frame_read(err) < 0) { // send an error if specified. Otherwise, just go away - if (err > HTTP2_ERROR_NO_ERROR) { + if (err > Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { SCOPED_MUTEX_LOCK(lock, this->connection_state.mutex, this_ethread()); if (!this->connection_state.is_state_closed()) { - this->connection_state.send_goaway_frame(this->current_hdr.streamid, err); + this->connection_state.send_goaway_frame(this->connection_state.get_latest_stream_id_in(), err); + this->set_half_close_local_flag(true); + this->do_io_close(); } } return 0; diff --git a/proxy/http2/Http2ClientSession.h b/proxy/http2/Http2ClientSession.h index dcb8830a0e6..5de4e802714 100644 --- a/proxy/http2/Http2ClientSession.h +++ b/proxy/http2/Http2ClientSession.h @@ -28,6 +28,8 @@ #include "Plugin.h" #include "ProxyClientSession.h" #include "Http2ConnectionState.h" +#include +#include // Name Edata Description // HTTP2_SESSION_EVENT_INIT Http2ClientSession * HTTP/2 session is born @@ -152,15 +154,15 @@ class Http2ClientSession : public ProxyClientSession { public: typedef ProxyClientSession super; ///< Parent type. - Http2ClientSession(); - typedef int (Http2ClientSession::*SessionHandler)(int, void *); + Http2ClientSession(); + // Implement ProxyClientSession interface. 
- void start(); - virtual void destroy(); - virtual void free(); - virtual void new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOBufferReader *reader, bool backdoor); + void start() override; + void destroy() override; + void free() override; + void new_connection(NetVConnection *new_vc, MIOBuffer *iobuf, IOBufferReader *reader, bool backdoor) override; bool ready_to_free() const @@ -169,20 +171,20 @@ class Http2ClientSession : public ProxyClientSession } // Implement VConnection interface. - VIO *do_io_read(Continuation *c, int64_t nbytes = INT64_MAX, MIOBuffer *buf = 0); - VIO *do_io_write(Continuation *c = NULL, int64_t nbytes = INT64_MAX, IOBufferReader *buf = 0, bool owner = false); - void do_io_close(int lerrno = -1); - void do_io_shutdown(ShutdownHowTo_t howto); - void reenable(VIO *vio); - - virtual NetVConnection * - get_netvc() const + VIO *do_io_read(Continuation *c, int64_t nbytes = INT64_MAX, MIOBuffer *buf = 0) override; + VIO *do_io_write(Continuation *c = NULL, int64_t nbytes = INT64_MAX, IOBufferReader *buf = 0, bool owner = false) override; + void do_io_close(int lerrno = -1) override; + void do_io_shutdown(ShutdownHowTo_t howto) override; + void reenable(VIO *vio) override; + + NetVConnection * + get_netvc() const override { return client_vc; } - virtual void - release_netvc() + void + release_netvc() override { // Make sure the vio's are also released to avoid later surprises in inactivity timeout if (client_vc) { @@ -212,14 +214,14 @@ class Http2ClientSession : public ProxyClientSession return upgrade_context; } - virtual int - get_transact_count() const + int + get_transact_count() const override { return connection_state.get_stream_requests(); } - virtual void - release(ProxyClientTransaction *trans) + void + release(ProxyClientTransaction *trans) override { } @@ -242,39 +244,45 @@ class Http2ClientSession : public ProxyClientSession return recursion > 0; } - virtual const char * - get_protocol_string() const + const char * + 
get_protocol_string() const override { return "http/2"; } virtual int - populate_protocol(const char **result, int size) const + populate_protocol(ts::StringView *result, int size) const override { int retval = 0; - if (size > 0) { - result[0] = TS_PROTO_TAG_HTTP_2_0; - retval = 1; - if (size > 1) { - retval += super::populate_protocol(result + 1, size - 1); + if (size > retval) { + result[retval++] = IP_PROTO_TAG_HTTP_2_0; + if (size > retval) { + retval += super::populate_protocol(result + retval, size - retval); } } return retval; } virtual const char * - protocol_contains(const char *tag_prefix) const + protocol_contains(ts::StringView prefix) const override { - const char *retval = NULL; - unsigned int tag_len = strlen(tag_prefix); - if (tag_len <= strlen(TS_PROTO_TAG_HTTP_2_0) && strncmp(tag_prefix, TS_PROTO_TAG_HTTP_2_0, tag_len) == 0) { - retval = TS_PROTO_TAG_HTTP_2_0; + const char *retval = nullptr; + + if (prefix.size() <= IP_PROTO_TAG_HTTP_2_0.size() && strncmp(IP_PROTO_TAG_HTTP_2_0.ptr(), prefix.ptr(), prefix.size()) == 0) { + retval = IP_PROTO_TAG_HTTP_2_0.ptr(); } else { - retval = super::protocol_contains(tag_prefix); + retval = super::protocol_contains(prefix); } return retval; } + void set_half_close_local_flag(bool flag); + bool + get_half_close_local_flag() const + { + return half_close_local; + } + private: Http2ClientSession(Http2ClientSession &); // noncopyable Http2ClientSession &operator=(const Http2ClientSession &); // noncopyable @@ -306,6 +314,7 @@ class Http2ClientSession : public ProxyClientSession VIO *write_vio; int dying_event; bool kill_me; + bool half_close_local; int recursion; }; diff --git a/proxy/http2/Http2ConnectionState.cc b/proxy/http2/Http2ConnectionState.cc index 69a4b3bab08..dbeea209104 100644 --- a/proxy/http2/Http2ConnectionState.cc +++ b/proxy/http2/Http2ConnectionState.cc @@ -33,7 +33,7 @@ #define DebugHttp2Stream(ua_session, stream_id, fmt, ...) 
\ DebugSsn(ua_session, "http2_con", "[%" PRId64 "] [%u] " fmt, ua_session->connection_id(), stream_id, ##__VA_ARGS__); -typedef Http2Error (*http2_frame_dispatch)(Http2ConnectionState &, const Http2Frame &); +using http2_frame_dispatch = Http2Error (*)(Http2ConnectionState &, const Http2Frame &); static const int buffer_size_index[HTTP2_FRAME_TYPE_MAX] = { BUFFER_SIZE_INDEX_16K, // HTTP2_FRAME_TYPE_DATA @@ -66,7 +66,6 @@ read_rcv_buffer(char *buf, size_t bufsize, unsigned &nbytes, const Http2Frame &f static Http2Error rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) { - char buf[BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_DATA])]; unsigned nbytes = 0; Http2StreamId id = frame.header().streamid; uint8_t pad_length = 0; @@ -78,24 +77,28 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // recipient MUST // respond with a connection error of type PROTOCOL_ERROR. if (!http2_is_client_streamid(id)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv data bad frame client id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv data bad frame client id"); } Http2Stream *stream = cstate.find_stream(id); if (stream == nullptr) { if (cstate.is_valid_streamid(id)) { // This error occurs fairly often, and is probably innocuous (SM initiates the shutdown) - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_STREAM_CLOSED, NULL); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED, nullptr); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv data stream freed with invalid id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv data stream freed with invalid id"); } } // If a DATA frame is received whose stream is not in "open" or "half closed // (local)" 
state, // the recipient MUST respond with a stream error of type STREAM_CLOSED. - if (stream->get_state() != HTTP2_STREAM_STATE_OPEN && stream->get_state() != HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_STREAM_CLOSED, "recv data stream closed"); + if (stream->get_state() != Http2StreamState::HTTP2_STREAM_STATE_OPEN && + stream->get_state() != Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED, + "recv data stream closed"); } if (frame.header().flags & HTTP2_FLAGS_DATA_PADDED) { @@ -105,7 +108,8 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // If the length of the padding is the length of the // frame payload or greater, the recipient MUST treat this as a // connection error of type PROTOCOL_ERROR. - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv data pad > payload"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv data pad > payload"); } } @@ -113,26 +117,28 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) if (frame.header().flags & HTTP2_FLAGS_DATA_END_STREAM) { stream->recv_end_stream = true; if (!stream->change_state(frame.header().type, frame.header().flags)) { - cstate.send_rst_stream_frame(id, HTTP2_ERROR_STREAM_CLOSED); - return Http2Error(HTTP2_ERROR_CLASS_NONE); + cstate.send_rst_stream_frame(id, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } if (!stream->payload_length_is_valid()) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv data bad payload length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv data bad payload length"); } } // If Data length is 0, do nothing. 
if (payload_length == 0) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } // Check whether Window Size is acceptable if (cstate.server_rwnd < payload_length) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FLOW_CONTROL_ERROR, + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FLOW_CONTROL_ERROR, "recv data cstate.server_rwnd < payload_length"); } if (stream->server_rwnd < payload_length) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_FLOW_CONTROL_ERROR, "recv data stream->server_rwnd < payload_length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_FLOW_CONTROL_ERROR, + "recv data stream->server_rwnd < payload_length"); } // Update Window size @@ -144,8 +150,12 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // update its offset via consume. Otherwise, we will read the same data on the // second time through IOBufferReader *myreader = frame.reader()->clone(); + // Skip pad length field + if (frame.header().flags & HTTP2_FLAGS_DATA_PADDED) { + myreader->consume(HTTP2_DATA_PADLEN_LEN); + } while (nbytes < payload_length - pad_length) { - size_t read_len = sizeof(buf); + size_t read_len = BUFFER_SIZE_FOR_INDEX(buffer_size_index[HTTP2_FRAME_TYPE_DATA]); if (nbytes + read_len > unpadded_length) { read_len -= nbytes + read_len - unpadded_length; } @@ -171,7 +181,7 @@ rcv_data_frame(Http2ConnectionState &cstate, const Http2Frame &frame) cstate.send_window_update_frame(stream->get_id(), diff_size); } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } /* @@ -192,7 +202,8 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) DebugHttp2Stream(cstate.ua_session, stream_id, "Received HEADERS frame"); if (!http2_is_client_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, 
HTTP2_ERROR_PROTOCOL_ERROR, "recv headers bad client id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers bad client id"); } Http2Stream *stream = nullptr; @@ -201,11 +212,12 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) if (cstate.is_valid_streamid(stream_id)) { stream = cstate.find_stream(stream_id); if (stream == nullptr || !stream->has_trailing_header()) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_STREAM_CLOSED, "recv headers cannot find existing stream_id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED, + "recv headers cannot find existing stream_id"); } } else { // Create new stream - Http2Error error(HTTP2_ERROR_CLASS_NONE); + Http2Error error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); stream = cstate.create_stream(stream_id, error); new_stream = true; if (!stream) { @@ -216,7 +228,7 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // keep track of how many bytes we get in the frame stream->request_header_length += payload_length; if (stream->request_header_length > Http2::max_request_header_size) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_PROTOCOL_ERROR, + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, "recv headers payload for headers greater than header length"); } @@ -234,11 +246,13 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) frame.reader()->memcpy(buf, HTTP2_HEADERS_PADLEN_LEN); if (!http2_parse_headers_parameter(make_iovec(buf, HTTP2_HEADERS_PADLEN_LEN), params)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers failed to parse"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers failed to parse"); } if 
(params.pad_length > payload_length) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers pad > payload length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers pad > payload length"); } header_block_fragment_offset += HTTP2_HEADERS_PADLEN_LEN; @@ -251,11 +265,13 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) frame.reader()->memcpy(buf, HTTP2_PRIORITY_LEN, header_block_fragment_offset); if (!http2_parse_priority_parameter(make_iovec(buf, HTTP2_PRIORITY_LEN), params.priority)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers prioirity parameters failed parse"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers prioirity parameters failed parse"); } // Protocol error if the stream depends on itself if (stream_id == params.priority.stream_dependency) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers self dependency"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers self dependency"); } header_block_fragment_offset += HTTP2_PRIORITY_LEN; @@ -266,6 +282,7 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) DependencyTree::Node *node = cstate.dependency_tree->find(stream_id); if (node != nullptr) { stream->priority_node = node; + node->t = stream; } else { DebugHttp2Stream(cstate.ua_session, stream_id, "PRIORITY - dep: %d, weight: %d, excl: %d, tree size: %d", params.priority.stream_dependency, params.priority.weight, params.priority.exclusive_flag, @@ -284,14 +301,15 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) if (frame.header().flags & HTTP2_FLAGS_HEADERS_END_HEADERS) { // NOTE: If there are END_HEADERS flag, decode stored Header 
Blocks. if (!stream->change_state(HTTP2_FRAME_TYPE_HEADERS, frame.header().flags) && stream->has_trailing_header() == false) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, "recv headers end headers and not trailing header"); } bool empty_request = false; if (stream->has_trailing_header()) { if (!(frame.header().flags & HTTP2_FLAGS_HEADERS_END_STREAM)) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers tailing header without endstream"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers tailing header without endstream"); } // If the flag has already been set before decoding header blocks, this is the trailing header. // Set a flag to avoid initializing fetcher for now. @@ -300,15 +318,19 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) empty_request = true; } - Http2ErrorCode result = stream->decode_header_blocks(*cstate.local_hpack_handle); + Http2ErrorCode result = + stream->decode_header_blocks(*cstate.local_hpack_handle, cstate.server_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE)); - if (result != HTTP2_ERROR_NO_ERROR) { - if (result == HTTP2_ERROR_COMPRESSION_ERROR) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_COMPRESSION_ERROR, "recv headers compression error"); - } else if (result == HTTP2_ERROR_ENHANCE_YOUR_CALM) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_ENHANCE_YOUR_CALM, "recv headers enhance your calm"); + if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { + if (result == Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR, + "recv headers compression error"); + } else if (result == 
Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM, + "recv headers enhance your calm"); } else { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_PROTOCOL_ERROR, "recv headers malformed request"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv headers malformed request"); } } @@ -325,7 +347,7 @@ rcv_headers_frame(Http2ConnectionState &cstate, const Http2Frame &frame) cstate.set_continued_stream_id(stream_id); } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } /* @@ -343,17 +365,19 @@ rcv_priority_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // If a PRIORITY frame is received with a stream identifier of 0x0, the // recipient MUST respond with a connection error of type PROTOCOL_ERROR. if (stream_id == 0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "priority 0 stream_id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "priority 0 stream_id"); } // A PRIORITY frame with a length other than 5 octets MUST be treated as // a stream error (Section 5.4.2) of type FRAME_SIZE_ERROR. 
if (payload_length != HTTP2_PRIORITY_LEN) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_FRAME_SIZE_ERROR, "priority bad length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "priority bad length"); } if (!Http2::stream_priority_enabled) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } uint8_t buf[HTTP2_PRIORITY_LEN] = {0}; @@ -361,7 +385,14 @@ rcv_priority_frame(Http2ConnectionState &cstate, const Http2Frame &frame) Http2Priority priority; if (!http2_parse_priority_parameter(make_iovec(buf, HTTP2_PRIORITY_LEN), priority)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "priority parse error"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "priority parse error"); + } + + // A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + if (stream_id == priority.stream_dependency) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "PRIORITY frame depends on itself"); } DebugHttp2Stream(cstate.ua_session, stream_id, "PRIORITY - dep: %d, weight: %d, excl: %d, tree size: %d", @@ -383,7 +414,7 @@ rcv_priority_frame(Http2ConnectionState &cstate, const Http2Frame &frame) } } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static Http2Error @@ -400,35 +431,40 @@ rcv_rst_stream_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // frame is received with a stream identifier of 0x0, the recipient MUST // treat this as a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. 
- if (!http2_is_client_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "reset access stream with invalid id"); + if (stream_id == 0) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "reset access stream with invalid id"); } Http2Stream *stream = cstate.find_stream(stream_id); if (stream == nullptr) { if (cstate.is_valid_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "reset frame bad id stream not found"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "reset frame bad id stream not found"); } } // A RST_STREAM frame with a length other than 4 octets MUST be treated // as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR. if (frame.header().length != HTTP2_RST_STREAM_LEN) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FRAME_SIZE_ERROR, "reset frame wrong length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "reset frame wrong length"); } if (stream == nullptr || !stream->change_state(frame.header().type, frame.header().flags)) { // If a RST_STREAM frame identifying an idle stream is received, the // recipient MUST treat this as a connection error of type PROTOCOL_ERROR. 
- return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "reset missing stream or bad stream state"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "reset missing stream or bad stream state"); } end = frame.reader()->memcpy(buf, sizeof(buf), 0); if (!http2_parse_rst_stream(make_iovec(buf, end - buf), rst_stream)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "reset failed to parse"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "reset failed to parse"); } if (stream != nullptr) { @@ -437,7 +473,7 @@ rcv_rst_stream_frame(Http2ConnectionState &cstate, const Http2Frame &frame) cstate.delete_stream(stream); } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static Http2Error @@ -455,7 +491,8 @@ rcv_settings_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // anything other than 0x0, the endpoint MUST respond with a connection // error (Section 5.4.1) of type PROTOCOL_ERROR. if (stream_id != 0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv settings stream not 0"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv settings stream not 0"); } // [RFC 7540] 6.5. Receipt of a SETTINGS frame with the ACK flag set and a @@ -463,9 +500,10 @@ rcv_settings_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // error of type FRAME_SIZE_ERROR. 
if (frame.header().flags & HTTP2_FLAGS_SETTINGS_ACK) { if (frame.header().length == 0) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FRAME_SIZE_ERROR, "recv settings ACK header length not 0"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "recv settings ACK header length not 0"); } } @@ -473,21 +511,25 @@ rcv_settings_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // be treated as a connection error (Section 5.4.1) of type // FRAME_SIZE_ERROR. if (frame.header().length % 6 != 0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FRAME_SIZE_ERROR, "recv settings header wrong length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "recv settings header wrong length"); } while (nbytes < frame.header().length) { unsigned read_bytes = read_rcv_buffer(buf, sizeof(buf), nbytes, frame); if (!http2_parse_settings_parameter(make_iovec(buf, read_bytes), param)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv settings parse failed"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv settings parse failed"); } if (!http2_settings_parameter_is_valid(param)) { if (param.id == HTTP2_SETTINGS_INITIAL_WINDOW_SIZE) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FLOW_CONTROL_ERROR, "recv settings bad initial window size"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FLOW_CONTROL_ERROR, + "recv settings bad initial window size"); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "recv settings bad param"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, 
Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "recv settings bad param"); } } @@ -509,7 +551,7 @@ rcv_settings_frame(Http2ConnectionState &cstate, const Http2Frame &frame) Http2Frame ackFrame(HTTP2_FRAME_TYPE_SETTINGS, 0, HTTP2_FLAGS_SETTINGS_ACK); cstate.ua_session->handleEvent(HTTP2_SESSION_EVENT_XMIT, &ackFrame); - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static Http2Error @@ -519,7 +561,8 @@ rcv_push_promise_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // [RFC 7540] 8.2. A client cannot push. Thus, servers MUST treat the receipt of a // PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "promise not allowed"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "promise not allowed"); } static Http2Error @@ -534,18 +577,19 @@ rcv_ping_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // than 0x0, the recipient MUST respond with a connection error of type // PROTOCOL_ERROR. if (stream_id != 0x0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "ping id not 0"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, "ping id not 0"); } // Receipt of a PING frame with a length field value other than 8 MUST // be treated as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR. if (frame.header().length != HTTP2_PING_LEN) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FRAME_SIZE_ERROR, "ping bad length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "ping bad length"); } // An endpoint MUST NOT respond to PING frames containing this flag. 
if (frame.header().flags & HTTP2_FLAGS_PING_ACK) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } frame.reader()->memcpy(opaque_data, HTTP2_PING_LEN, 0); @@ -553,7 +597,7 @@ rcv_ping_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // ACK (0x1): An endpoint MUST set this flag in PING responses. cstate.send_ping_frame(stream_id, HTTP2_FLAGS_PING_ACK, opaque_data); - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static Http2Error @@ -569,24 +613,26 @@ rcv_goaway_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // An endpoint MUST treat a GOAWAY frame with a stream identifier other // than 0x0 as a connection error of type PROTOCOL_ERROR. if (stream_id != 0x0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "goaway id non-zero"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "goaway id non-zero"); } while (nbytes < frame.header().length) { unsigned read_bytes = read_rcv_buffer(buf, sizeof(buf), nbytes, frame); if (!http2_parse_goaway(make_iovec(buf, read_bytes), goaway)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "goaway failed parse"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "goaway failed parse"); } } DebugHttp2Stream(cstate.ua_session, stream_id, "GOAWAY: last stream id=%d, error code=%d", goaway.last_streamid, - goaway.error_code); + static_cast(goaway.error_code)); cstate.handleEvent(HTTP2_SESSION_EVENT_FINI, nullptr); // eventProcessor.schedule_imm(&cs, ET_NET, VC_EVENT_ERROR); - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static Http2Error @@ -600,7 +646,8 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // treated as 
a connection error of type FRAME_SIZE_ERROR. if (frame.header().length != HTTP2_WINDOW_UPDATE_LEN) { DebugHttp2Stream(cstate.ua_session, stream_id, "Received WINDOW_UPDATE frame - length incorrect"); - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FRAME_SIZE_ERROR, "window update bad length"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "window update bad length"); } frame.reader()->memcpy(buf, sizeof(buf), 0); @@ -610,9 +657,11 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // control window increment of 0 as a connection error of type PROTOCOL_ERROR; if (size == 0) { if (stream_id == 0) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "window update length=0 and id=0"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "window update length=0 and id=0"); } else { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_PROTOCOL_ERROR, "window update length=0"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "window update length=0"); } } @@ -629,7 +678,8 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // connection, a GOAWAY frame with an error code of FLOW_CONTROL_ERROR // is sent. 
if (size > HTTP2_MAX_WINDOW_SIZE - cstate.client_rwnd) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_FLOW_CONTROL_ERROR, "window update too big"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FLOW_CONTROL_ERROR, + "window update too big"); } cstate.client_rwnd += size; @@ -640,9 +690,10 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame) if (stream == nullptr) { if (cstate.is_valid_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "window update stream invalid id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "window update stream invalid id"); } } @@ -657,18 +708,19 @@ rcv_window_update_frame(Http2ConnectionState &cstate, const Http2Frame &frame) // connection, a GOAWAY frame with an error code of FLOW_CONTROL_ERROR // is sent. 
if (size > HTTP2_MAX_WINDOW_SIZE - stream->client_rwnd) { - return Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_FLOW_CONTROL_ERROR, "window update too big 2"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_FLOW_CONTROL_ERROR, + "window update too big 2"); } stream->client_rwnd += size; ssize_t wnd = min(cstate.client_rwnd, stream->client_rwnd); - if (!stream->is_closed() && stream->get_state() == HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE && wnd > 0) { + if (!stream->is_closed() && stream->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE && wnd > 0) { stream->send_response_body(); } } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } /* @@ -687,7 +739,8 @@ rcv_continuation_frame(Http2ConnectionState &cstate, const Http2Frame &frame) DebugHttp2Stream(cstate.ua_session, stream_id, "Received CONTINUATION frame"); if (!http2_is_client_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation bad client id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation bad client id"); } // Find opened stream @@ -698,25 +751,30 @@ rcv_continuation_frame(Http2ConnectionState &cstate, const Http2Frame &frame) Http2Stream *stream = cstate.find_stream(stream_id); if (stream == nullptr) { if (cstate.is_valid_streamid(stream_id)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_STREAM_CLOSED, "continuation stream freed with valid id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED, + "continuation stream freed with valid id"); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation stream freed with invalid id"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, 
Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation stream freed with invalid id"); } } else { switch (stream->get_state()) { - case HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_STREAM_CLOSED, "continuation half close remote"); - case HTTP2_STREAM_STATE_IDLE: + case Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_STREAM_CLOSED, + "continuation half close remote"); + case Http2StreamState::HTTP2_STREAM_STATE_IDLE: break; default: - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation bad state"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation bad state"); } } // keep track of how many bytes we get in the frame stream->request_header_length += payload_length; if (stream->request_header_length > Http2::max_request_header_size) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation payload for headers exceeded"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation payload for headers exceeded"); } uint32_t header_blocks_offset = stream->header_blocks_length; @@ -730,18 +788,23 @@ rcv_continuation_frame(Http2ConnectionState &cstate, const Http2Frame &frame) cstate.clear_continued_stream_id(); if (!stream->change_state(HTTP2_FRAME_TYPE_CONTINUATION, frame.header().flags)) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation no state change"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation no state change"); } - Http2ErrorCode result = stream->decode_header_blocks(*cstate.local_hpack_handle); + Http2ErrorCode result = + 
stream->decode_header_blocks(*cstate.local_hpack_handle, cstate.server_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE)); - if (result != HTTP2_ERROR_NO_ERROR) { - if (result == HTTP2_ERROR_COMPRESSION_ERROR) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_COMPRESSION_ERROR, "continuation compression error"); - } else if (result == HTTP2_ERROR_ENHANCE_YOUR_CALM) { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_ENHANCE_YOUR_CALM, "continuation enhance your calm"); + if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { + if (result == Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_COMPRESSION_ERROR, + "continuation compression error"); + } else if (result == Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM, + "continuation enhance your calm"); } else { - return Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, "continuation malformed request"); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "continuation malformed request"); } } @@ -754,7 +817,7 @@ rcv_continuation_frame(Http2ConnectionState &cstate, const Http2Frame &frame) DebugHttp2Stream(cstate.ua_session, stream_id, "No END_HEADERS flag, expecting CONTINUATION frame"); } - return Http2Error(HTTP2_ERROR_CLASS_NONE); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); } static const http2_frame_dispatch frame_handlers[HTTP2_FRAME_TYPE_MAX] = { @@ -803,6 +866,8 @@ Http2ConnectionState::main_event_handler(int event, void *edata) // Finalize HTTP/2 Connection case HTTP2_SESSION_EVENT_FINI: { + SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread()); + ink_assert(this->fini_received == false); this->fini_received = true; cleanup_streams(); @@ -832,30 +897,24 @@ 
Http2ConnectionState::main_event_handler(int event, void *edata) if (frame_handlers[frame->header().type]) { error = frame_handlers[frame->header().type](*this, *frame); } else { - error = Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_INTERNAL_ERROR, "no handler"); + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_INTERNAL_ERROR, "no handler"); } - if (error.cls != HTTP2_ERROR_CLASS_NONE) { + if (error.cls != Http2ErrorClass::HTTP2_ERROR_CLASS_NONE) { ip_port_text_buffer ipb; const char *client_ip = ats_ip_ntop(ua_session->get_client_addr(), ipb, sizeof(ipb)); - if (error.cls == HTTP2_ERROR_CLASS_CONNECTION) { + if (error.cls == Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION) { if (error.msg) { Error("HTTP/2 connection error client_ip=%s session_id=%" PRId64 " %s", client_ip, ua_session->connection_id(), error.msg); } - this->send_goaway_frame(stream_id, error.code); + this->send_goaway_frame(this->latest_streamid_in, error.code); + this->ua_session->set_half_close_local_flag(true); + this_ethread()->schedule_imm_local((Continuation *)this, HTTP2_SESSION_EVENT_FINI); + // The streams will be cleaned up by the HTTP2_SESSION_EVENT_FINI event // The Http2ClientSession will shutdown because connection_state.is_state_closed() will be true - - // XXX We need to think a bit harder about how to coordinate the client - // session and the - // protocol connection. At this point, the protocol is shutting down, - // but there's no way - // to tell that to the client session. Perhaps this could be solved by - // implementing the - // half-closed state ... 
- SET_HANDLER(&Http2ConnectionState::state_closed); - } else if (error.cls == HTTP2_ERROR_CLASS_STREAM) { + } else if (error.cls == Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM) { if (error.msg) { Error("HTTP/2 stream error client_ip=%s session_id=%" PRId64 " %s", client_ip, ua_session->connection_id(), error.msg); } @@ -893,6 +952,13 @@ Http2ConnectionState::state_closed(int /* event */, void * /* edata */) Http2Stream * Http2ConnectionState::create_stream(Http2StreamId new_id, Http2Error &error) { + // In half_close state, TS doesn't create new stream. Because GOAWAY frame is sent to client + if (ua_session && ua_session->get_half_close_local_flag()) { + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_REFUSED_STREAM, + "refused to create new stream, because ua_session is in half_close state"); + return nullptr; + } + bool client_streamid = http2_is_client_streamid(new_id); // 5.1.1 The identifier of a newly established stream MUST be numerically @@ -903,13 +969,13 @@ Http2ConnectionState::create_stream(Http2StreamId new_id, Http2Error &error) // connection error (Section 5.4.1) of type PROTOCOL_ERROR. if (client_streamid) { if (new_id <= latest_streamid_in) { - error = Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, "recv headers new client id less than latest stream id"); return nullptr; } } else { if (new_id <= latest_streamid_out) { - error = Http2Error(HTTP2_ERROR_CLASS_CONNECTION, HTTP2_ERROR_PROTOCOL_ERROR, + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, "recv headers new server id less than latest stream id"); return nullptr; } @@ -920,13 +986,13 @@ Http2ConnectionState::create_stream(Http2StreamId new_id, Http2Error &error) // stream limit to be exceeded MUST treat this as a stream error. 
if (client_streamid) { if (client_streams_in_count >= server_settings.get(HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS)) { - error = Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_REFUSED_STREAM, + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_REFUSED_STREAM, "recv headers creating inbound stream beyond max_concurrent limit"); return nullptr; } } else { if (client_streams_out_count >= client_settings.get(HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS)) { - error = Http2Error(HTTP2_ERROR_CLASS_STREAM, HTTP2_ERROR_REFUSED_STREAM, + error = Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_STREAM, Http2ErrorCode::HTTP2_ERROR_REFUSED_STREAM, "recv headers creating outbound stream beyond max_concurrent limit"); return nullptr; } @@ -935,7 +1001,7 @@ Http2ConnectionState::create_stream(Http2StreamId new_id, Http2Error &error) Http2Stream *new_stream = THREAD_ALLOC_INIT(http2StreamAllocator, this_ethread()); new_stream->init(new_id, client_settings.get(HTTP2_SETTINGS_INITIAL_WINDOW_SIZE)); - ink_assert(NULL != new_stream); + ink_assert(nullptr != new_stream); ink_assert(!stream_list.in(new_stream)); stream_list.push(new_stream); @@ -980,7 +1046,8 @@ Http2ConnectionState::restart_streams() while (s) { Http2Stream *next = s->link.next; - if (!s->is_closed() && s->get_state() == HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE && min(this->client_rwnd, s->client_rwnd) > 0) { + if (!s->is_closed() && s->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE && + min(this->client_rwnd, s->client_rwnd) > 0) { s->send_response_body(); } ink_assert(s != next); @@ -1002,6 +1069,8 @@ Http2ConnectionState::cleanup_streams() ink_assert(stream_list.empty()); if (!is_state_closed()) { + SCOPED_MUTEX_LOCK(lock, this->ua_session->mutex, this_ethread()); + ua_session->get_netvc()->add_to_keep_alive_queue(); ua_session->get_netvc()->cancel_active_timeout(); } @@ -1010,7 +1079,7 @@ Http2ConnectionState::cleanup_streams() bool 
Http2ConnectionState::delete_stream(Http2Stream *stream) { - ink_assert(NULL != stream); + ink_assert(nullptr != stream); // If stream has already been removed from the list, just go on if (!stream_list.in(stream)) { @@ -1029,8 +1098,8 @@ Http2ConnectionState::delete_stream(Http2Stream *stream) } } - if (stream->get_state() == HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL) { - send_rst_stream_frame(stream->get_id(), HTTP2_ERROR_NO_ERROR); + if (stream->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL) { + send_rst_stream_frame(stream->get_id(), Http2ErrorCode::HTTP2_ERROR_NO_ERROR); } stream_list.remove(stream); @@ -1055,18 +1124,24 @@ Http2ConnectionState::release_stream(Http2Stream *stream) stream_list.remove(stream); } - // If the number of clients is 0, then mark the connection as inactive - if (total_client_streams_count == 0 && ua_session) { - ua_session->clear_session_active(); - if (ua_session->get_netvc()) { - ua_session->get_netvc()->add_to_keep_alive_queue(); - ua_session->get_netvc()->cancel_active_timeout(); + if (ua_session) { + SCOPED_MUTEX_LOCK(lock, this->ua_session->mutex, this_ethread()); + + // If the number of clients is 0 and ua_session is active, then mark the connection as inactive + if (total_client_streams_count == 0 && ua_session->is_active()) { + ua_session->clear_session_active(); + UnixNetVConnection *vc = static_cast(ua_session->get_netvc()); + if (vc) { + vc->cancel_active_timeout(); + vc->add_to_keep_alive_queue(); + } } - } - if (ua_session && fini_received && total_client_streams_count == 0) { - // We were shutting down, go ahead and terminate the session - ua_session->destroy(); + if (fini_received && total_client_streams_count == 0) { + // We were shutting down, go ahead and terminate the session + ua_session->destroy(); + ua_session = nullptr; + } } } @@ -1217,7 +1292,8 @@ Http2ConnectionState::send_data_frames(Http2Stream *stream) { // To follow RFC 7540 must not send more frames other than priority on // a closed 
stream. So we return without sending - if (stream->get_state() == HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL || stream->get_state() == HTTP2_STREAM_STATE_CLOSED) { + if (stream->get_state() == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL || + stream->get_state() == Http2StreamState::HTTP2_STREAM_STATE_CLOSED) { DebugSsn(this->ua_session, "http2_cs", "Shutdown half closed local stream %d", stream->get_id()); this->delete_stream(stream); return; @@ -1266,8 +1342,9 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream) h2_hdr.destroy(); return; } - Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle)); - if (result != HTTP2_ERROR_NO_ERROR) { + Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle), + client_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE)); + if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { h2_hdr.destroy(); ats_free(buf); return; @@ -1291,7 +1368,10 @@ Http2ConnectionState::send_headers_frame(Http2Stream *stream) // Change stream state if (!stream->change_state(HTTP2_FRAME_TYPE_HEADERS, flags)) { - this->send_goaway_frame(stream->get_id(), HTTP2_ERROR_PROTOCOL_ERROR); + this->send_goaway_frame(this->latest_streamid_in, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR); + this->ua_session->set_half_close_local_flag(true); + this_ethread()->schedule_imm_local((Continuation *)this, HTTP2_SESSION_EVENT_FINI); + h2_hdr.destroy(); ats_free(buf); return; @@ -1355,8 +1435,9 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url) h2_hdr.destroy(); return; } - Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle)); - if (result != HTTP2_ERROR_NO_ERROR) { + Http2ErrorCode result = http2_encode_header_blocks(&h2_hdr, buf, buf_len, &header_blocks_size, *(this->remote_hpack_handle), + 
client_settings.get(HTTP2_SETTINGS_HEADER_TABLE_SIZE)); + if (result != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { h2_hdr.destroy(); ats_free(buf); return; @@ -1403,7 +1484,7 @@ Http2ConnectionState::send_push_promise_frame(Http2Stream *stream, URL &url) } ats_free(buf); - Http2Error error(HTTP2_ERROR_CLASS_NONE); + Http2Error error(Http2ErrorClass::HTTP2_ERROR_CLASS_NONE); stream = this->create_stream(id, error); if (!stream) { return; @@ -1434,7 +1515,7 @@ Http2ConnectionState::send_rst_stream_frame(Http2StreamId id, Http2ErrorCode ec) { DebugHttp2Stream(ua_session, id, "Send RST_STREAM frame"); - if (ec != HTTP2_ERROR_NO_ERROR) { + if (ec != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { HTTP2_INCREMENT_THREAD_DYN_STAT(HTTP2_STAT_STREAM_ERRORS_COUNT, this_ethread()); } @@ -1448,7 +1529,10 @@ Http2ConnectionState::send_rst_stream_frame(Http2StreamId id, Http2ErrorCode ec) Http2Stream *stream = find_stream(id); if (stream != nullptr) { if (!stream->change_state(HTTP2_FRAME_TYPE_RST_STREAM, 0)) { - this->send_goaway_frame(stream->get_id(), HTTP2_ERROR_PROTOCOL_ERROR); + this->send_goaway_frame(this->latest_streamid_in, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR); + this->ua_session->set_half_close_local_flag(true); + this_ethread()->schedule_imm_local((Continuation *)this, HTTP2_SESSION_EVENT_FINI); + return; } } @@ -1481,7 +1565,10 @@ Http2ConnectionState::send_settings_frame(const Http2ConnectionSettings &new_set // Write settings to send buffer if (!http2_write_settings(param, iov)) { - send_goaway_frame(0, HTTP2_ERROR_INTERNAL_ERROR); + this->send_goaway_frame(this->latest_streamid_in, Http2ErrorCode::HTTP2_ERROR_INTERNAL_ERROR); + this->ua_session->set_half_close_local_flag(true); + this_ethread()->schedule_imm_local((Continuation *)this, HTTP2_SESSION_EVENT_FINI); + return; } iov.iov_base = reinterpret_cast(iov.iov_base) + HTTP2_SETTINGS_PARAMETER_LEN; @@ -1516,12 +1603,14 @@ Http2ConnectionState::send_ping_frame(Http2StreamId id, uint8_t flag, const uint 
this->ua_session->handleEvent(HTTP2_SESSION_EVENT_XMIT, &ping); } +// As for gracefull shutdown, TS should process outstanding stream as long as possible. +// As for signal connection error, TS should close connection immediately. void Http2ConnectionState::send_goaway_frame(Http2StreamId id, Http2ErrorCode ec) { - DebugHttp2Stream(ua_session, id, "Send GOAWAY frame"); + DebugHttp2Con(ua_session, "Send GOAWAY frame, last_stream_id: %d", id); - if (ec != HTTP2_ERROR_NO_ERROR) { + if (ec != Http2ErrorCode::HTTP2_ERROR_NO_ERROR) { HTTP2_INCREMENT_THREAD_DYN_STAT(HTTP2_STAT_CONNECTION_ERRORS_COUNT, this_ethread()); } @@ -1540,8 +1629,6 @@ Http2ConnectionState::send_goaway_frame(Http2StreamId id, Http2ErrorCode ec) // xmit event SCOPED_MUTEX_LOCK(lock, this->ua_session->mutex, this_ethread()); this->ua_session->handleEvent(HTTP2_SESSION_EVENT_XMIT, &frame); - - handleEvent(HTTP2_SESSION_EVENT_FINI, nullptr); } void diff --git a/proxy/http2/Http2DebugNames.cc b/proxy/http2/Http2DebugNames.cc index 8009cfd3570..fb776bd2a1b 100644 --- a/proxy/http2/Http2DebugNames.cc +++ b/proxy/http2/Http2DebugNames.cc @@ -47,23 +47,23 @@ Http2DebugNames::get_settings_param_name(uint16_t id) } const char * -Http2DebugNames::get_state_name(uint16_t id) +Http2DebugNames::get_state_name(Http2StreamState id) { switch (id) { - case HTTP2_STREAM_STATE_IDLE: - return "HTTP2_STREAM_STATE_IDLE"; - case HTTP2_STREAM_STATE_RESERVED_LOCAL: - return "HTTP2_STREAM_STATE_RESERVED_LOCAL"; - case HTTP2_STREAM_STATE_RESERVED_REMOTE: - return "HTTP2_STREAM_STATE_RESERVED_REMOTE"; - case HTTP2_STREAM_STATE_OPEN: - return "HTTP2_STREAM_STATE_OPEN"; - case HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL: - return "HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL"; - case HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: - return "HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE"; - case HTTP2_STREAM_STATE_CLOSED: - return "HTTP2_STREAM_STATE_CLOSED"; + case Http2StreamState::HTTP2_STREAM_STATE_IDLE: + return "Http2StreamState::HTTP2_STREAM_STATE_IDLE"; + 
case Http2StreamState::HTTP2_STREAM_STATE_RESERVED_LOCAL: + return "Http2StreamState::HTTP2_STREAM_STATE_RESERVED_LOCAL"; + case Http2StreamState::HTTP2_STREAM_STATE_RESERVED_REMOTE: + return "Http2StreamState::HTTP2_STREAM_STATE_RESERVED_REMOTE"; + case Http2StreamState::HTTP2_STREAM_STATE_OPEN: + return "Http2StreamState::HTTP2_STREAM_STATE_OPEN"; + case Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL: + return "Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL"; + case Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: + return "Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE"; + case Http2StreamState::HTTP2_STREAM_STATE_CLOSED: + return "Http2StreamState::HTTP2_STREAM_STATE_CLOSED"; } return "UNKNOWN"; diff --git a/proxy/http2/Http2DebugNames.h b/proxy/http2/Http2DebugNames.h index b8364776ec8..c9160bf5e47 100644 --- a/proxy/http2/Http2DebugNames.h +++ b/proxy/http2/Http2DebugNames.h @@ -26,11 +26,13 @@ #include "ts/ink_defs.h" +enum class Http2StreamState; + class Http2DebugNames { public: static const char *get_settings_param_name(uint16_t id); - static const char *get_state_name(uint16_t id); + static const char *get_state_name(Http2StreamState id); }; #endif // __HTTP2_DEBUG_NAMES_H__ diff --git a/proxy/http2/Http2Stream.cc b/proxy/http2/Http2Stream.cc index dd92b831ec8..33ed6af8aff 100644 --- a/proxy/http2/Http2Stream.cc +++ b/proxy/http2/Http2Stream.cc @@ -41,6 +41,7 @@ Http2Stream::main_event_handler(int event, void *edata) } return 0; } + reentrancy_count++; ink_release_assert(this->get_thread() == this_ethread()); SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread()); if (e == cross_thread_event) { @@ -110,36 +111,30 @@ Http2Stream::main_event_handler(int event, void *edata) this->update_read_request(INT64_MAX, true); } break; - case VC_EVENT_EOS: { - // If there are active VIO's send the EOS through them + case VC_EVENT_EOS: if (e->cookie == &read_vio) { SCOPED_MUTEX_LOCK(lock, read_vio.mutex, this_ethread()); 
read_vio._cont->handleEvent(VC_EVENT_EOS, &read_vio); } else if (e->cookie == &write_vio) { SCOPED_MUTEX_LOCK(lock, write_vio.mutex, this_ethread()); write_vio._cont->handleEvent(VC_EVENT_EOS, &write_vio); - } else { - // Otherwise, handle the EOS yourself and shut down - SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread()); - // Clean up after yourself if this was an EOS - ink_release_assert(this->closed); - - // Safe to initiate SSN_CLOSE if this is the last stream - static_cast(parent)->connection_state.release_stream(this); - this->destroy(); } break; } + reentrancy_count--; + // Clean stream up if the terminate flag is set and we are at the bottom of the handler stack + if (terminate_stream && reentrancy_count == 0) { + destroy(); } return 0; } Http2ErrorCode -Http2Stream::decode_header_blocks(HpackHandle &hpack_handle) +Http2Stream::decode_header_blocks(HpackHandle &hpack_handle, uint32_t maximum_table_size) { - return http2_decode_header_blocks(&_req_header, (const uint8_t *)header_blocks, header_blocks_length, NULL, hpack_handle, - trailing_header); + return http2_decode_header_blocks(&_req_header, (const uint8_t *)header_blocks, header_blocks_length, nullptr, hpack_handle, + trailing_header, maximum_table_size); } void @@ -178,38 +173,38 @@ bool Http2Stream::change_state(uint8_t type, uint8_t flags) { switch (_state) { - case HTTP2_STREAM_STATE_IDLE: + case Http2StreamState::HTTP2_STREAM_STATE_IDLE: if (type == HTTP2_FRAME_TYPE_HEADERS) { if (recv_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; } else if (send_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; } else { - _state = HTTP2_STREAM_STATE_OPEN; + _state = Http2StreamState::HTTP2_STREAM_STATE_OPEN; } } else if (type == HTTP2_FRAME_TYPE_CONTINUATION) { if (recv_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; + _state = 
Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; } else if (send_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; } else { - _state = HTTP2_STREAM_STATE_OPEN; + _state = Http2StreamState::HTTP2_STREAM_STATE_OPEN; } } else if (type == HTTP2_FRAME_TYPE_PUSH_PROMISE) { - _state = HTTP2_STREAM_STATE_RESERVED_LOCAL; + _state = Http2StreamState::HTTP2_STREAM_STATE_RESERVED_LOCAL; } else { return false; } break; - case HTTP2_STREAM_STATE_OPEN: + case Http2StreamState::HTTP2_STREAM_STATE_OPEN: if (type == HTTP2_FRAME_TYPE_RST_STREAM) { - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; } else if (type == HTTP2_FRAME_TYPE_DATA) { if (recv_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; } else if (send_end_stream) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL; } else { // Do not change state } @@ -219,48 +214,48 @@ Http2Stream::change_state(uint8_t type, uint8_t flags) } break; - case HTTP2_STREAM_STATE_RESERVED_LOCAL: + case Http2StreamState::HTTP2_STREAM_STATE_RESERVED_LOCAL: if (type == HTTP2_FRAME_TYPE_HEADERS) { if (flags & HTTP2_FLAGS_HEADERS_END_HEADERS) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; } } else if (type == HTTP2_FRAME_TYPE_CONTINUATION) { if (flags & HTTP2_FLAGS_CONTINUATION_END_HEADERS) { - _state = HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; + _state = Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE; } } else { return false; } break; - case HTTP2_STREAM_STATE_RESERVED_REMOTE: + case Http2StreamState::HTTP2_STREAM_STATE_RESERVED_REMOTE: // Currently ATS supports only HTTP/2 server features return false; - case HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL: + case 
Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_LOCAL: if (type == HTTP2_FRAME_TYPE_RST_STREAM || recv_end_stream) { - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; } else { // Error, set state closed - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; return false; } break; - case HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: + case Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE: if (type == HTTP2_FRAME_TYPE_RST_STREAM || send_end_stream) { - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; } else if (type == HTTP2_FRAME_TYPE_HEADERS) { // w/o END_STREAM flag // No state change here. Expect a following DATA frame with END_STREAM flag. return true; } else { // Error, set state closed - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; return false; } break; - case HTTP2_STREAM_STATE_CLOSED: + case Http2StreamState::HTTP2_STREAM_STATE_CLOSED: // No state changing return true; @@ -319,17 +314,14 @@ void Http2Stream::do_io_close(int /* flags */) { SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread()); - // disengage us from the SM super::release(nullptr); - if (!sent_delete) { + if (!closed) { Debug("http2_stream", "do_io_close stream %d", this->get_id()); // When we get here, the SM has initiated the shutdown. Either it received a WRITE_COMPLETE, or it is shutting down. Any // remaining IO operations back to client should be abandoned. The SM-side buffers backing these operations will be deleted // by the time this is called from transaction_done. 
- - sent_delete = true; - closed = true; + closed = true; if (parent && this->is_client_state_writeable()) { // Make sure any trailing end of stream frames are sent @@ -365,7 +357,10 @@ Http2Stream::transaction_done() // Safe to initiate SSN_CLOSE if this is the last stream ink_assert(cross_thread_event == nullptr); // Schedule the destroy to occur after we unwind here. IF we call directly, may delete with reference on the stack. - cross_thread_event = this->get_thread()->schedule_imm(this, VC_EVENT_EOS, nullptr); + terminate_stream = true; + if (terminate_stream && reentrancy_count == 0) { + destroy(); + } } } @@ -380,7 +375,7 @@ Http2Stream::initiating_close() // Set the state of the connection to closed // TODO - these states should be combined closed = true; - _state = HTTP2_STREAM_STATE_CLOSED; + _state = Http2StreamState::HTTP2_STREAM_STATE_CLOSED; // leaving the reference to the SM, so we can detach from the SM when we actually destroy // current_reader = NULL; @@ -397,7 +392,7 @@ Http2Stream::initiating_close() // We are sending signals rather than calling the handlers directly to avoid the case where // the HttpTunnel handler causes the HttpSM to be deleted on the stack. 
bool sent_write_complete = false; - if (current_reader && this->is_client_state_writeable()) { + if (current_reader) { // Push out any last IO events if (write_vio._cont) { SCOPED_MUTEX_LOCK(lock, write_vio.mutex, this_ethread()); @@ -451,7 +446,7 @@ Http2Stream::send_tracked_event(Event *event, int send_event, VIO *vio) void Http2Stream::update_read_request(int64_t read_len, bool call_update) { - if (closed || sent_delete || parent == nullptr || current_reader == nullptr || read_vio.mutex == nullptr) { + if (closed || parent == nullptr || current_reader == nullptr || read_vio.mutex == nullptr) { return; } if (this->get_thread() != this_ethread()) { @@ -508,7 +503,7 @@ bool Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len, bool call_update) { bool retval = true; - if (!this->is_client_state_writeable() || closed || sent_delete || parent == nullptr || write_vio.mutex == nullptr) { + if (!this->is_client_state_writeable() || closed || parent == nullptr || write_vio.mutex == nullptr) { return retval; } if (this->get_thread() != this_ethread()) { @@ -575,7 +570,6 @@ Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len, // As with update_read_request, should be safe to call handler directly here if // call_update is true. Commented out for now while tracking a performance regression if (call_update) { // Coming from reenable. Safe to call the handler directly - inactive_timeout_at = Thread::get_hrtime() + inactive_timeout; if (write_vio._cont && this->current_reader) write_vio._cont->handleEvent(send_event, &write_vio); } else { // Called from do_io_write. Might still be setting up state. 
Send an event to let the dust settle @@ -585,7 +579,6 @@ Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len, this->mark_body_done(); // Send the data frame send_response_body(); - retval = false; } } break; @@ -606,7 +599,6 @@ Http2Stream::update_write_request(IOBufferReader *buf_reader, int64_t write_len, } else { send_response_body(); if (call_update) { // Coming from reenable. Safe to call the handler directly - inactive_timeout_at = Thread::get_hrtime() + inactive_timeout; if (write_vio._cont && this->current_reader) write_vio._cont->handleEvent(send_event, &write_vio); } else { // Called from do_io_write. Might still be setting up state. Send an event to let the dust settle @@ -639,6 +631,7 @@ Http2Stream::send_response_body() // Send DATA frames directly parent->connection_state.send_data_frames(this); } + inactive_timeout_at = Thread::get_hrtime() + inactive_timeout; } void @@ -659,6 +652,28 @@ void Http2Stream::destroy() { Debug("http2_stream", "Destroy stream %d, sent %" PRIu64 " bytes", this->_id, this->bytes_sent); + SCOPED_MUTEX_LOCK(lock, this->mutex, this_ethread()); + // Clean up after yourself if this was an EOS + ink_release_assert(this->closed); + ink_release_assert(reentrancy_count == 0); + + // Safe to initiate SSN_CLOSE if this is the last stream + if (parent) { + // release_stream and delete_stream indirectly call each other and seem to have a lot of commonality + // Should get resolved at somepoint. + Http2ClientSession *h2_parent = static_cast(parent); + h2_parent->connection_state.release_stream(this); + + // Current Http2ConnectionState implementation uses a memory pool for instantiating streams and DLL<> stream_list for storing + // active streams. Destroying a stream before deleting it from stream_list and then creating a new one + reusing the same chunk + // from the memory pool right away always leads to destroying the DLL structure (deadlocks, inconsistencies). 
+ // The following is meant as a safety net since the consequences are disastrous. Until the design/implementation changes it + // seems + // less error prone to (double) delete before destroying (noop if already deleted). + if (h2_parent->connection_state.delete_stream(this)) { + Warning("Http2Stream was about to be deallocated without removing it from the active stream list"); + } + } // Clean up the write VIO in case of inactivity timeout this->do_io_write(nullptr, 0, nullptr); @@ -682,17 +697,8 @@ Http2Stream::destroy() } chunked_handler.clear(); super::destroy(); - - // Current Http2ConnectionState implementation uses a memory pool for instantiating streams and DLL<> stream_list for storing - // active streams. Destroying a stream before deleting it from stream_list and then creating a new one + reusing the same chunk - // from the memory pool right away always leads to destroying the DLL structure (deadlocks, inconsistencies). - // The following is meant as a safety net since the consequences are disastrous. Until the design/implementation changes it seems - // less error prone to (double) delete before destroying (noop if already deleted). 
- if (parent) { - if (static_cast(parent)->connection_state.delete_stream(this)) { - Warning("Http2Stream was about to be deallocated without removing it from the active stream list"); - } - } + clear_timers(); + clear_io_events(); THREAD_FREE(this, http2StreamAllocator, this_ethread()); } diff --git a/proxy/http2/Http2Stream.h b/proxy/http2/Http2Stream.h index 121b91d3933..6c36ca25284 100644 --- a/proxy/http2/Http2Stream.h +++ b/proxy/http2/Http2Stream.h @@ -58,14 +58,7 @@ class Http2Stream : public ProxyClientTransaction _start_time(0), _thread(NULL), _id(sid), - _state(HTTP2_STREAM_STATE_IDLE), - trailing_header(false), - body_done(false), - closed(false), - sent_delete(false), - chunked(false), - data_length(0), - bytes_sent(0), + _state(Http2StreamState::HTTP2_STREAM_STATE_IDLE), cross_thread_event(NULL), active_timeout(0), active_event(NULL), @@ -117,13 +110,13 @@ class Http2Stream : public ProxyClientTransaction bytes_sent += num_bytes; } - const Http2StreamId + Http2StreamId get_id() const { return _id; } - const Http2StreamState + Http2StreamState get_state() const { return _state; @@ -169,7 +162,7 @@ class Http2Stream : public ProxyClientTransaction return content_length == 0 || content_length == data_length; } - Http2ErrorCode decode_header_blocks(HpackHandle &hpack_handle); + Http2ErrorCode decode_header_blocks(HpackHandle &hpack_handle, uint32_t maximum_table_size); void send_request(Http2ConnectionState &cstate); VIO *do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf); VIO *do_io_write(Continuation *c, int64_t nbytes, IOBufferReader *abuffer, bool owner = false); @@ -238,8 +231,9 @@ class Http2Stream : public ProxyClientTransaction bool is_client_state_writeable() const { - return _state == HTTP2_STREAM_STATE_OPEN || _state == HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE || - HTTP2_STREAM_STATE_RESERVED_LOCAL; + return _state == Http2StreamState::HTTP2_STREAM_STATE_OPEN || + _state == Http2StreamState::HTTP2_STREAM_STATE_HALF_CLOSED_REMOTE || + 
_state == Http2StreamState::HTTP2_STREAM_STATE_RESERVED_LOCAL; } bool @@ -259,6 +253,7 @@ class Http2Stream : public ProxyClientTransaction void response_process_data(bool &is_done); bool response_is_data_available() const; Event *send_tracked_event(Event *event, int send_event, VIO *vio); + HTTPParser http_parser; ink_hrtime _start_time; EThread *_thread; @@ -270,14 +265,35 @@ class Http2Stream : public ProxyClientTransaction VIO read_vio; VIO write_vio; - bool trailing_header; - bool body_done; - bool closed; - bool sent_delete; - bool chunked; - - uint64_t data_length; - uint64_t bytes_sent; + bool trailing_header = false; + bool body_done = false; + bool chunked = false; + + // A brief disucssion of similar flags and state variables: _state, closed, terminate_stream + // + // _state tracks the HTTP2 state of the stream. This field completely coincides with the H2 spec. + // + // closed is a flag that gets set when the framework indicates that the stream should be shutdown. This flag + // is set from either do_io_close, which indicates that the HttpSM is starting the close, or initiating_close, + // which indicates that the HTTP2 infrastructure is starting the close (e.g. due to the HTTP2 session shuttig down + // or a end of stream frame being received. The closed flag does not indicate that it is safe to delete the stream + // immediately. Perhaps the closed flag could be folded into the _state field. + // + // terminate_stream flag gets set from the transaction_done() method. This means that the HttpSM has shutdown. Now + // we can delete the stream object. To ensure that the session and transaction close hooks are executed in the correct order + // we need to enforce that the stream is not deleted until after the state machine has shutdown. The reentrancy_count is + // associated with the terminate_stream flag. We need to make sure that we don't delete the stream object while we have stream + // methods on the stack. 
The reentrancy count is incremented as we enter the stream event handler. As we leave the event + // handler we decrement the reentrancy count, and check to see if the teriminate_stream flag and destroy the object if that is the + // case. + // The same pattern is used with HttpSM for object clean up. + // + bool closed = false; + int reentrancy_count = 0; + bool terminate_stream = false; + + uint64_t data_length = 0; + uint64_t bytes_sent = 0; ChunkedHandler chunked_handler; Event *cross_thread_event; diff --git a/proxy/http2/HuffmanCodec.cc b/proxy/http2/HuffmanCodec.cc index 45ec21441a7..34ec166a7d8 100644 --- a/proxy/http2/HuffmanCodec.cc +++ b/proxy/http2/HuffmanCodec.cc @@ -151,21 +151,26 @@ hpack_huffman_fin() int64_t huffman_decode(char *dst_start, const uint8_t *src, uint32_t src_len) { - char *dst_end = dst_start; - uint8_t shift = 7; - Node *current = HUFFMAN_TREE_ROOT; + char *dst_end = dst_start; + uint8_t shift = 7; + Node *current = HUFFMAN_TREE_ROOT; + int byte_boundary_crossed = 0; + bool includes_zero = false; while (src_len) { if (*src & (1 << shift)) { current = current->right; } else { - current = current->left; + current = current->left; + includes_zero = true; } if (current->leaf_node == true) { *dst_end = current->ascii_code; ++dst_end; - current = HUFFMAN_TREE_ROOT; + current = HUFFMAN_TREE_ROOT; + byte_boundary_crossed = 0; + includes_zero = false; } if (shift) { --shift; @@ -173,7 +178,17 @@ huffman_decode(char *dst_start, const uint8_t *src, uint32_t src_len) shift = 7; ++src; --src_len; + ++byte_boundary_crossed; } + if (byte_boundary_crossed > 3) { + return -1; + } + } + if (byte_boundary_crossed > 1) { + return -1; + } + if (includes_zero) { + return -1; } return dst_end - dst_start; diff --git a/proxy/http2/RegressionHPACK.cc b/proxy/http2/RegressionHPACK.cc index 4d8c66dac21..ecb064894db 100644 --- a/proxy/http2/RegressionHPACK.cc +++ b/proxy/http2/RegressionHPACK.cc @@ -30,6 +30,7 @@ const static int 
DYNAMIC_TABLE_SIZE_FOR_REGRESSION_TEST = 256; const static int BUFSIZE_FOR_REGRESSION_TEST = 128; const static int MAX_TEST_FIELD_NUM = 8; const static int MAX_REQUEST_HEADER_SIZE = 131072; +const static int MAX_TABLE_SIZE = 4096; /*********************************************************************************** * * @@ -305,14 +306,13 @@ REGRESSION_TEST(HPACK_EncodeInteger)(RegressionTest *t, int, int *pstatus) box = REGRESSION_TEST_PASSED; uint8_t buf[BUFSIZE_FOR_REGRESSION_TEST]; - for (unsigned int i = 0; i < sizeof(integer_test_case) / sizeof(integer_test_case[0]); i++) { + for (const auto &i : integer_test_case) { memset(buf, 0, BUFSIZE_FOR_REGRESSION_TEST); - int len = encode_integer(buf, buf + BUFSIZE_FOR_REGRESSION_TEST, integer_test_case[i].raw_integer, integer_test_case[i].prefix); + int len = encode_integer(buf, buf + BUFSIZE_FOR_REGRESSION_TEST, i.raw_integer, i.prefix); - box.check(len == integer_test_case[i].encoded_field_len, "encoded length was %d, expecting %d", len, - integer_test_case[i].encoded_field_len); - box.check(len > 0 && memcmp(buf, integer_test_case[i].encoded_field, len) == 0, "encoded value was invalid"); + box.check(len == i.encoded_field_len, "encoded length was %d, expecting %d", len, i.encoded_field_len); + box.check(len > 0 && memcmp(buf, i.encoded_field, len) == 0, "encoded value was invalid"); } } @@ -343,14 +343,13 @@ REGRESSION_TEST(HPACK_EncodeIndexedHeaderField)(RegressionTest *t, int, int *pst uint8_t buf[BUFSIZE_FOR_REGRESSION_TEST]; - for (unsigned int i = 0; i < sizeof(indexed_test_case) / sizeof(indexed_test_case[0]); i++) { + for (const auto &i : indexed_test_case) { memset(buf, 0, BUFSIZE_FOR_REGRESSION_TEST); - int len = encode_indexed_header_field(buf, buf + BUFSIZE_FOR_REGRESSION_TEST, indexed_test_case[i].index); + int len = encode_indexed_header_field(buf, buf + BUFSIZE_FOR_REGRESSION_TEST, i.index); - box.check(len == indexed_test_case[i].encoded_field_len, "encoded length was %d, expecting %d", len, - 
indexed_test_case[i].encoded_field_len); - box.check(len > 0 && memcmp(buf, indexed_test_case[i].encoded_field, len) == 0, "encoded value was invalid"); + box.check(len == i.encoded_field_len, "encoded length was %d, expecting %d", len, i.encoded_field_len); + box.check(len > 0 && memcmp(buf, i.encoded_field, len) == 0, "encoded value was invalid"); } } @@ -457,15 +456,11 @@ REGRESSION_TEST(HPACK_DecodeInteger)(RegressionTest *t, int, int *pstatus) uint32_t actual; - for (unsigned int i = 0; i < sizeof(integer_test_case) / sizeof(integer_test_case[0]); i++) { - int len = - decode_integer(actual, integer_test_case[i].encoded_field, - integer_test_case[i].encoded_field + integer_test_case[i].encoded_field_len, integer_test_case[i].prefix); + for (const auto &i : integer_test_case) { + int len = decode_integer(actual, i.encoded_field, i.encoded_field + i.encoded_field_len, i.prefix); - box.check(len == integer_test_case[i].encoded_field_len, "decoded length was %d, expecting %d", len, - integer_test_case[i].encoded_field_len); - box.check(actual == integer_test_case[i].raw_integer, "decoded value was %d, expected %d", actual, - integer_test_case[i].raw_integer); + box.check(len == i.encoded_field_len, "decoded length was %d, expecting %d", len, i.encoded_field_len); + box.check(actual == i.raw_integer, "decoded value was %d, expected %d", actual, i.raw_integer); } } @@ -480,15 +475,12 @@ REGRESSION_TEST(HPACK_DecodeString)(RegressionTest *t, int, int *pstatus) hpack_huffman_init(); - for (unsigned int i = 0; i < sizeof(string_test_case) / sizeof(string_test_case[0]); i++) { - int len = decode_string(arena, &actual, actual_len, string_test_case[i].encoded_field, - string_test_case[i].encoded_field + string_test_case[i].encoded_field_len); + for (const auto &i : string_test_case) { + int len = decode_string(arena, &actual, actual_len, i.encoded_field, i.encoded_field + i.encoded_field_len); - box.check(len == string_test_case[i].encoded_field_len, "decoded length was 
%d, expecting %d", len, - string_test_case[i].encoded_field_len); - box.check(actual_len == string_test_case[i].raw_string_len, "length of decoded string was %d, expecting %d", actual_len, - string_test_case[i].raw_string_len); - box.check(memcmp(actual, string_test_case[i].raw_string, actual_len) == 0, "decoded string was invalid"); + box.check(len == i.encoded_field_len, "decoded length was %d, expecting %d", len, i.encoded_field_len); + box.check(actual_len == i.raw_string_len, "length of decoded string was %d, expecting %d", actual_len, i.raw_string_len); + box.check(memcmp(actual, i.raw_string, actual_len) == 0, "decoded string was invalid"); } } @@ -499,26 +491,23 @@ REGRESSION_TEST(HPACK_DecodeIndexedHeaderField)(RegressionTest *t, int, int *pst HpackIndexingTable indexing_table(4096); - for (unsigned int i = 0; i < sizeof(indexed_test_case) / sizeof(indexed_test_case[0]); i++) { + for (const auto &i : indexed_test_case) { ats_scoped_obj headers(new HTTPHdr); headers->create(HTTP_TYPE_REQUEST); MIMEField *field = mime_field_create(headers->m_heap, headers->m_http->m_fields_impl); MIMEFieldWrapper header(field, headers->m_heap, headers->m_http->m_fields_impl); - int len = - decode_indexed_header_field(header, indexed_test_case[i].encoded_field, - indexed_test_case[i].encoded_field + indexed_test_case[i].encoded_field_len, indexing_table); + int len = decode_indexed_header_field(header, i.encoded_field, i.encoded_field + i.encoded_field_len, indexing_table); - box.check(len == indexed_test_case[i].encoded_field_len, "decoded length was %d, expecting %d", len, - indexed_test_case[i].encoded_field_len); + box.check(len == i.encoded_field_len, "decoded length was %d, expecting %d", len, i.encoded_field_len); int name_len; const char *name = header.name_get(&name_len); - box.check(len > 0 && memcmp(name, indexed_test_case[i].raw_name, name_len) == 0, "decoded header name was invalid"); + box.check(len > 0 && memcmp(name, i.raw_name, name_len) == 0, "decoded header 
name was invalid"); int actual_value_len; const char *actual_value = header.value_get(&actual_value_len); - box.check(memcmp(actual_value, indexed_test_case[i].raw_value, actual_value_len) == 0, "decoded header value was invalid"); + box.check(memcmp(actual_value, i.raw_value, actual_value_len) == 0, "decoded header value was invalid"); } } @@ -529,27 +518,23 @@ REGRESSION_TEST(HPACK_DecodeLiteralHeaderField)(RegressionTest *t, int, int *pst HpackIndexingTable indexing_table(4096); - for (unsigned int i = 0; i < sizeof(literal_test_case) / sizeof(literal_test_case[0]); i++) { + for (const auto &i : literal_test_case) { ats_scoped_obj headers(new HTTPHdr); headers->create(HTTP_TYPE_REQUEST); MIMEField *field = mime_field_create(headers->m_heap, headers->m_http->m_fields_impl); MIMEFieldWrapper header(field, headers->m_heap, headers->m_http->m_fields_impl); - int len = - decode_literal_header_field(header, literal_test_case[i].encoded_field, - literal_test_case[i].encoded_field + literal_test_case[i].encoded_field_len, indexing_table); + int len = decode_literal_header_field(header, i.encoded_field, i.encoded_field + i.encoded_field_len, indexing_table); - box.check(len == literal_test_case[i].encoded_field_len, "decoded length was %d, expecting %d", len, - literal_test_case[i].encoded_field_len); + box.check(len == i.encoded_field_len, "decoded length was %d, expecting %d", len, i.encoded_field_len); int name_len; const char *name = header.name_get(&name_len); - box.check(name_len > 0 && memcmp(name, literal_test_case[i].raw_name, name_len) == 0, "decoded header name was invalid"); + box.check(name_len > 0 && memcmp(name, i.raw_name, name_len) == 0, "decoded header name was invalid"); int actual_value_len; const char *actual_value = header.value_get(&actual_value_len); - box.check(actual_value_len > 0 && memcmp(actual_value, literal_test_case[i].raw_value, actual_value_len) == 0, - "decoded header value was invalid"); + box.check(actual_value_len > 0 && 
memcmp(actual_value, i.raw_value, actual_value_len) == 0, "decoded header value was invalid"); } } @@ -565,7 +550,7 @@ REGRESSION_TEST(HPACK_Decode)(RegressionTest *t, int, int *pstatus) headers->create(HTTP_TYPE_REQUEST); hpack_decode_header_block(indexing_table, headers, encoded_field_request_test_case[i].encoded_field, - encoded_field_request_test_case[i].encoded_field_len, MAX_REQUEST_HEADER_SIZE); + encoded_field_request_test_case[i].encoded_field_len, MAX_REQUEST_HEADER_SIZE, MAX_TABLE_SIZE); for (unsigned int j = 0; j < sizeof(raw_field_request_test_case[i]) / sizeof(raw_field_request_test_case[i][0]); j++) { const char *expected_name = raw_field_request_test_case[i][j].raw_name; diff --git a/proxy/http2/test_HPACK.cc b/proxy/http2/test_HPACK.cc index 572677e2548..c6f3b931889 100644 --- a/proxy/http2/test_HPACK.cc +++ b/proxy/http2/test_HPACK.cc @@ -33,6 +33,7 @@ #include "ts/TestBox.h" const static int MAX_REQUEST_HEADER_SIZE = 131072; +const static int MAX_TABLE_SIZE = 4096; using namespace std; @@ -163,7 +164,7 @@ compare_header_fields(HTTPHdr *a, HTTPHdr *b) // Returns -1 if test passes, or returns the failed sequence number int -test_decoding(const string filename) +test_decoding(const string &filename) { HpackIndexingTable indexing_table(INITIAL_TABLE_SIZE); string line, name, value; @@ -197,7 +198,7 @@ test_decoding(const string filename) case 'w': parse_line(line, 6, name, value); unpacked_len = unpack(value, unpacked); - hpack_decode_header_block(indexing_table, &decoded, unpacked, unpacked_len, MAX_REQUEST_HEADER_SIZE); + hpack_decode_header_block(indexing_table, &decoded, unpacked, unpacked_len, MAX_REQUEST_HEADER_SIZE, MAX_TABLE_SIZE); break; } break; @@ -216,7 +217,7 @@ test_decoding(const string filename) } int -test_encoding(const string filename_in, const string filename_out) +test_encoding(const string &filename_in, const string &filename_out) { HpackIndexingTable indexing_table_for_encoding(INITIAL_TABLE_SIZE), 
indexing_table_for_decoding(INITIAL_TABLE_SIZE); string line, name, value; @@ -252,13 +253,14 @@ test_encoding(const string filename_in, const string filename_out) result = seqnum; break; } - hpack_decode_header_block(indexing_table_for_decoding, &decoded, encoded, written, MAX_REQUEST_HEADER_SIZE); + hpack_decode_header_block(indexing_table_for_decoding, &decoded, encoded, written, MAX_REQUEST_HEADER_SIZE, + MAX_TABLE_SIZE); if (compare_header_fields(&decoded, &original) != 0) { result = seqnum; break; } pack(encoded, written, packed); - ofs << " \"wire\": \"" << packed << "\"" << endl; + ofs << R"( "wire": ")" << packed << "\"" << endl; ofs << " }," << endl; } // Prepare for next sequence @@ -297,19 +299,19 @@ test_encoding(const string filename_in, const string filename_out) result = seqnum; return result; } - hpack_decode_header_block(indexing_table_for_decoding, &decoded, encoded, written, MAX_REQUEST_HEADER_SIZE); + hpack_decode_header_block(indexing_table_for_decoding, &decoded, encoded, written, MAX_REQUEST_HEADER_SIZE, MAX_TABLE_SIZE); if (compare_header_fields(&decoded, &original) != 0) { result = seqnum; return result; } pack(encoded, written, packed); - ofs << " \"wire\": \"" << packed << "\"" << endl; + ofs << R"( "wire": ")" << packed << "\"" << endl; ofs << " }" << endl; decoded.destroy(); original.destroy(); ofs << " ]," << endl; - ofs << " \"description\": \"Apache Traffic Server\"" << endl; + ofs << R"( "description": "Apache Traffic Server")" << endl; ofs << "}" << endl; return result; diff --git a/proxy/http2/test_Http2DependencyTree.cc b/proxy/http2/test_Http2DependencyTree.cc index c70180c7d7d..52bf218a1d6 100644 --- a/proxy/http2/test_Http2DependencyTree.cc +++ b/proxy/http2/test_Http2DependencyTree.cc @@ -22,7 +22,7 @@ */ #include -#include +#include #include #include "ts/TestBox.h" @@ -31,7 +31,7 @@ using namespace std; -typedef Http2DependencyTree Tree; +using Tree = Http2DependencyTree; /** * Exclusive Dependency Creation diff --git 
a/proxy/http2/test_Huffmancode.cc b/proxy/http2/test_Huffmancode.cc index a8d6a64ccd0..24344d7b8c1 100644 --- a/proxy/http2/test_Huffmancode.cc +++ b/proxy/http2/test_Huffmancode.cc @@ -22,10 +22,10 @@ */ #include "HuffmanCodec.h" -#include +#include #include -#include -#include +#include +#include using namespace std; @@ -70,10 +70,10 @@ random_test() const int size = 1024; char *dst_start = (char *)malloc(size * 2); char string[size]; - for (int i = 0; i < size; i++) { + for (char &i : string) { // coverity[dont_call] - long num = lrand48(); - string[i] = (char)num; + long num = lrand48(); + i = (char)num; } const uint8_t *src = (const uint8_t *)string; uint32_t src_len = sizeof(string); @@ -160,12 +160,12 @@ const static struct { void encode_test() { - for (uint64_t i = 0; i < sizeof(huffman_encode_test_data) / sizeof(huffman_encode_test_data[0]); ++i) { - uint8_t *dst = static_cast(malloc(huffman_encode_test_data[i].expect_len)); - int64_t encoded_len = huffman_encode(dst, huffman_encode_test_data[i].src, huffman_encode_test_data[i].src_len); + for (const auto &i : huffman_encode_test_data) { + uint8_t *dst = static_cast(malloc(i.expect_len)); + int64_t encoded_len = huffman_encode(dst, i.src, i.src_len); - assert(encoded_len == huffman_encode_test_data[i].expect_len); - assert(memcmp(huffman_encode_test_data[i].expect, dst, encoded_len) == 0); + assert(encoded_len == i.expect_len); + assert(memcmp(i.expect, dst, encoded_len) == 0); free(dst); } diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc index 1e125d445f7..7362791497c 100644 --- a/proxy/logging/Log.cc +++ b/proxy/logging/Log.cc @@ -171,7 +171,7 @@ Log::change_configuration() -------------------------------------------------------------------------*/ struct PeriodicWakeup; -typedef int (PeriodicWakeup::*PeriodicWakeupHandler)(int, void *); +using PeriodicWakeupHandler = int (PeriodicWakeup::*)(int, void *); struct PeriodicWakeup : Continuation { int m_preproc_threads; int m_flush_threads; @@ -576,7 
+576,7 @@ Log::init_fields() Ptr cache_code_map = make_ptr(new LogFieldAliasTable); cache_code_map->init( - 49, SQUID_LOG_EMPTY, "UNDEFINED", SQUID_LOG_TCP_HIT, "TCP_HIT", SQUID_LOG_TCP_DISK_HIT, "TCP_DISK_HIT", SQUID_LOG_TCP_MEM_HIT, + 50, SQUID_LOG_EMPTY, "UNDEFINED", SQUID_LOG_TCP_HIT, "TCP_HIT", SQUID_LOG_TCP_DISK_HIT, "TCP_DISK_HIT", SQUID_LOG_TCP_MEM_HIT, "TCP_MEM_HIT", SQUID_LOG_TCP_MISS, "TCP_MISS", SQUID_LOG_TCP_EXPIRED_MISS, "TCP_EXPIRED_MISS", SQUID_LOG_TCP_REFRESH_HIT, "TCP_REFRESH_HIT", SQUID_LOG_TCP_REF_FAIL_HIT, "TCP_REFRESH_FAIL_HIT", SQUID_LOG_TCP_REFRESH_MISS, "TCP_REFRESH_MISS", SQUID_LOG_TCP_CLIENT_REFRESH, "TCP_CLIENT_REFRESH_MISS", SQUID_LOG_TCP_IMS_HIT, "TCP_IMS_HIT", SQUID_LOG_TCP_IMS_MISS, diff --git a/proxy/logging/LogAccess.h b/proxy/logging/LogAccess.h index 1c0e12d1ce2..f1d724349f8 100644 --- a/proxy/logging/LogAccess.h +++ b/proxy/logging/LogAccess.h @@ -276,8 +276,8 @@ class LogAccess inkcoreapi virtual int marshal_process_uuid(char *); // STR // These two are special, in that they are shared for all log types / implementations - inkcoreapi int marshal_entry_type(char *); // INT - inkcoreapi int marshal_cache_lookup_url_canon(char *); // STR + inkcoreapi int marshal_entry_type(char *); // INT + inkcoreapi virtual int marshal_cache_lookup_url_canon(char *); // STR // named fields from within a http header // diff --git a/proxy/logging/LogAccessHttp.cc b/proxy/logging/LogAccessHttp.cc index ecd9213d1c6..7100c812149 100644 --- a/proxy/logging/LogAccessHttp.cc +++ b/proxy/logging/LogAccessHttp.cc @@ -71,7 +71,7 @@ LogAccessHttp::LogAccessHttp(HttpSM *sm) m_proxy_resp_content_type_str(nullptr), m_proxy_resp_content_type_len(0), m_cache_lookup_url_canon_str(nullptr), - m_cache_lookup_url_canon_len(0) + m_cache_lookup_url_canon_len(-1) { ink_assert(m_http_sm != nullptr); } @@ -269,9 +269,14 @@ LogAccessHttp::marshal_cache_lookup_url_canon(char *buf) int len = INK_MIN_ALIGN; validate_lookup_url(); - len = 
round_strlen(m_cache_lookup_url_canon_len + 1); // +1 for eos - if (buf) { - marshal_mem(buf, m_cache_lookup_url_canon_str, m_cache_lookup_url_canon_len, len); + if (0 >= m_cache_lookup_url_canon_len) { + // If the lookup URL isn't populated, we'll fall back to the request URL. + len = marshal_client_req_url_canon(buf); + } else { + len = round_strlen(m_cache_lookup_url_canon_len + 1); // +1 for eos + if (buf) { + marshal_mem(buf, m_cache_lookup_url_canon_str, m_cache_lookup_url_canon_len, len); + } } return len; @@ -319,7 +324,7 @@ LogAccessHttp::marshal_client_auth_user_name(char *buf) -------------------------------------------------------------------------*/ void -LogAccessHttp::validate_unmapped_url(void) +LogAccessHttp::validate_unmapped_url() { if (m_client_req_unmapped_url_canon_len < 0) { if (m_http_sm->t_state.unmapped_url.valid()) { @@ -342,7 +347,7 @@ LogAccessHttp::validate_unmapped_url(void) -------------------------------------------------------------------------*/ void -LogAccessHttp::validate_unmapped_url_path(void) +LogAccessHttp::validate_unmapped_url_path() { int len; char *c; @@ -378,7 +383,7 @@ LogAccessHttp::validate_unmapped_url_path(void) m_cache_lookup__url_canon_len fields. 
-------------------------------------------------------------------------*/ void -LogAccessHttp::validate_lookup_url(void) +LogAccessHttp::validate_lookup_url() { if (m_cache_lookup_url_canon_len < 0) { if (m_http_sm->t_state.cache_info.lookup_url_storage.valid()) { diff --git a/proxy/logging/LogBuffer.cc b/proxy/logging/LogBuffer.cc index 859940ebadd..bace10b56b5 100644 --- a/proxy/logging/LogBuffer.cc +++ b/proxy/logging/LogBuffer.cc @@ -27,9 +27,9 @@ */ #include "ts/ink_platform.h" -#include -#include -#include +#include +#include +#include #include "P_EventSystem.h" #include "LogField.h" diff --git a/proxy/logging/LogCollationClientSM.cc b/proxy/logging/LogCollationClientSM.cc index 9c5efcb241c..1e68b0bb281 100644 --- a/proxy/logging/LogCollationClientSM.cc +++ b/proxy/logging/LogCollationClientSM.cc @@ -27,10 +27,10 @@ #include "ts/ink_platform.h" -#include -#include -#include -#include +#include +#include +#include +#include #include #include "P_EventSystem.h" diff --git a/proxy/logging/LogCollationHostSM.cc b/proxy/logging/LogCollationHostSM.cc index 20e609d848a..4a96d071c27 100644 --- a/proxy/logging/LogCollationHostSM.cc +++ b/proxy/logging/LogCollationHostSM.cc @@ -27,10 +27,10 @@ #include "ts/ink_config.h" -#include -#include -#include -#include +#include +#include +#include +#include #include #include "P_EventSystem.h" diff --git a/proxy/logging/LogField.cc b/proxy/logging/LogField.cc index 35412655757..01382792d49 100644 --- a/proxy/logging/LogField.cc +++ b/proxy/logging/LogField.cc @@ -204,7 +204,7 @@ static const milestone milestones[] = { }; void -LogField::init_milestone_container(void) +LogField::init_milestone_container() { if (m_milestone_map.empty()) { for (unsigned i = 0; i < countof(milestones); ++i) { @@ -242,7 +242,7 @@ LogField::LogField(const char *name, const char *symbol, Type type, MarshalFunc } LogField::LogField(const char *name, const char *symbol, Type type, MarshalFunc marshal, UnmarshalFuncWithMap unmarshal, - Ptr map, SetFunc 
_setfunc) + const Ptr &map, SetFunc _setfunc) : m_name(ats_strdup(name)), m_symbol(ats_strdup(symbol)), m_type(type), diff --git a/proxy/logging/LogField.h b/proxy/logging/LogField.h index 1c12e60f400..c9f0356c269 100644 --- a/proxy/logging/LogField.h +++ b/proxy/logging/LogField.h @@ -120,7 +120,7 @@ class LogField LogField(const char *name, const char *symbol, Type type, MarshalFunc marshal, UnmarshalFunc unmarshal, SetFunc _setFunc = NULL); LogField(const char *name, const char *symbol, Type type, MarshalFunc marshal, UnmarshalFuncWithMap unmarshal, - Ptr map, SetFunc _setFunc = NULL); + const Ptr &map, SetFunc _setFunc = NULL); LogField(const char *field, Container container, SetFunc _setFunc = NULL); LogField(const LogField &rhs); diff --git a/proxy/logging/LogFile.cc b/proxy/logging/LogFile.cc index d9dbd132a97..e6a5bb61c7c 100644 --- a/proxy/logging/LogFile.cc +++ b/proxy/logging/LogFile.cc @@ -31,7 +31,7 @@ #include "ts/SimpleTokenizer.h" #include "ts/ink_file.h" -#include +#include #include #include #include diff --git a/proxy/logging/LogFilter.cc b/proxy/logging/LogFilter.cc index 6a5fcb91dd9..163d85a931d 100644 --- a/proxy/logging/LogFilter.cc +++ b/proxy/logging/LogFilter.cc @@ -757,8 +757,8 @@ LogFilterIP::LogFilterIP(const char *name, LogField *field, LogFilter::Action ac LogFilterIP::LogFilterIP(const LogFilterIP &rhs) : LogFilter(rhs.m_name, rhs.m_field, rhs.m_action, rhs.m_operator) { - for (IpMap::iterator spot(rhs.m_map.begin()), limit(rhs.m_map.end()); spot != limit; ++spot) { - m_map.mark(spot->min(), spot->max(), spot->data()); + for (auto &spot : rhs.m_map) { + m_map.mark(spot.min(), spot.max(), spot.data()); } this->init(); } diff --git a/proxy/logging/LogFormat.cc b/proxy/logging/LogFormat.cc index 0222759f931..c9e2ff79e3d 100644 --- a/proxy/logging/LogFormat.cc +++ b/proxy/logging/LogFormat.cc @@ -28,9 +28,9 @@ ***************************************************************************/ #include "ts/ink_config.h" -#include -#include 
-#include +#include +#include +#include #include "ts/INK_MD5.h" diff --git a/proxy/logging/LogHost.cc b/proxy/logging/LogHost.cc index 64d039683df..68eeff77f7a 100644 --- a/proxy/logging/LogHost.cc +++ b/proxy/logging/LogHost.cc @@ -312,7 +312,7 @@ LogHost::display(FILE *fd) fprintf(fd, "LogHost: %s:%u, %s\n", name(), port(), (connected(NOPING)) ? "connected" : "not connected"); LogHost *host = this; - while (host->failover_link.next != NULL) { + while (host->failover_link.next != nullptr) { fprintf(fd, "Failover: %s:%u, %s\n", host->name(), host->port(), (host->connected(NOPING)) ? "connected" : "not connected"); host = host->failover_link.next; } diff --git a/proxy/logging/LogUtils.cc b/proxy/logging/LogUtils.cc index ee8bc4bfc5e..e841944a5d4 100644 --- a/proxy/logging/LogUtils.cc +++ b/proxy/logging/LogUtils.cc @@ -24,12 +24,12 @@ #include "ts/ink_config.h" #include "ts/ink_string.h" -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include diff --git a/proxy/logstats.cc b/proxy/logstats.cc index 54cc2628242..f2f31c71104 100644 --- a/proxy/logstats.cc +++ b/proxy/logstats.cc @@ -36,7 +36,6 @@ #include "LogObject.h" #include "hdrs/HTTP.h" -#include #include #if defined(solaris) #include @@ -51,6 +50,7 @@ #include #include #include +#include #include #include #include diff --git a/proxy/shared/DiagsConfig.cc b/proxy/shared/DiagsConfig.cc index 5d405cd16ee..1623daa6fa8 100644 --- a/proxy/shared/DiagsConfig.cc +++ b/proxy/shared/DiagsConfig.cc @@ -73,13 +73,13 @@ DiagsConfig::reconfigure_diags() e = (int)REC_readInteger("proxy.config.diags.debug.enabled", &found); if (e && found) { - c.enabled[DiagsTagType_Debug] = 1; // implement OR logic + c.enabled[DiagsTagType_Debug] = true; // implement OR logic } all_found = all_found && found; e = (int)REC_readInteger("proxy.config.diags.action.enabled", &found); if (e && found) { - c.enabled[DiagsTagType_Action] = 1; // implement OR logic + 
c.enabled[DiagsTagType_Action] = true; // implement OR logic } all_found = all_found && found; @@ -224,16 +224,16 @@ DiagsConfig::config_diags_norecords() if (diags->base_debug_tags) { diags->activate_taglist(diags->base_debug_tags, DiagsTagType_Debug); - c.enabled[DiagsTagType_Debug] = 1; + c.enabled[DiagsTagType_Debug] = true; } else { - c.enabled[DiagsTagType_Debug] = 0; + c.enabled[DiagsTagType_Debug] = false; } if (diags->base_action_tags) { diags->activate_taglist(diags->base_action_tags, DiagsTagType_Action); - c.enabled[DiagsTagType_Action] = 1; + c.enabled[DiagsTagType_Action] = true; } else { - c.enabled[DiagsTagType_Action] = 0; + c.enabled[DiagsTagType_Action] = false; } #if !defined(__GNUC__) diff --git a/proxy/shared/UglyLogStubs.cc b/proxy/shared/UglyLogStubs.cc index 916636f0756..c083af49747 100644 --- a/proxy/shared/UglyLogStubs.cc +++ b/proxy/shared/UglyLogStubs.cc @@ -38,8 +38,8 @@ int fds_limit = 8000; class FakeUDPNetProcessor : public UDPNetProcessor { - virtual int - start(int, size_t) + int + start(int, size_t) override { ink_release_assert(false); return 0; diff --git a/rc/trafficserver.in b/rc/trafficserver.in index cc3ea6390b7..ee4c54f6c4d 100644 --- a/rc/trafficserver.in +++ b/rc/trafficserver.in @@ -314,9 +314,9 @@ case "$1" in test "x$VERBOSE" != "xno" && log_end_msg "$retval" exit "$retval" elif [ "$DISTRIB_ID" = "fedora" -o "$DISTRIB_ID" = "redhat" ]; then - action "Stopping ${TC_NAME}:" killproc -p $TC_PIDFILE $TC_DAEMON - action "Stopping ${TM_NAME}:" killproc -p $TM_PIDFILE $TM_DAEMON - action "Stopping ${TS_NAME}:" killproc -p $TS_PIDFILE $TS_DAEMON + action "Stopping ${TC_NAME}:" killproc -p $TC_PIDFILE -d 35 $TC_DAEMON + action "Stopping ${TM_NAME}:" killproc -p $TM_PIDFILE -d 35 $TM_DAEMON + action "Stopping ${TS_NAME}:" killproc -p $TS_PIDFILE -d 35 $TS_DAEMON elif [ "$DISTRIB_ID" = "gentoo" ]; then ebegin "Starting ${TS_PACKAGE_NAME}" do_stop diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 
00000000000..95afdf2fb04 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[pep8] +max-line-length = 132 diff --git a/tests/autest.sh b/tests/autest.sh new file mode 100755 index 00000000000..33c4d218a78 --- /dev/null +++ b/tests/autest.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +pushd $(dirname $0) > /dev/null +export PYTHONPATH=$(pwd):$PYTHONPATH +RED='\033[0;31m' +GREEN='\033[1;32m' +NC='\033[0m' # No Color +if [ ! 
-f ./env-test/bin/autest ]; then\ + echo -e "${RED}AuTest is not installed! Bootstrapping system...${NC}";\ + ./bootstrap.py;\ + echo -e "${GREEN}Done!${NC}";\ + fi +# this is for rhel or centos systems +test -r /opt/rh/rh-python35/enable && . /opt/rh/rh-python35/enable +. env-test/bin/activate +./env-test/bin/autest -D gold_tests "$@" +ret=$? +popd > /dev/null +exit $ret diff --git a/tests/bootstrap.py b/tests/bootstrap.py new file mode 100755 index 00000000000..44a6d14a9e0 --- /dev/null +++ b/tests/bootstrap.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +# this script sets up the testing packages to allow the tests + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +import argparse +import os +import subprocess +import platform +import sys + +pip_packages = [ + "autest", + "hyper" +] + + +distro_packages = { + "RHEL": [ + "install epel-release", + "install python35", + "install rh-python35-python-virtualenv" + ], + "Fedora": [ + "install python3", + "install python3-virtualenv", + "install python-virtualenv", + ], + "Ubuntu": [ + "install python3", + "install python3-virtualenv", + "install virtualenv" + ], + "CentOS": [ + "install epel-release", + "install rh-python35-python-virtualenv" + ] +} + + +def command_output(cmd_str): + print(cmd_str) + proc = subprocess.Popen( + cmd_str, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True) + + # while command runs get output + while proc.poll() == None: + tmp = proc.stdout.readline() + sys.stdout.write(tmp) + + for last_output in proc.stdout.readlines(): + sys.stdout.write(last_output) + + return proc.returncode + + +def get_distro(): + return platform.linux_distribution() + + +def distro_version(): + return int(get_distro()[1].split(".")[0]) + + +def isFedora(): + return get_distro()[0].startswith("Fedora") + + +def isCentOS(): + return get_distro()[0].startswith("CentOS") + + +def distro(): + if isFedora(): + return "Fedora" + if isCentOS(): + return "CentOS" + if get_distro()[0].startswith("Red Hat"): + return "RHEL" + if get_distro()[0].startswith("Ubuntu"): + return "Ubuntu" + + +def isRedHatBased(): + return get_distro()[0].startswith("Red Hat") or get_distro()[0].startswith( + "Fedora") or get_distro()[0].startswith("CentOS") + + +def isInstalled(prog): + out = subprocess.Popen( + ["which", prog], stdout=subprocess.PIPE).communicate() + if out[0] != '': + return True + return False + + +def installManagerName(): + if isRedHatBased() and distro_version() >= 22: + ret = "sudo dnf -y" # Fedora 22 or newer + elif isRedHatBased(): + ret = "sudo yum -y" # Red Hat distro 
+ else: + ret = "sudo apt-get -y" # Ubuntu/Debian + + return ret + + +def installToolName(): + if isRedHatBased(): + ret = "rpm -ihv" # Red Hat Based + else: + ret = "dpkg -iv" # Ubuntu/Debian + + return ret + + +def run_cmds(cmds): + for cmd in cmds: + # print (cmd.split[" "]) + # subprocess.call(cmd.split[" "]) + if command_output(cmd): + print("'{0}'' - Failed".format(cmd)) + + +def gen_package_cmds(packages): + + # main install tool/manager (yum, dnf, apt-get, etc) + mtool = installManagerName() + # core install tool (rpm, dpkg, etc) + itool = installToolName() + ret = [] + + for p in packages: + if p.startswith("wget"): + pth = p[5:] + pack = os.path.split(pth)[1] + cmd = ["wget {0}".format(pth), "{0} ./{1}".format(itool, pack)] + else: + cmd = ["{0} {1}".format(mtool, p)] + ret.extend(cmd) + return ret + + +extra = '' +if distro() == 'RHEL' or distro() == 'CentOS': + extra = ". /opt/rh/rh-python35/enable ;" + + +def venv_cmds(path): + ''' + Create virtual environment and add it + to the path being used for the script + ''' + + return [ + # first command only needed for rhel and centos systems at this time + extra + " virtualenv --python=python3 {0}".format(path), + extra + " {0}/bin/pip install pip --upgrade".format(path) + ] + + +def main(): + " main script logic" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--use-pip", nargs='?', default="pip", help="Which pip to use") + + parser.add_argument( + "venv_path", + nargs='?', + default="env-test", + help="The directory to us to for the virtualenv") + + parser.add_argument( + "--disable-virtualenv", + default=False, + action='store_true', + help="Do not create virtual environment to install packages under") + + parser.add_argument( + '-V', '--version', action='version', version='%(prog)s 1.0.0') + + args = parser.parse_args() + # print(args) + # print(get_distro()) + + # do we know of packages to install for the given platform + dist = distro() + cmds = [] + if dist: + cmds = 
gen_package_cmds(distro_packages[dist]) + + # test to see if we should use a certain version of pip + path_to_pip = None + if args.use_pip != "pip": + path_to_pip = args.use_pip + + # install on the system, or use virtualenv for pip based stuff + if not args.disable_virtualenv: + # Create virtual env + cmds += venv_cmds(args.venv_path) + if path_to_pip is None: + path_to_pip = os.path.join(args.venv_path, "bin", args.use_pip) + + cmds += [extra + "{0} install {1}".format(path_to_pip, " ".join(pip_packages))] + + run_cmds(cmds) + + +if __name__ == '__main__': + main() diff --git a/tests/getting_started.md b/tests/getting_started.md new file mode 100644 index 00000000000..3d36a9d205d --- /dev/null +++ b/tests/getting_started.md @@ -0,0 +1,197 @@ + +# Getting Started + +This directory contains different tests for Apache Trafficserver. It is recommended that all test move to this common area under the correct location based on the type of test being added. + +## Layout +The current layout is: + +**gold_tests/** - contains all the TSQA v4 based tests that run on the Reusable Gold Testing System (AuTest) +**tools/** - contain programs used to help with testing. + +In the future a directory called **"unit/"** will be added for adding unit tests based on some standardized testing system. + + +## Scripts + +To help with easy running of the tests, there is a autest.sh and bootstrap.py file. + +### autest.sh +This file is a simple wrapper that will call the AuTest program in a python virtualenv. If the virtualenv is not setup it will try to install system. That will set up the Reusable Gold Testing System on most systems in a Python virtual environment. The wrapper add some basic options to the command to point to the location of the tests. Add --help for more details on options for running autest test system. + +### bootstrap.py +This script should try to install python35 or better on the system, and needed python packages for running the tests. 
+ +# Advanced setup + +AuTest can be installed manually instead of using the wrapper script. The advantage of this is that it is often easier to debug issues with the testing system, or the tests. There are two ways this can be done. +1. run the bootstrap script then source the path with a "source ./env-test/bin/activate" command. At this point autest command should run without the wrapper script +2. The other way is to make sure you install python 3.5 or better on your system. From there install these python packages ( ie pip install ): + - hyper + - git+https://bitbucket.org/dragon512/reusable-gold-testing-system.git + +# Writing tests for AuTest +When writing for the AuTest system please refer to the current documentation on the [online wiki](https://bitbucket.org/dragon512/reusable-gold-testing-system/wiki/Home) for general use of the system. + +## Documentation of AuTest extension for ATS. +Autest allows for the creation of extensions to help specialize and simplify test writing for a given application domain. Minus API addition the extension code will check that python 3.5 or better is used. There is also a new command line argument added: + +--ats-bin < path to bin directory > + +This command line argument will point to your build of ATS you want to test. At this time v6.0 or newer of Trafficserver should work. + +### MakeATSProcess(name,command=[traffic_server],select_ports=[True]) + * name - A name for this instance of ATS + * command - optional argument defining what process to use. Defaults to traffic_server. + * select_ports - have the testing system auto select the ports to use for this instance of ATS + +This function will define a sandbox for an instance of trafficserver to run under. The function will return an AuTest process object that will have a number of files and variables defined for making it easier to define a test.
+ +#### Environment +The environment of the process will have a number of added environment variables to control trafficserver running the in the sandbox location correctly. This can be used to easily setup other commands that should run under same environment. + +##### Example + +```python +# Define default ATS +ts=Test.MakeATSProcess("ts") +# Call traffic_ctrl to set new generation +tr=Test.AddTestRun() +tr.Processes.Default.Command='traffic_ctl' +tr.Processes.Default.ReturnCode=0 +# set the environment for traffic_control to run in to be the same as the "ts" ATS instance +tr.Processes.Default.Env=ts.Env +``` + +#### Variables +These are the current variable that are define dynamically + +port - the ipv4 port to listen on +portv6 - the ipv4 port to listen on +manager_port - the manager port used. This is set even is select_port is False +admin_port - the admin port used. This is set even is select_port is False + +#### File objects +A number of file object are define to help with adding values to a given configuration value to for a test, or testing a value exists in a log file. 
File that are defined currently are: + +##### log files + * squid.log + * error.log + * diags.log + +##### config files + * records.config + * cache.config + * congestion.config + * hosting.config + * icp.config + * ip_allow.config + * log_hosts.config + * logging.config + * metrics.config + * parent.config + * plugin.config + * remap.config + * socks.config + * splitdns.config + * ssl_multicert.config + * storage.config + * vaddrs.config + * volume.config + +#### Examples + +Create a server + +```python +# don't set ports because a config file will set them +ts1 = Test.MakeATSProcess("ts1",select_ports=False) +ts1.Setup.ts.CopyConfig('config/records_8090.config','records.config') +``` + +Create a server and get the dynamic port value + +```python +# Define default ATS +ts=Test.MakeATSProcess("ts") +#first test is a miss for default +tr=Test.AddTestRun() +# get port for command from Variables +tr.Processes.Default.Command='curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port) + +``` + +Add value to a configuration file +```python +# setup some config file for this server +ts.Disk.records_config.update({ + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation':-1, # Start with cache turned off + 'proxy.config.config_update_interval_ms':1, + }) +ts.Disk.plugin_config.AddLine('xdebug.so') +ts.Disk.remap_config.AddLines([ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + #line 2 + 'map /generation1/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + #line 3 + 'map /generation2/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so' + ]) +``` + +### CopyConfig(file, targetname=None, process=None) +* file - name of the file to copy. 
Relative paths are relative from the test file location +* targetname - the name name of the file when copied to the correct configuration location +* process - optional process object to use for getting path location to copy to. Only needed if the Setup object call is not in the scope of the process object created with the MakeATSProcess(...) API. + +This function copies a given configuration file the location of a given trafficserver sandbox used in a test. Given a test might have more than on trafficserver instance, it can be difficult to understand the correct location to copy to. This function will deal with the details correctly. + +#### Examples + +Copy a file over + +```python +ts1 = Test.MakeATSProcess("ts1",select_ports=False) +# uses the setup object in the scope of the process object +ts1.Setup.ts.CopyConfig('config/records_8090.config','records.config') +``` +```python +ts1 = Test.MakeATSProcess("ts1",select_ports=False) +# uses the Setup in the global process via a variable passing +Test.Setup.ts.CopyConfig('config/records_8090.config','records.config',ts1) +# same as above, but uses the dynamic object model form +Test.Setup.ts.CopyConfig('config/records_8090.config','records.config',Test.Processes.ts1) +``` + +## Setup Origin Server +### Test.MakeOriginServer(Name) + * name - A name for this instance of Origin Server. + + This function returns a AuTest process object that launches the python-based microserver. Micro-Server is a mock server which responds to client http requests. Microserver needs to be setup for the tests that require an origin server behind ATS. The server reads a JSON-formatted data file that contains request headers and the corresponding response headers. Microserver responds with payload if the response header contains Content-Length or Transfer-Enconding specified. 
+ +### addResponse(filename, request_header, response_header) +* filename - name of the file where the request header and response header will be written to in JSON format +* request_header - dictionary of request header +* response_header - dictionary of response header corresponding to the request header. + +This function adds the request header and response header to a file which is then read by the microserver to populate request-response map. The key-fields required for the header dictionary are 'headers', 'timestamp' and 'body'. + +### Example +```python +#create the origin server process +server=Test.MakeOriginServer("server") +#define the request header and the desired response header +request_header={"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +#desired response form the origin server +response_header={"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +#addResponse adds the transaction to a file which is used by the server +server.addResponse("sessionlog.json", request_header, response_header) +#add remap rule to traffic server +ts.Disk.remap_config.AddLine( + 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) +) +``` diff --git a/tests/gold_tests/autest-site/copy_config.test.ext b/tests/gold_tests/autest-site/copy_config.test.ext new file mode 100644 index 00000000000..2053989ca7b --- /dev/null +++ b/tests/gold_tests/autest-site/copy_config.test.ext @@ -0,0 +1,49 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from future.utils import native_str + +class CopyATSConfig(SetupItem): + def __init__(self, file, targetname=None, process=None): + super(CopyATSConfig, self).__init__( + itemname="CopyATSConfig" + ) + self.file=file + # some protection + if process is None and not isinstance(targetname,native_str): + self.process=targetname + else: + self.process=process + self.targetname=targetname + + def setup(self): + + process = self.process if self.process else self + try: + ts_dir=process.Env['TS_ROOT'] + except: + if self.process: + raise SetupError('TS_ROOT is not defined. Cannot copy ats config file without location to copy to.') + else: + raise SetupError('TS_ROOT is not defined. Cannot copy ats config file without location to copy to. Please pass in an ATS process object') + config_dir = os.path.join(ts_dir,process.ComposeVariables().SYSCONFDIR.replace(process.ComposeVariables().PREFIX+"/","")) + host.WriteVerbose("CopyATSConfig", "Copying {0} to {1}".format(self.file, config_dir)) + self.CopyAs(self.file,config_dir,self.targetname) + +AddSetupItem(CopyATSConfig, "CopyConfig", ns="ts") diff --git a/tests/gold_tests/autest-site/init.cli.ext b/tests/gold_tests/autest-site/init.cli.ext new file mode 100644 index 00000000000..cc3a1a6fdab --- /dev/null +++ b/tests/gold_tests/autest-site/init.cli.ext @@ -0,0 +1,27 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import sys +if sys.version_info<(3,5,0): + host.WriteError("You need python 3.5 or later to run these tests\n",show_stack=False) + +Settings.path_argument(["--ats-bin"], + required=True, + help="A user provided directory to ATS bin") \ No newline at end of file diff --git a/tests/gold_tests/autest-site/microserver.test.ext b/tests/gold_tests/autest-site/microserver.test.ext new file mode 100644 index 00000000000..4c39843eaf0 --- /dev/null +++ b/tests/gold_tests/autest-site/microserver.test.ext @@ -0,0 +1,121 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ports import get_port +import json + +def addMethod(self,testName, request_header, functionName): + return + +# creates the full request or response block using headers and message data +def httpObject(self,header,data): + r=dict() + r["timestamp"]="" + r["headers"]=header + r["body"]=data + return r + +# addResponse adds customized response with respect to request_header. request_header and response_header are both dictionaries +def addResponse(self,filename, testName, request_header, response_header): + + txn = dict() + txn["timestamp"] = "" + txn["uuid"] = testName + txn["request"] = request_header + txn["response"] = response_header + print("data dir",self.Variables.DataDir) + addTransactionToSession(txn,filename) + absFilepath=os.path.abspath(filename) + self.Setup.CopyAs(absFilepath,self.Variables.DataDir) + return + + + +# addResponse adds customized response with respect to request_header. request_header and response_header are both dictionaries +def addResponse(self,filename, request_header, response_header): + requestline = request_header["headers"].split("\r\n")[0] + requestline = requestline.split(" ")[1] + resourceLocation = requestline.split("/",1) + if len(resourceLocation)>1: + rl = resourceLocation[1] + else: + rl = "" + txn = dict() + txn["timestamp"] = "" + txn["uuid"] = rl + txn["request"] = request_header + txn["response"] = response_header + absFilepath = os.path.join(self.Variables.DataDir, filename) + addTransactionToSession(txn,absFilepath) + #absFilepath=os.path.abspath(filename) + #self.Setup.CopyAs(absFilepath,self.Variables.DataDir) + return + +#adds transaction in json format to the specified file +def addTransactionToSession(txn,JFile): + jsondata=None + if not os.path.exists(os.path.dirname(JFile)): + os.makedirs(os.path.dirname(JFile)) + if os.path.exists(JFile): + jf = open(JFile,'r') + jsondata = json.load(jf) + + if jsondata == None: + jsondata = dict() + jsondata["version"]='0.1' + jsondata["timestamp"]="1234567890.098" 
+ jsondata["encoding"]="url_encoded" + jsondata["txns"]=list() + jsondata["txns"].append(txn) + else: + jsondata["txns"].append(txn) + with open(JFile,'w+') as jf: + jf.write(json.dumps(jsondata)) + + +#make headers with the key and values provided +def makeHeader(self,requestString, **kwargs): + headerStr = requestString+'\r\n' + for k,v in kwargs.iteritems(): + headerStr += k+': '+v+'\r\n' + headerStr = headerStr+'\r\n' + return headerStr + + +def MakeOriginServer(obj, name,public_ip=False,options={}): + server_path= os.path.join(obj.Variables.AtsTestToolsDir,'microServer/uWServer.py') + data_dir = os.path.join(obj.RunDirectory, name) + # create Process + p = obj.Processes.Process(name) + port=get_port(p,"Port") + command = "python3 {0} --data-dir {1} --port {2} --public {3} -m test".format(server_path, data_dir, port, public_ip) + for flag,value in options.items() : + command += " {} {}".format(flag,value) + + # create process + p.Command = command + p.Setup.MakeDir(data_dir) + p.Variables.DataDir = data_dir + p.Ready = When.PortOpen(port) + AddMethodToInstance(p,addResponse) + AddMethodToInstance(p,addTransactionToSession) + + return p + +AddTestRunSet(MakeOriginServer,name="MakeOriginServer") +AddTestRunSet(MakeOriginServer,name="MakeOrigin") diff --git a/tests/gold_tests/autest-site/min_cfg/cache.config b/tests/gold_tests/autest-site/min_cfg/cache.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/cache.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/hosting.config b/tests/gold_tests/autest-site/min_cfg/hosting.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/hosting.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/ip_allow.config 
b/tests/gold_tests/autest-site/min_cfg/ip_allow.config new file mode 100644 index 00000000000..061bbe5c0b6 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/ip_allow.config @@ -0,0 +1,4 @@ +src_ip=127.0.0.1 action=ip_allow method=ALL +src_ip=::1 action=ip_allow method=ALL +src_ip=0.0.0.0-255.255.255.255 action=ip_deny method=PUSH|PURGE|DELETE +src_ip=::-ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff action=ip_deny method=PUSH|PURGE|DELETE \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/parent.config b/tests/gold_tests/autest-site/min_cfg/parent.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/parent.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/readme.txt b/tests/gold_tests/autest-site/min_cfg/readme.txt new file mode 100644 index 00000000000..15a2cf87a8d --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/readme.txt @@ -0,0 +1,3 @@ +Contains the minimum set of config files and settings to allow trafficserver to start in a usable way. +The goal is to remove the need for any of these files to exist.
+Each of these files should provide an understanding of what we need to fix in the code diff --git a/tests/gold_tests/autest-site/min_cfg/records.config b/tests/gold_tests/autest-site/min_cfg/records.config new file mode 100644 index 00000000000..8d97ae7bd01 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/records.config @@ -0,0 +1 @@ +# some stuff diff --git a/tests/gold_tests/autest-site/min_cfg/remap.config b/tests/gold_tests/autest-site/min_cfg/remap.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/remap.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/ssl_multicert.config b/tests/gold_tests/autest-site/min_cfg/ssl_multicert.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/ssl_multicert.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/min_cfg/storage.config b/tests/gold_tests/autest-site/min_cfg/storage.config new file mode 100644 index 00000000000..4ebfe89f5fd --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/storage.config @@ -0,0 +1,4 @@ +# seems good enough for doing something for playing with.
+# not good for production +# File must exist and must have this value in it +storage 256MB diff --git a/tests/gold_tests/autest-site/min_cfg/volume.config b/tests/gold_tests/autest-site/min_cfg/volume.config new file mode 100644 index 00000000000..6ac656b1b66 --- /dev/null +++ b/tests/gold_tests/autest-site/min_cfg/volume.config @@ -0,0 +1 @@ +# this files just needs to exist \ No newline at end of file diff --git a/tests/gold_tests/autest-site/ports.py b/tests/gold_tests/autest-site/ports.py new file mode 100644 index 00000000000..d999b2fe015 --- /dev/null +++ b/tests/gold_tests/autest-site/ports.py @@ -0,0 +1,108 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import socket +import subprocess + +try: + import queue as Queue +except ImportError: + import Queue + +g_ports = None # ports we can use + + +def PortOpen(port, address=None): + + ret = False + if address is None: + address = "localhost" + + address = (address, port) + + try: + s = socket.create_connection(address, timeout=.5) + s.close() + ret = True + except socket.error: + s = None + ret = False + except socket.timeout: + s = None + + return ret + + +def setup_port_queue(amount=1000): + global g_ports + if g_ports is None: + g_ports = Queue.LifoQueue() + else: + return + try: + dmin, dmax = subprocess.check_output( + ["sysctl", "net.ipv4.ip_local_port_range"]).decode().split("=")[1].split() + dmin = int(dmin) + dmax = int(dmax) + except: + return + + rmin = dmin - 2000 + rmax = 65536 - dmax + + if rmax > amount: + # fill in ports + port = dmax + 1 + while port < 65536 and g_ports.qsize() < amount: + # if port good: + if not PortOpen(port): + g_ports.put(port) + port += 1 + if rmin > amount and g_ports.qsize() < amount: + port = 2001 + while port < dmin and g_ports.qsize() < amount: + # if port good: + if not PortOpen(port): + g_ports.put(port) + port += 1 + + +def get_port(obj, name): + ''' + Get a port and set it to a variable on the object + + ''' + + setup_port_queue() + if g_ports.qsize(): + # get port + port = g_ports.get() + # assign to variable + obj.Variables[name] = port + # setup clean up step to recycle the port + obj.Setup.Lambda(func_cleanup=lambda: g_ports.put( + port), description="recycling port") + return port + + # use old code + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('', 0)) # bind to all interfaces on an ephemeral port + port = sock.getsockname()[1] + obj.Variables[name] = port + return port diff --git a/tests/gold_tests/autest-site/setup.cli.ext b/tests/gold_tests/autest-site/setup.cli.ext new file mode 100644 index 00000000000..6f38a873e5f --- 
/dev/null +++ b/tests/gold_tests/autest-site/setup.cli.ext @@ -0,0 +1,49 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, subprocess +import pprint + +if Arguments.ats_bin is not None: + # Add environment variables + ENV['ATS_BIN'] = Arguments.ats_bin + #if Arguments.ats_bin not in ENV['PATH']: + #ENV['PATH'] = Arguments.ats_bin + ':' + ENV['PATH'] + +if ENV['ATS_BIN'] is not None: + # Add variables for Tests + traffic_layout = os.path.join(ENV['ATS_BIN'], "traffic_layout") + if not os.path.isdir(ENV['ATS_BIN']): + host.WriteError("--ats-bin requires a directory", show_stack=False) + if not os.path.isfile(traffic_layout): + host.WriteError("traffic_layout is not found. Aborting tests - Bad build or install.", show_stack=False) + try: + out = subprocess.check_output([traffic_layout, "--json"]) + except subprocess.CalledProcessError: + host.WriteError("traffic_layout is broken. 
Aborting tests - The build of traffic server is bad.", show_stack=False) + out = json.loads(out.decode("utf-8")) + for k,v in out.items(): + out[k]=v[:-1] if v.endswith('/') else v + Variables.update(out) + host.WriteVerbose(['ats'],"Traffic server layout Data:\n",pprint.pformat(out)) + +Variables.AtsTestToolsDir = os.path.join(AutestSitePath,'../../tools') + +# modify delay times as we always have to kill Trafficserver +# no need to wait +Variables.Autest.StopProcessLongDelaySeconds=0 diff --git a/tests/gold_tests/autest-site/trafficserver.test.ext b/tests/gold_tests/autest-site/trafficserver.test.ext new file mode 100644 index 00000000000..70a71117dec --- /dev/null +++ b/tests/gold_tests/autest-site/trafficserver.test.ext @@ -0,0 +1,361 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function +import os +import socket +from ports import get_port + + +def make_id(s): + return s.replace(".", "_").replace('-', '_') + +# this forms is for the global process define + + +def MakeATSProcess(obj, name, command='traffic_server', select_ports=True): + ##################################### + # common locations + + # directory we will setup for the ATS to run under + ts_dir = os.path.join(obj.RunDirectory, name) + # common bin directory + bin_dir = 'bin' + # ideally we would use a value like config.. but there is a bug + # in which records.conf values are not loaded correctly from this location, + # so we use the expecetd "build" layout to correct the issue for the time + # being + config_dir = os.path.join( + ts_dir, + obj.Variables.SYSCONFDIR.replace( + obj.Variables.PREFIX + "/", + "" + ) + ) + # directory contains the html response templates + template_dir = os.path.join(config_dir, "body_factory") + # contains plugins + plugin_dir = os.path.join( + ts_dir, obj.Variables.PLUGINDIR.replace(obj.Variables.PREFIX + "/", "")) + # the log directory + log_dir = os.path.join(ts_dir, 'log') + runtime_dir = os.path.join( + ts_dir, obj.Variables.RUNTIMEDIR.replace(obj.Variables.PREFIX + "/", "")) + ssl_dir = os.path.join(ts_dir, 'ssl') + storage_dir = os.path.join(ts_dir, 'storage') + # create process + p = obj.Processes.Process(name, command) + + # we want to have a few directories more fixed + # this helps with debugging as location are common + # we do this by overiding locations from the "layout" + # used as part of build. 
This means loctaion such as + # PROXY_CONFIG_BIN_PATH with alway be $root/bin + # not something else such as bin64 + ##### + + # set root for this test + p.Env['TS_ROOT'] = ts_dir + p.Setup.MakeDir(ts_dir) + + # set bin location + + p.Env['PROXY_CONFIG_BIN_PATH'] = bin_dir + bin_path = os.path.join(ts_dir, bin_dir) + p.Env['PATH'] = bin_path + os.pathsep + p.ComposeEnv()['PATH'] + p.Setup.Copy(p.Variables.BINDIR, bin_path, True) + + ######################################################### + # setup config directory + + # copy all basic config files we need to get this to work + cfg_dir = os.path.join(AUTEST_SITE_PATH, "min_cfg") + for f in os.listdir(cfg_dir): + p.Setup.CopyAs(os.path.join(cfg_dir, f), config_dir) + + ######################################################### + # setup read-only data directory in config. Needed for response body + # reponses + + p.Env['PROXY_CONFIG_BODY_FACTORY_TEMPLATE_SETS_DIR'] = template_dir + p.Variables.body_factory_template_dir = template_dir + p.Setup.Copy(os.path.join(p.Variables.SYSCONFDIR, + 'body_factory'), template_dir) + + ######################################################### + # setup read-only data directory for plugins + + p.Env['PROXY_CONFIG_PLUGIN_PLUGIN_DIR'] = plugin_dir + p.Setup.Copy(p.Variables.PLUGINDIR, plugin_dir, True) + + ######################################################### + # create subdirectories that need to exist (but are empty) + # log directory has to be created with correct permissions + p.Setup.MakeDir(log_dir) # log directory has to be created + p.Setup.Chown(log_dir, "nobody", "nobody", ignore=True) + + # set env so traffic server uses correct locations + p.Env['PROXY_CONFIG_LOG_LOGFILE_DIR'] = log_dir + p.Variables.LOGDIR = log_dir + + # this is needed for cache and communication sockets + # Below was to make shorter paths but the code in + # traffic_ctl is broken and ignores this. 
+ # p.Env['PROXY_CONFIG_LOCAL_STATE_DIR']=runtime_dir + p.Env['PROXY_CONFIG_HOSTDB_STORAGE_PATH'] = runtime_dir + # p.Variables.RUNTIMEDIR=runtime_dir + p.Setup.MakeDir(runtime_dir) + # will need this for traffic_manager is it runs + p.Setup.MakeDir(os.path.join(config_dir, 'snapshots')) + + ########################################################## + # create subdirectories that need to exist (but are empty) + # ssl directory has to be created for keeping certs and keys + p.Setup.MakeDir(ssl_dir) + p.Setup.Chown(ssl_dir, "nobody", "nobody", ignore=True) + + # set env so traffic server uses correct locations + p.Env['PROXY_CONFIG_SSL_DIR'] = ssl_dir + p.Variables.SSLDir = ssl_dir + AddMethodToInstance(p, addSSLfile) + ######################################################## + # cache.db directory + p.Setup.MakeDir(storage_dir) + p.Setup.Chown(storage_dir, "nobody", "nobody", ignore=True) + + # set env so traffic server uses correct locations + p.Env['PROXY_CONFIG_STORAGE_DIR'] = storage_dir + p.Variables.STORAGEDIR = storage_dir + ######################################################### + # define the basic file for a given test run + # traffic.out ?? # cannot find it at the moment... 
+ # squid.log + fname = "squid.log" + tmpname = os.path.join(log_dir, fname) + p.Disk.File(tmpname, id=make_id(fname)) + # error.log + fname = "error.log" + tmpname = os.path.join(log_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), exists=False) + # diags.log + fname = "diags.log" + tmpname = os.path.join(log_dir, fname) + p.Disk.File(tmpname, id=make_id(fname)) + # add this test back once we have network namespaces working again + p.Disk.diags_log.Content = Testers.ExcludesExpression( + "ERROR:", "diags.log should not contain errors") + p.Disk.diags_log.Content += Testers.ExcludesExpression( + "FATAL:", "diags.log should not contain errors") + + # config files + def MakeConfigFile(self, fname): + tmpname = os.path.join(config_dir, fname) + return self.File(tmpname, id=make_id(fname), typename="ats:config") + + AddMethodToInstance(p.Disk, MakeConfigFile) + + # "Core" config files are pre-defined as variables. + fname = "records.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config:records") + + fname = "cache.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "congestion.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "hosting.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "icp.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "ip_allow.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "log_hosts.config" + tmpname = os.path.join(config_dir, fname) + # p.Disk.File(tmpname,id=make_id(fname),typename="ats:config").AddLine("# + # need something in here") + + # magic file that should probally not exist + fname = 
"logging.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "metrics.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "parent.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "plugin.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "remap.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "socks.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "splitdns.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "ssl_multicert.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "storage.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "vaddrs.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + fname = "volume.config" + tmpname = os.path.join(config_dir, fname) + p.Disk.File(tmpname, id=make_id(fname), typename="ats:config") + + ########################################################## + # set up default ports + # get some ports TODO make it so we can hold on to the socket + if select_ports: + # some system have a bug in which ipv4 and ipv6 share port space + # Make two different ports to avoid this + get_port(p, "port") + get_port(p, "portv6") + p.Ready = When.PortOpen(p.Variables.port) + else: + p.Variables.port = 8080 + p.Variables.portv6 = 8080 + get_port(p, "manager_port") + get_port(p, "admin_port") + + 
# set the ports + if select_ports: + p.Env['PROXY_CONFIG_HTTP_SERVER_PORTS'] = "{0} {1}:ipv6".format( + p.Variables.port, p.Variables.portv6) # your own listen port + p.Env['PROXY_CONFIG_PROCESS_MANAGER_MGMT_PORT'] = str( + p.Variables.manager_port) + p.Env['PROXY_CONFIG_ADMIN_SYNTHETIC_PORT'] = str(p.Variables.admin_port) + p.Env['PROXY_CONFIG_ADMIN_AUTOCONF_PORT'] = str( + p.Variables.admin_port) # support pre ATS 6.x + + # since we always kill this + p.ReturnCode = None + + return p + +################################## +# added to ats process object to help deal with config files + + +class Config(File): + ''' + Class to represent a config file + ''' + + def __init__(self, runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True, content=None): + super(Config, self).__init__( + runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True + ) + + self.content = content + self._added = False + + def AddLines(self, lines): + for line in lines: + self.AddLine(line) + + def _do_write(self, name): + ''' + Write contents to disk + ''' + host.WriteVerbosef('ats-config-file', + "Writting out file {0}", self.Name) + if self.content is not None: + with open(name, 'w') as f: + f.write(self.content) + return (False, "Appended file {0}".format(self.Name), "Success") + + def AddLine(self, line): + if not self._added: + self.WriteCustomOn(self._do_write) + self._added = True + if self.content is None: + self.content = "" + if not line.endswith('\n'): + line += '\n' + self.content += line + + +class RecordsConfig(Config, dict): + ''' + Create a "dict" representation of records.config + + This can be accessed as a multi-level dictionary + + such as: + rc['CONFIG']['proxy.config.log.hostname'] + ''' + + reverse_kind_map = {str: 'STRING', + int: 'INT', + float: 'FLOAT', + } + + line_template = 'CONFIG {name} {kind} {val}\n' + + def __init__(self, runable, name, exists=None, size=None, content_tester=None, execute=False, 
runtime=True): + super(RecordsConfig, self).__init__( + runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True + ) + self.WriteCustomOn(self._do_write) + + def _do_write(self, name): + host.WriteVerbosef('ats-config-file', "Writting out file {0}", name) + if len(self) > 0: + with open(name, 'w') as f: + for name, val in self.items(): + f.write(self.line_template.format(name=name, + kind=self.reverse_kind_map[ + type(val)], + val=val)) + return (False, "Writing config file {0}".format(os.path.split(self.Name)[-1]), "Success") +########################################################################## + + +def addSSLfile(self, filename): + self.Setup.CopyAs(filename, self.Variables.SSLDir) + +RegisterFileType(Config, "ats:config") +RegisterFileType(RecordsConfig, "ats:config:records") +AddTestRunSet(MakeATSProcess, name="MakeATSProcess") diff --git a/tests/gold_tests/autest-site/trafficserver_plugins.test.ext b/tests/gold_tests/autest-site/trafficserver_plugins.test.ext new file mode 100644 index 00000000000..fb53bd771b7 --- /dev/null +++ b/tests/gold_tests/autest-site/trafficserver_plugins.test.ext @@ -0,0 +1,44 @@ +''' +Builds, installs, and enables an ATS plugin in the sandbox environment +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +def prepare_plugin(self, path, tsproc): + """Builds, installs, and enables an ATS plugin in the sandbox environment + + The source file at the given path is copied to the sandbox directory of the + given traffic server process and compiled into a binary with the file + extensioned replaced with '.so'. An entry for this plugin is added to + the 'plugin.config' file.""" + + # Copy the source to the sandbox directory. + plugin_dir = tsproc.Env['PROXY_CONFIG_PLUGIN_PLUGIN_DIR'] + tsproc.Setup.Copy(path, plugin_dir) + + # Compile the plugin. + in_basename = os.path.basename(path) + in_path = os.path.join(plugin_dir, in_basename) + out_basename = os.path.splitext(in_basename)[0] + '.so' + out_path = os.path.join(plugin_dir, out_basename) + tsproc.Setup.RunCommand("tsxs -c {0} -o {1}".format(in_path, out_path)) + + # Add an entry to plugin.config. + tsproc.Disk.plugin_config.AddLine(out_basename) + +ExtendTest(prepare_plugin, name="prepare_plugin") diff --git a/tests/gold_tests/basic/basic-cop.test.py b/tests/gold_tests/basic/basic-cop.test.py new file mode 100644 index 00000000000..0d5203118df --- /dev/null +++ b/tests/gold_tests/basic/basic-cop.test.py @@ -0,0 +1,32 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Test.Summary = ''' +Test that Trafficserver starts with default configurations. +''' + +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) + +p = Test.MakeATSProcess("ts", command="traffic_cop --debug --stdout", select_ports=False) +t = Test.AddTestRun("Test traffic server started properly") +t.StillRunningAfter = Test.Processes.ts + +p = t.Processes.Default +p.Command = "curl http://127.0.0.1:8080" +p.ReturnCode = 0 +p.StartBefore(Test.Processes.ts, ready=When.PortOpen(8080)) diff --git a/tests/gold_tests/basic/basic-manager.test.py b/tests/gold_tests/basic/basic-manager.test.py new file mode 100644 index 00000000000..24cb2bd3979 --- /dev/null +++ b/tests/gold_tests/basic/basic-manager.test.py @@ -0,0 +1,32 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +Test.Summary = ''' +Test that Trafficserver starts with default configurations. +''' + +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) + +p = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=False) +t = Test.AddTestRun("Test traffic server started properly") +t.StillRunningAfter = Test.Processes.ts + +p = t.Processes.Default +p.Command = "curl http://127.0.0.1:8080" +p.ReturnCode = 0 +p.StartBefore(Test.Processes.ts, ready=When.PortOpen(8080)) diff --git a/tests/gold_tests/basic/basic.test.py b/tests/gold_tests/basic/basic.test.py new file mode 100644 index 00000000000..b3fe08b4de5 --- /dev/null +++ b/tests/gold_tests/basic/basic.test.py @@ -0,0 +1,32 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Test.Summary = ''' +Test that Trafficserver starts with default configurations. 
+''' + +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) + +p = Test.MakeATSProcess("ts", select_ports=False) +t = Test.AddTestRun("Test traffic server started properly") +t.StillRunningAfter = Test.Processes.ts + +p = t.Processes.Default +p.Command = "curl http://127.0.0.1:8080" +p.ReturnCode = 0 +p.StartBefore(Test.Processes.ts, ready=When.PortOpen(8080)) diff --git a/tests/gold_tests/basic/config.test.py b/tests/gold_tests/basic/config.test.py new file mode 100644 index 00000000000..0659bf29fce --- /dev/null +++ b/tests/gold_tests/basic/config.test.py @@ -0,0 +1,33 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +Test.Summary = "Test start up of Traffic server with configuration modification of starting port" + +Test.SkipUnless(Condition.HasProgram("curl", + "Curl needs to be installed on your system for this test to work")) + +ts1 = Test.MakeATSProcess("ts1", select_ports=False) +ts1.Setup.ts.CopyConfig('config/records_8090.config', "records.config") +t = Test.AddTestRun("Test traffic server started properly") +t.StillRunningAfter = ts1 + +p = t.Processes.Default +p.Command = "curl 127.0.0.1:8090" +p.ReturnCode = 0 + +p.StartBefore(Test.Processes.ts1, ready=When.PortOpen(8090)) diff --git a/tests/gold_tests/basic/config/records_8090.config b/tests/gold_tests/basic/config/records_8090.config new file mode 100644 index 00000000000..ca56e77b264 --- /dev/null +++ b/tests/gold_tests/basic/config/records_8090.config @@ -0,0 +1,2 @@ +CONFIG proxy.config.http.server_ports STRING 8090 + diff --git a/tests/gold_tests/basic/config/records_8091.config b/tests/gold_tests/basic/config/records_8091.config new file mode 100644 index 00000000000..1b560d820c8 --- /dev/null +++ b/tests/gold_tests/basic/config/records_8091.config @@ -0,0 +1,2 @@ +CONFIG proxy.config.http.server_ports STRING 8091 + diff --git a/tests/gold_tests/basic/config/remap.config b/tests/gold_tests/basic/config/remap.config new file mode 100644 index 00000000000..553b66b2da9 --- /dev/null +++ b/tests/gold_tests/basic/config/remap.config @@ -0,0 +1 @@ +regex_map http://(.*)/ http://localhost:9999/ \ No newline at end of file diff --git a/tests/gold_tests/basic/copy_config.test.py b/tests/gold_tests/basic/copy_config.test.py new file mode 100644 index 00000000000..e61de02592a --- /dev/null +++ b/tests/gold_tests/basic/copy_config.test.py @@ -0,0 +1,49 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Test.Summary = "Test start up of Traffic server with configuration modification of starting port of different servers at the same time" + +Test.SkipUnless(Condition.HasProgram("curl", + "Curl needs to be installed on your system for this test to work")) + +# set up some ATS processes +ts1 = Test.MakeATSProcess("ts1", select_ports=False) +ts1.Setup.ts.CopyConfig('config/records_8090.config', 'records.config') + +ts2 = Test.MakeATSProcess("ts2", select_ports=False) +ts2.Setup.ts.CopyConfig('config/records_8091.config', 'records.config') + +# setup a testrun +t = Test.AddTestRun("Talk to ts1") +t.StillRunningAfter = ts1 +t.StillRunningAfter += ts2 +p = t.Processes.Default +p.Command = "curl 127.0.0.1:8090" +p.ReturnCode = 0 +p.StartBefore(Test.Processes.ts1, ready=When.PortOpen(8090)) +p.StartBefore(Test.Processes.ts2, ready=When.PortOpen(8091)) + +# setup a testrun +t = Test.AddTestRun("Talk to ts2") +t.StillRunningBefore = ts1 +t.StillRunningBefore += ts2 +t.StillRunningAfter = ts1 +t.StillRunningAfter += ts2 +p = t.Processes.Default +p.Command = "curl 127.0.0.1:8091" +p.ReturnCode = 0 diff --git a/tests/gold_tests/basic/copy_config2.test.py b/tests/gold_tests/basic/copy_config2.test.py new file mode 100644 index 00000000000..37ba4f20fc9 --- /dev/null +++ b/tests/gold_tests/basic/copy_config2.test.py @@ -0,0 +1,46 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more 
contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Test.Summary = "Test start up of Traffic server with generated ports of more than one servers at the same time" + +Test.SkipUnless(Condition.HasProgram("curl", + "Curl needs to be installed on your system for this test to work")) + +# set up some ATS processes +ts1 = Test.MakeATSProcess("ts1") +ts2 = Test.MakeATSProcess("ts2") + +# setup a testrun +t = Test.AddTestRun("Talk to ts1") +t.StillRunningAfter = ts1 +t.StillRunningAfter += ts2 +p = t.Processes.Default +p.Command = "curl 127.0.0.1:{0}".format(ts1.Variables.port) +p.ReturnCode = 0 +p.StartBefore(Test.Processes.ts1, ready=When.PortOpen(ts1.Variables.port)) +p.StartBefore(Test.Processes.ts2, ready=When.PortOpen(ts2.Variables.port)) + +# setup a testrun +t = Test.AddTestRun("Talk to ts2") +t.StillRunningBefore = ts1 +t.StillRunningBefore += ts2 +t.StillRunningAfter = ts1 +t.StillRunningAfter += ts2 +p = t.Processes.Default +p.Command = "curl 127.0.0.1:{0}".format(ts2.Variables.port) +p.ReturnCode = 0 diff --git a/tests/gold_tests/body_factory/config/remap.config b/tests/gold_tests/body_factory/config/remap.config new file mode 100644 index 00000000000..553b66b2da9 --- /dev/null +++ b/tests/gold_tests/body_factory/config/remap.config @@ -0,0 +1 @@ +regex_map http://(.*)/ http://localhost:9999/ \ No 
newline at end of file diff --git a/tests/gold_tests/body_factory/custom_response.test.py b/tests/gold_tests/body_factory/custom_response.test.py new file mode 100644 index 00000000000..afa636dfbdf --- /dev/null +++ b/tests/gold_tests/body_factory/custom_response.test.py @@ -0,0 +1,63 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +Test.Summary = ''' +Tests for custom reponse body +''' + +# this test currently fails and it should not +Test.SkipIf(Condition.true("This test fails at the moment as is turned off")) +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) + +ts = Test.MakeATSProcess("ts") +ts.Disk.records_config.update({ + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory +}) +ts.Disk.remap_config.AddLine( + 'map / http://www.linkedin.com/ @action=deny' +) + + +domain_directory = ['www.linkedin.com', '127.0.0.1', 'www.foobar.net'] +body_factory_dir = ts.Variables.body_factory_template_dir +# for each domain +set = False +for directory_item in domain_directory: + # write out a files with some content for Traffic server for given domain + ts.Disk.File(os.path.join(body_factory_dir, directory_item, "access#denied")).\ + WriteOn("{0} 44 Not 89 found".format(directory_item)) + + ts.Disk.File(os.path.join(body_factory_dir, directory_item, ".body_factory_info")).\ + WriteOn("") + # make a test run for a given domain + tr = Test.AddTestRun("Test domain {0}".format(directory_item)) + if not set: + # Start the ATS process for first test run + tr.Processes.Default.StartBefore(Test.Processes.ts) + set = True + tr.StillRunningAfter = ts + else: + # test that ats is still running before and after + tr.StillRunningBefore = ts + tr.StillRunningAfter = ts + + tr.Processes.Default.Command = "curl --proxy 127.0.0.1:{1} {0}".format(directory_item, ts.Variables.port) + tr.Processes.Default.ReturnCode = 0 + tr.Streams.All = Testers.ContainsExpression("{0} Not found".format(directory_item), "should contain custom data") diff --git a/tests/gold_tests/body_factory/data/www.customplugin204.test_get.txt b/tests/gold_tests/body_factory/data/www.customplugin204.test_get.txt new file mode 100644 index 00000000000..be603f95f17 --- /dev/null +++ 
b/tests/gold_tests/body_factory/data/www.customplugin204.test_get.txt @@ -0,0 +1,2 @@ +GET HTTP://www.customplugin204.test/ HTTP/1.1 + diff --git a/tests/gold_tests/body_factory/data/www.customtemplate204.test_get.txt b/tests/gold_tests/body_factory/data/www.customtemplate204.test_get.txt new file mode 100644 index 00000000000..395d7985f69 --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.customtemplate204.test_get.txt @@ -0,0 +1,2 @@ +GET HTTP://www.customtemplate204.test/ HTTP/1.1 + diff --git a/tests/gold_tests/body_factory/data/www.default204.test_get.txt b/tests/gold_tests/body_factory/data/www.default204.test_get.txt new file mode 100644 index 00000000000..e77408af827 --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.default204.test_get.txt @@ -0,0 +1,2 @@ +GET HTTP://www.default204.test/ HTTP/1.1 + diff --git a/tests/gold_tests/body_factory/data/www.default304.test_get.txt b/tests/gold_tests/body_factory/data/www.default304.test_get.txt new file mode 100644 index 00000000000..c9064fa0ade --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.default304.test_get.txt @@ -0,0 +1,2 @@ +GET HTTP://www.default304.test/ HTTP/1.1 + diff --git a/tests/gold_tests/body_factory/data/www.example.test_get_200.txt b/tests/gold_tests/body_factory/data/www.example.test_get_200.txt new file mode 100644 index 00000000000..c53681f3486 --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.example.test_get_200.txt @@ -0,0 +1,3 @@ +GET /get200 HTTP/1.1 +Host: www.example.test + diff --git a/tests/gold_tests/body_factory/data/www.example.test_get_304.txt b/tests/gold_tests/body_factory/data/www.example.test_get_304.txt new file mode 100644 index 00000000000..8d0aecf9bdb --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.example.test_get_304.txt @@ -0,0 +1,4 @@ +GET /get304 HTTP/1.1 +Host: www.example.test +If-Modified-Since: Thu, 1 Jan 1970 00:00:00 GMT + diff --git a/tests/gold_tests/body_factory/data/www.example.test_head.txt 
b/tests/gold_tests/body_factory/data/www.example.test_head.txt new file mode 100644 index 00000000000..c5a5c979e9e --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.example.test_head.txt @@ -0,0 +1,3 @@ +HEAD http://www.example.test/ HTTP/1.1 +Host: www.example.test + diff --git a/tests/gold_tests/body_factory/data/www.example.test_head_200.txt b/tests/gold_tests/body_factory/data/www.example.test_head_200.txt new file mode 100644 index 00000000000..6d214e1b365 --- /dev/null +++ b/tests/gold_tests/body_factory/data/www.example.test_head_200.txt @@ -0,0 +1,3 @@ +HEAD /head200 HTTP/1.1 +Host: www.example.test + diff --git a/tests/gold_tests/body_factory/gold/http-204-custom-plugin.gold b/tests/gold_tests/body_factory/gold/http-204-custom-plugin.gold new file mode 100644 index 00000000000..cab77b6aeb2 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-204-custom-plugin.gold @@ -0,0 +1,19 @@ +HTTP/1.1 204 No Content +Connection: keep-alive +Cache-Control: no-store +Content-Length: 282 +Content-Type: text/html + + + +Spec-breaking 204! + + + +

This is body content for a 204.

+
+ +Description: According to rfc7231 I should not have been sent to you!
+This response was sent via the custom204plugin via a call to TSHttpTxnErrorBodySet. +
+ diff --git a/tests/gold_tests/body_factory/gold/http-204-custom.gold b/tests/gold_tests/body_factory/gold/http-204-custom.gold new file mode 100644 index 00000000000..fb84d9b7f05 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-204-custom.gold @@ -0,0 +1,21 @@ +HTTP/1.1 204 No Content +Connection: keep-alive +Cache-Control: no-store +Content-Type: text/html +Content-Language: en +Content-Length: 271 + + + +Spec-breaking 204! + + + +

This is body content for a 204.

+
+ + +Description: According to rfc7231 I should not have been sent to you! + +
+ diff --git a/tests/gold_tests/body_factory/gold/http-204.gold b/tests/gold_tests/body_factory/gold/http-204.gold new file mode 100644 index 00000000000..29312022b09 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-204.gold @@ -0,0 +1,4 @@ +HTTP/1.1 204 No Content +Connection: keep-alive +Cache-Control: no-store + diff --git a/tests/gold_tests/body_factory/gold/http-304.gold b/tests/gold_tests/body_factory/gold/http-304.gold new file mode 100644 index 00000000000..1931f8b2533 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-304.gold @@ -0,0 +1,4 @@ +HTTP/1.1 304 Not Modified +Connection: keep-alive +Cache-Control: no-store + diff --git a/tests/gold_tests/body_factory/gold/http-get-200.gold b/tests/gold_tests/body_factory/gold/http-get-200.gold new file mode 100644 index 00000000000..a5c3c385443 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-get-200.gold @@ -0,0 +1,6 @@ +HTTP/1.1 200 OK +Content-Length: 47 +Age: 0 +Connection: keep-alive + +This body should be returned for a GET request. 
diff --git a/tests/gold_tests/body_factory/gold/http-get-304.gold b/tests/gold_tests/body_factory/gold/http-get-304.gold new file mode 100644 index 00000000000..03f3cedf18f --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-get-304.gold @@ -0,0 +1,5 @@ +HTTP/1.1 304 Not Modified +Age: 0 +Connection: keep-alive +Warning: 199 VERSION Proxy received unexpected 304 response; content may be stale + diff --git a/tests/gold_tests/body_factory/gold/http-head-200.gold b/tests/gold_tests/body_factory/gold/http-head-200.gold new file mode 100644 index 00000000000..045c1e18bdb --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-head-200.gold @@ -0,0 +1,4 @@ +HTTP/1.1 200 OK +Age: 0 +Connection: keep-alive + diff --git a/tests/gold_tests/body_factory/gold/http-head-no-origin.gold b/tests/gold_tests/body_factory/gold/http-head-no-origin.gold new file mode 100644 index 00000000000..6e1d0fd1586 --- /dev/null +++ b/tests/gold_tests/body_factory/gold/http-head-no-origin.gold @@ -0,0 +1,7 @@ +HTTP/1.1 404 Not Found +Connection: keep-alive +Cache-Control: no-store +Content-Type: text/html +Content-Language: en +Content-Length: 297 + diff --git a/tests/gold_tests/body_factory/http204_response.test.py b/tests/gold_tests/body_factory/http204_response.test.py new file mode 100644 index 00000000000..3833d463b95 --- /dev/null +++ b/tests/gold_tests/body_factory/http204_response.test.py @@ -0,0 +1,95 @@ +''' +Tests that 204 responses conform to rfc2616, unless custom templates override. +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +Test.Summary = ''' +Tests that 204 responses conform to rfc2616, unless custom templates override. +''' + +Test.SkipUnless(Condition.HasProgram("grep","grep needs to be installed on system for this test to work")) + +ts=Test.MakeATSProcess("ts") +server=Test.MakeOriginServer("server") + +DEFAULT_204_HOST='www.default204.test' +CUSTOM_TEMPLATE_204_HOST='www.customtemplate204.test' + +ts.Disk.records_config.update({ + # enable domain specific body factory + 'proxy.config.body_factory.enable_customizations': 3, + }) + +# Create a template body for a 204. +body_factory_dir=ts.Variables.body_factory_template_dir +ts.Disk.File(os.path.join(body_factory_dir, 'default', CUSTOM_TEMPLATE_204_HOST+'_default')).\ + WriteOn( +""" + +Spec-breaking 204! + + + +

This is body content for a 204.

+
+ + +Description: According to rfc7231 I should not have been sent to you! + +
+ +""") + +regex_remap_conf_file = "maps.reg" + +ts.Disk.remap_config.AddLine( + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host' + .format(DEFAULT_204_HOST, server.Variables.Port, regex_remap_conf_file) + ) +ts.Disk.remap_config.AddLine( + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host @plugin=conf_remap.so @pparam=proxy.config.body_factory.template_base={0}' + .format(CUSTOM_TEMPLATE_204_HOST, server.Variables.Port, regex_remap_conf_file) + ) +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine( + '//.*/ http://127.0.0.1:{0} @status=204' + .format(server.Variables.Port) + ) + +Test.Setup.Copy(os.path.join(os.pardir,os.pardir,'tools','tcp_client.py')) +Test.Setup.Copy('data') + +defaultTr=Test.AddTestRun("Test domain {0}".format(DEFAULT_204_HOST)) +defaultTr.Processes.Default.StartBefore(Test.Processes.ts) +defaultTr.StillRunningAfter = ts + +defaultTr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(DEFAULT_204_HOST)) +defaultTr.Processes.Default.TimeOut=5 # seconds +defaultTr.Processes.Default.ReturnCode=0 +defaultTr.Processes.Default.Streams.stdout="gold/http-204.gold" + + +customTemplateTr=Test.AddTestRun("Test domain {0}".format(CUSTOM_TEMPLATE_204_HOST)) +customTemplateTr.StillRunningBefore = ts +customTemplateTr.StillRunningAfter = ts +customTemplateTr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(CUSTOM_TEMPLATE_204_HOST)) +customTemplateTr.Processes.Default.TimeOut=5 # seconds +customTemplateTr.Processes.Default.ReturnCode=0 +customTemplateTr.Processes.Default.Streams.stdout="gold/http-204-custom.gold" diff --git a/tests/gold_tests/body_factory/http204_response_plugin.test.py 
b/tests/gold_tests/body_factory/http204_response_plugin.test.py new file mode 100644 index 00000000000..b8fd336874b --- /dev/null +++ b/tests/gold_tests/body_factory/http204_response_plugin.test.py @@ -0,0 +1,55 @@ +''' +Tests that plugins may break HTTP by sending 204 respose bodies +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +Test.Summary = ''' +Tests that plugins may break HTTP by sending 204 respose bodies +''' + +Test.SkipUnless(Condition.HasProgram("grep","grep needs to be installed on system for this test to work")) + +ts=Test.MakeATSProcess("ts") +server=Test.MakeOriginServer("server") + +CUSTOM_PLUGIN_204_HOST='www.customplugin204.test' + +regex_remap_conf_file = "maps.reg" + +ts.Disk.remap_config.AddLine( + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host' + .format(CUSTOM_PLUGIN_204_HOST, server.Variables.Port, + regex_remap_conf_file) + ) +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine('//.*/ http://donotcare.test @status=204') + +Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'custom204plugin.cc'), ts) + +Test.Setup.Copy(os.path.join(os.pardir,os.pardir,'tools','tcp_client.py')) +Test.Setup.Copy('data') + +tr=Test.AddTestRun("Test domain {0}".format(CUSTOM_PLUGIN_204_HOST)) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.StillRunningAfter = ts + +tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(CUSTOM_PLUGIN_204_HOST)) +tr.Processes.Default.TimeOut=5 # seconds +tr.Processes.Default.ReturnCode=0 +tr.Processes.Default.Streams.stdout="gold/http-204-custom-plugin.gold" diff --git a/tests/gold_tests/body_factory/http304_response.test.py b/tests/gold_tests/body_factory/http304_response.test.py new file mode 100644 index 00000000000..ed672a6e16f --- /dev/null +++ b/tests/gold_tests/body_factory/http304_response.test.py @@ -0,0 +1,57 @@ +''' +Tests 304 responses +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +Test.Summary = ''' +Tests 304 responses +''' + +Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work")) +Test.SkipUnless(Condition.HasProgram("sed", "grep needs to be installed on system for this test to work")) + +ts = Test.MakeATSProcess("ts") +server = Test.MakeOriginServer("server") + +DEFAULT_304_HOST = 'www.default304.test' + + +regex_remap_conf_file = "maps.reg" + +ts.Disk.remap_config.AddLine( + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host' + .format(DEFAULT_304_HOST, server.Variables.Port, regex_remap_conf_file) + ) +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine( + '//.*/ http://127.0.0.1:{0} @status=304' + .format(server.Variables.Port) +) + +Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) +Test.Setup.Copy('data') + +tr = Test.AddTestRun("Test domain {0}".format(DEFAULT_304_HOST)) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.StillRunningAfter = ts + +cmd_tpl = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/' | sed 's;ApacheTrafficServer\/[^ ]*;VERSION;'" +tr.Processes.Default.Command = cmd_tpl.format(ts.Variables.port, 'data/{0}_get.txt'.format(DEFAULT_304_HOST)) +tr.Processes.Default.TimeOut = 5 # seconds +tr.Processes.Default.ReturnCode = 0 
+tr.Processes.Default.Streams.stdout = "gold/http-304.gold" diff --git a/tests/gold_tests/body_factory/http_head_no_origin.test.py b/tests/gold_tests/body_factory/http_head_no_origin.test.py new file mode 100644 index 00000000000..246e13f48a6 --- /dev/null +++ b/tests/gold_tests/body_factory/http_head_no_origin.test.py @@ -0,0 +1,45 @@ +''' +Tests that HEAD requests return proper responses when origin fails +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +Test.Summary = ''' +Tests that HEAD requests return proper responses when origin fails +''' + +Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work")) + +ts = Test.MakeATSProcess("ts") +server = Test.MakeOriginServer("server") + +HOST = 'www.example.test' + + +Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) +Test.Setup.Copy('data') + +tr = Test.AddTestRun("Test domain {0}".format(HOST)) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.StillRunningAfter = ts + +tr.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_head.txt'.format(HOST)) +tr.Processes.Default.TimeOut = 5 # seconds +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stdout = "gold/http-head-no-origin.gold" diff --git a/tests/gold_tests/body_factory/http_with_origin.test.py b/tests/gold_tests/body_factory/http_with_origin.test.py new file mode 100644 index 00000000000..3eb85a04059 --- /dev/null +++ b/tests/gold_tests/body_factory/http_with_origin.test.py @@ -0,0 +1,109 @@ +''' +Tests that HEAD requests return proper responses +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +Test.Summary = ''' +Tests that HEAD requests return proper responses +''' + +Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work")) +Test.SkipUnless(Condition.HasProgram("sed", "grep needs to be installed on system for this test to work")) + +ts = Test.MakeATSProcess("ts") + +HOST = 'www.example.test' + +server = Test.MakeOriginServer("server") + +ts.Disk.remap_config.AddLine( + 'map http://{0} http://127.0.0.1:{1}'.format(HOST, server.Variables.Port) +) + +server.addResponse("sessionfile.log", { + "headers": "HEAD /head200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" +}, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "This body should not be returned for a HEAD request." +}) + +server.addResponse("sessionfile.log", { + "headers": "GET /get200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" +}, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "This body should be returned for a GET request." 
+}) + +server.addResponse("sessionfile.log", { + "headers": "GET /get304 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" +}, { + "headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +}) + + +Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) +Test.Setup.Copy('data') + +trhead200 = Test.AddTestRun("Test domain {0}".format(HOST)) +trhead200.Processes.Default.StartBefore(Test.Processes.ts) +trhead200.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) +trhead200.StillRunningAfter = ts +trhead200.StillRunningAfter = server + +trhead200.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_head_200.txt'.format(HOST)) +trhead200.Processes.Default.TimeOut = 5 # seconds +trhead200.Processes.Default.ReturnCode = 0 +trhead200.Processes.Default.Streams.stdout = "gold/http-head-200.gold" + + +trget200 = Test.AddTestRun("Test domain {0}".format(HOST)) +trget200.StillRunningBefore = ts +trget200.StillRunningBefore = server +trget200.StillRunningAfter = ts +trget200.StillRunningAfter = server + +trget200.Processes.Default.Command = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get_200.txt'.format(HOST)) +trget200.Processes.Default.TimeOut = 5 # seconds +trget200.Processes.Default.ReturnCode = 0 +trget200.Processes.Default.Streams.stdout = "gold/http-get-200.gold" + + +trget304 = Test.AddTestRun("Test domain {0}".format(HOST)) +trget304.StillRunningBefore = ts +trget304.StillRunningBefore = server +trget304.StillRunningAfter = ts +trget304.StillRunningAfter = server + +cmd_tpl = "python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/' | sed 's;ApacheTrafficServer\/[^ ]*;VERSION;'" +trget304.Processes.Default.Command = 
cmd_tpl.format(ts.Variables.port, 'data/{0}_get_304.txt'.format(HOST)) +trget304.Processes.Default.TimeOut = 5 # seconds +trget304.Processes.Default.ReturnCode = 0 +trget304.Processes.Default.Streams.stdout = "gold/http-get-304.gold" diff --git a/tests/gold_tests/cache/cache-generation-clear.test.py b/tests/gold_tests/cache/cache-generation-clear.test.py new file mode 100644 index 00000000000..dac8ec8868a --- /dev/null +++ b/tests/gold_tests/cache/cache-generation-clear.test.py @@ -0,0 +1,94 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid + +Test.Summary = ''' +Test that incrementing the cache generation acts like a cache clear +''' +# need Curl +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts", command="traffic_manager") + +# setup some config file for this server +ts.Disk.records_config.update({ + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.config_update_interval_ms': 1, +}) +ts.Disk.plugin_config.AddLine('xdebug.so') +ts.Disk.remap_config.AddLines([ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so' +]) + +objectid = uuid.uuid4() +# first test is a miss for default +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=5) +tr.Processes.Default.Streams.All = "gold/miss_default-1.gold" + +# Second touch is a HIT for default. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_default-1.gold" + +# Call traffic_ctrl to set new generation +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'traffic_ctl --debug config set proxy.config.http.cache.generation 77' +tr.Processes.Default.ForceUseShell = False +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Env = ts.Env # set the environment for traffic_control to run in + +# new generation should first be a miss. +tr = Test.AddTestRun() +tr.DelayStart = 15 # delay start of test run to allow previous command to take effect +# create a new traffic_ctrl call and the environment +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/miss_default77.gold" + +# new generation should should now hit. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_default77.gold" + +# should still hit. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_default77.gold" diff --git a/tests/gold_tests/cache/cache-generation-disjoint.test.py b/tests/gold_tests/cache/cache-generation-disjoint.test.py new file mode 100644 index 00000000000..0bf62829da2 --- /dev/null +++ b/tests/gold_tests/cache/cache-generation-disjoint.test.py @@ -0,0 +1,94 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid + +Test.Summary = ''' +Test that the same URL path in different cache generations creates disjoint objects +''' +# need Curl +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts") + +# setup some config file for this server +ts.Disk.records_config.update({ + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.http.wait_for_cache': 1, + 'proxy.config.config_update_interval_ms': 1, + +}) +ts.Disk.plugin_config.AddLine('xdebug.so') +ts.Disk.remap_config.AddLines([ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so' +]) + +objectid = uuid.uuid4() +# first test is a miss for default +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=2) +tr.Processes.Default.Streams.All = "gold/miss_default-1.gold" + +# Same URL in generation 1 is a MISS. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation1/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/miss_gen1.gold" + +# Same URL in generation 2 is still a MISS. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation2/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/miss_gen2.gold" + +# Second touch is a HIT for default. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_default-1.gold" + +# Second touch is a HIT for generation1. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation1/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_gen1.gold" + +# Second touch is a HIT for generation2. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation2/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_gen2.gold" diff --git a/tests/gold_tests/cache/disjoint-wait-for-cache.test.py b/tests/gold_tests/cache/disjoint-wait-for-cache.test.py new file mode 100644 index 00000000000..c57efdf4da6 --- /dev/null +++ b/tests/gold_tests/cache/disjoint-wait-for-cache.test.py @@ -0,0 +1,94 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid + +Test.Summary = ''' +Same as cache-generation-disjoint, but uses proxy.config.http.wait_for_cache which should delay +the server from accepting connections until the cache is loaded +''' +# need Curl +Test.SkipUnless(Condition.HasProgram("curl", "Curl need to be installed on system for this test to work")) +Test.SkipIf(Condition.true("This test fails at the moment as is turned off")) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts") + +# setup some config file for this server +ts.Disk.records_config.update({ + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.config_update_interval_ms': 1, + 'proxy.config.http.wait_for_cache': 3, +}) +ts.Disk.plugin_config.AddLine('xdebug.so') +ts.Disk.remap_config.AddLines([ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so' +]) + +objectid = uuid.uuid4() +# first test is a miss for default +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.Processes.Default.Streams.All = "gold/miss_default-1.gold" + +# Same URL in generation 1 is a MISS. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation1/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/miss_gen1.gold" + +# Same URL in generation 2 is still a MISS. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation2/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/miss_gen2.gold" + +# Second touch is a HIT for default. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/default/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_default-1.gold" + +# Second touch is a HIT for generation1. +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation1/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_gen1.gold" + +# Second touch is a HIT for generation2. 
+tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/generation2/cache/10/{1}" -H "x-debug: x-cache,x-cache-key,via,x-cache-generation" --verbose'.format( + ts.Variables.port, objectid) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.All = "gold/hit_gen2.gold" diff --git a/tests/gold_tests/cache/gold/hit_default-1.gold b/tests/gold_tests/cache/gold/hit_default-1.gold new file mode 100644 index 00000000000..55590ffb365 --- /dev/null +++ b/tests/gold_tests/cache/gold/hit_default-1.gold @@ -0,0 +1,8 @@ +{} +> GET /default/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: hit-fresh +< X-Cache-Generation: -1 +{} diff --git a/tests/gold_tests/cache/gold/hit_default77.gold b/tests/gold_tests/cache/gold/hit_default77.gold new file mode 100644 index 00000000000..79f4dc85c53 --- /dev/null +++ b/tests/gold_tests/cache/gold/hit_default77.gold @@ -0,0 +1,8 @@ +{} +> GET /default/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: hit-fresh +< X-Cache-Generation: 77 +{} diff --git a/tests/gold_tests/cache/gold/hit_gen1.gold b/tests/gold_tests/cache/gold/hit_gen1.gold new file mode 100644 index 00000000000..0830f99eabf --- /dev/null +++ b/tests/gold_tests/cache/gold/hit_gen1.gold @@ -0,0 +1,8 @@ +{} +> GET /generation1/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: hit-fresh +< X-Cache-Generation: 1 +{} diff --git a/tests/gold_tests/cache/gold/hit_gen2.gold b/tests/gold_tests/cache/gold/hit_gen2.gold new file mode 100644 index 00000000000..ee30558f69d --- /dev/null +++ b/tests/gold_tests/cache/gold/hit_gen2.gold @@ -0,0 +1,8 @@ +{} +> GET /generation2/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: hit-fresh +< X-Cache-Generation: 2 +{} diff --git a/tests/gold_tests/cache/gold/miss_default-1.gold 
b/tests/gold_tests/cache/gold/miss_default-1.gold new file mode 100644 index 00000000000..ada98420e17 --- /dev/null +++ b/tests/gold_tests/cache/gold/miss_default-1.gold @@ -0,0 +1,8 @@ +{} +> GET /default/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: miss +< X-Cache-Generation: -1 +{} diff --git a/tests/gold_tests/cache/gold/miss_default77.gold b/tests/gold_tests/cache/gold/miss_default77.gold new file mode 100644 index 00000000000..083ff08d092 --- /dev/null +++ b/tests/gold_tests/cache/gold/miss_default77.gold @@ -0,0 +1,8 @@ +{} +> GET /default/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: miss +< X-Cache-Generation: 77 +{} diff --git a/tests/gold_tests/cache/gold/miss_gen1.gold b/tests/gold_tests/cache/gold/miss_gen1.gold new file mode 100644 index 00000000000..cacf5c3856f --- /dev/null +++ b/tests/gold_tests/cache/gold/miss_gen1.gold @@ -0,0 +1,8 @@ +{} +> GET /generation1/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: miss +< X-Cache-Generation: 1 +{} diff --git a/tests/gold_tests/cache/gold/miss_gen2.gold b/tests/gold_tests/cache/gold/miss_gen2.gold new file mode 100644 index 00000000000..cf1eb427242 --- /dev/null +++ b/tests/gold_tests/cache/gold/miss_gen2.gold @@ -0,0 +1,8 @@ +{} +> GET /generation2/cache/10/{} HTTP/1.1 +{} +< Server: ATS/{} +< X-Cache-Key: http://127.0.0.1/cache/10/{} +< X-Cache: miss +< X-Cache-Generation: 2 +{} diff --git a/tests/gold_tests/h2/gold/bigfile.gold b/tests/gold_tests/h2/gold/bigfile.gold new file mode 100644 index 00000000000..5fd92155bec --- /dev/null +++ b/tests/gold_tests/h2/gold/bigfile.gold @@ -0,0 +1,12 @@ +Content length = 191414 + +Body length = 191414 + +Content success + +Content length = 191414 + +Body length = 191414 + +Content success + diff --git a/tests/gold_tests/h2/gold/remap-200.gold b/tests/gold_tests/h2/gold/remap-200.gold new file mode 100644 
index 00000000000..5f7e6ecd5f7 --- /dev/null +++ b/tests/gold_tests/h2/gold/remap-200.gold @@ -0,0 +1,4 @@ +HTTP/2 200 +date: {} +server: ATS/{} + diff --git a/tests/gold_tests/h2/h2bigclient.py b/tests/gold_tests/h2/h2bigclient.py new file mode 100644 index 00000000000..6e0cec809e7 --- /dev/null +++ b/tests/gold_tests/h2/h2bigclient.py @@ -0,0 +1,85 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from hyper import HTTPConnection +import hyper +import argparse + + +def getResponseString(response): + typestr = str(type(response)) + if typestr.find('HTTP20') != -1: + string = "HTTP/2 {0}\r\n".format(response.status) + else: + string = "HTTP {0}\r\n".format(response.status) + string += 'date: ' + response.headers.get('date')[0].decode('utf-8') + "\r\n" + string += 'server: ' + response.headers.get('Server')[0].decode('utf-8') + "\r\n" + return string + + +def makerequest(port): + hyper.tls._context = hyper.tls.init_context() + hyper.tls._context.check_hostname = False + hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE + + conn = HTTPConnection('localhost:{0}'.format(port), secure=True) + + # Fetch the object twice so we know at least one time comes from cache + # Exploring timing options + sites = ['/bigfile', '/bigfile'] + responses = [] + request_ids = [] + for site in sites: + request_id = conn.request('GET', url=site) + request_ids.append(request_id) + + # get responses + for req_id in request_ids: + response = conn.get_response(req_id) + body = response.read() + cl = response.headers.get('Content-Length')[0] + print("Content length = {}\r\n".format(int(cl))) + print("Body length = {}\r\n".format(len(body))) + error = 0 + if chr(body[0]) != 'a': + error = 1 + print("First char {}".format(body[0])) + i = 1 + while i < len(body) and not error: + error = chr(body[i]) != 'b' + if error: + print("bad char {} at {}".format(body[i], i)) + i = i + 1 + if not error: + print("Content success\r\n") + else: + print("Content fail\r\n") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--port", "-p", + type=int, + help="Port to use") + args = parser.parse_args() + makerequest(args.port) + + +if __name__ == '__main__': + main() diff --git a/tests/gold_tests/h2/h2client.py b/tests/gold_tests/h2/h2client.py new file mode 100644 index 00000000000..df0d19fb627 --- /dev/null +++ b/tests/gold_tests/h2/h2client.py @@ -0,0 
+1,66 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from hyper import HTTPConnection +import hyper +import argparse + + +def getResponseString(response): + typestr = str(type(response)) + if typestr.find('HTTP20') != -1: + string = "HTTP/2 {0}\r\n".format(response.status) + else: + string = "HTTP {0}\r\n".format(response.status) + string += 'date: ' + response.headers.get('date')[0].decode('utf-8') + "\r\n" + string += 'server: ' + response.headers.get('Server')[0].decode('utf-8') + "\r\n" + return string + + +def makerequest(port): + hyper.tls._context = hyper.tls.init_context() + hyper.tls._context.check_hostname = False + hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE + + conn = HTTPConnection('localhost:{0}'.format(port), secure=True) + + sites = {'/'} + responses = [] + request_ids = [] + for site in sites: + request_id = conn.request('GET', url=site) + request_ids.append(request_id) + + # get responses + for req_id in request_ids: + response = conn.get_response(req_id) + print(getResponseString(response)) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--port", "-p", + type=int, + help="Port to use") + args = parser.parse_args() + makerequest(args.port) + + 
+if __name__ == '__main__': + main() diff --git a/tests/gold_tests/h2/http2.test.py b/tests/gold_tests/h2/http2.test.py new file mode 100644 index 00000000000..eb43d240cc8 --- /dev/null +++ b/tests/gold_tests/h2/http2.test.py @@ -0,0 +1,85 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +Test.Summary = ''' +Test a basic remap of a http connection +''' +# need Curl +Test.SkipUnless( + Condition.HasProgram("curl", "Curl need to be installed on system for this test to work") +) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts", select_ports=False) +server = Test.MakeOriginServer("server") + +testName = "" +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +# desired response form the origin server +response_header = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", "body": ""} +server.addResponse("sessionlog.json", request_header, response_header) + +# Add info for the large H2 download test +server.addResponse("sessionlog.json", + {"headers": "GET /bigfile HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}, + {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 191414\r\n\r\n", "timestamp": "1469733493.993", "body": ""}) + + +# add ssl materials like key, certificates for the server +ts.addSSLfile("ssl/server.pem") +ts.addSSLfile("ssl/server.key") + +ts.Variables.ssl_port = 4443 +ts.Disk.remap_config.AddLine( + 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) +) +ts.Disk.ssl_multicert_config.AddLine( + 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' +) +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.number.threads': 0, + # enable ssl port + 'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port), + 'proxy.config.ssl.client.verify.server': 0, + 
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', +}) +ts.Setup.CopyAs('h2client.py', Test.RunDirectory) +ts.Setup.CopyAs('h2bigclient.py', Test.RunDirectory) + +# Test Case 1: basic H2 interaction +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'python3 h2client.py -p {0}'.format(ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port)) +tr.Processes.Default.Streams.stdout = "gold/remap-200.gold" +tr.StillRunningAfter = server + +# Test Case 2: Make sure all the big file gets back. Regression test for issue 1646 +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'python3 h2bigclient.py -p {0}'.format(ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stdout = "gold/bigfile.gold" +tr.StillRunningAfter = server diff --git a/tests/gold_tests/h2/ssl/server.key b/tests/gold_tests/h2/ssl/server.key new file mode 100644 index 00000000000..4c7a661a6bd --- /dev/null +++ b/tests/gold_tests/h2/ssl/server.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe 
+YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- diff --git a/tests/gold_tests/h2/ssl/server.pem b/tests/gold_tests/h2/ssl/server.pem new file mode 100644 index 00000000000..a1de94fa776 --- /dev/null +++ b/tests/gold_tests/h2/ssl/server.pem @@ -0,0 +1,32 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICszCCAhwCCQCRJsJJ+mTsdDANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wHhcNMTYwODI1MjI1NzIxWhcNMTcwODI1MjI1NzIxWjCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh 
+aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYwc6JQX45GZmMDEjwxYT11 +uVvuBBInfpYJeU8WIXHrKcX5LUSRcBikiKnlfSnMNRohsu6TElQACc60wQ7Q8KDE +lBSsS1FaHzCIl1t1AkXRmz/1H65JSBvrV/6Z1NC+Gp58EbH7Gul8ByC1xaJm5ID1 +Dd++kOPlY5ZI9ZcFS7HLAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAXSVfZ5p1TkhW +QiYq9nfQlBnX2NVaf8ymA8edQR0qH/QBv4/52bNNXC7V/V+ev9LCho2iRMeYYyXB +yo1wBAGR83lS9cF/tOABcYrxjdP54Sfkyh5fomcg8SV7zap6C8mhbV8r3EujbKCx +igH3fMX5F/eRwNCzaMMyQsXaxTJ3trk= +-----END CERTIFICATE----- diff --git a/tests/gold_tests/headers/data/www.passthrough.test_get.txt b/tests/gold_tests/headers/data/www.passthrough.test_get.txt new file mode 100644 index 00000000000..0cba742da68 --- /dev/null +++ b/tests/gold_tests/headers/data/www.passthrough.test_get.txt @@ -0,0 +1,2 @@ +GET http://www.passthrough.test/ HTTP/1.1 + diff --git a/tests/gold_tests/headers/data/www.redirect301.test_get.txt b/tests/gold_tests/headers/data/www.redirect301.test_get.txt new file mode 100644 index 00000000000..48352678c17 --- /dev/null +++ b/tests/gold_tests/headers/data/www.redirect301.test_get.txt @@ -0,0 +1,2 @@ +GET http://www.redirect301.test/ HTTP/1.1 + diff --git a/tests/gold_tests/headers/data/www.redirect302.test_get.txt b/tests/gold_tests/headers/data/www.redirect302.test_get.txt new file mode 100644 index 00000000000..6aae2667c6d --- /dev/null +++ b/tests/gold_tests/headers/data/www.redirect302.test_get.txt @@ -0,0 +1,2 @@ +GET http://www.redirect302.test/ HTTP/1.1 + diff --git a/tests/gold_tests/headers/data/www.redirect307.test_get.txt b/tests/gold_tests/headers/data/www.redirect307.test_get.txt new file mode 100644 index 00000000000..b37c8ae1486 --- /dev/null +++ b/tests/gold_tests/headers/data/www.redirect307.test_get.txt @@ -0,0 +1,2 @@ +GET http://www.redirect307.test/ HTTP/1.1 + diff --git a/tests/gold_tests/headers/data/www.redirect308.test_get.txt 
b/tests/gold_tests/headers/data/www.redirect308.test_get.txt new file mode 100644 index 00000000000..05bcbf8ec78 --- /dev/null +++ b/tests/gold_tests/headers/data/www.redirect308.test_get.txt @@ -0,0 +1,2 @@ +GET http://www.redirect308.test/ HTTP/1.1 + diff --git a/tests/gold_tests/headers/domain-blacklist-30x.test.py b/tests/gold_tests/headers/domain-blacklist-30x.test.py new file mode 100644 index 00000000000..aef08d65767 --- /dev/null +++ b/tests/gold_tests/headers/domain-blacklist-30x.test.py @@ -0,0 +1,103 @@ +''' +Tests 30x responses are returned for matching domains +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +Test.Summary = ''' +Tests 30x responses are returned for matching domains +''' + +Test.SkipUnless(Condition.HasProgram("grep","grep needs to be installed on system for this test to work")) + +ts=Test.MakeATSProcess("ts") +server=Test.MakeOriginServer("server") + +REDIRECT_301_HOST='www.redirect301.test' +REDIRECT_302_HOST='www.redirect302.test' +REDIRECT_307_HOST='www.redirect307.test' +REDIRECT_308_HOST='www.redirect308.test' +PASSTHRU_HOST='www.passthrough.test' + +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'header_rewrite|dbg_header_rewrite', + 'proxy.config.body_factory.enable_logging': 1, + }) + +ts.Disk.remap_config.AddLine("""\ +regex_map http://{0}/ http://{0}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_301.conf +regex_map http://{1}/ http://{1}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_302.conf +regex_map http://{2}/ http://{2}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_307.conf +regex_map http://{3}/ http://{3}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_308.conf +""".format(REDIRECT_301_HOST, REDIRECT_302_HOST, REDIRECT_307_HOST, REDIRECT_308_HOST) +) + +for x in (1,2,7,8): + ts.Disk.MakeConfigFile("header_rewrite_rules_30{0}.conf".format(x)).AddLine("""\ +set-redirect 30{0} "%" +""".format(x)) + +Test.Setup.Copy(os.path.join(os.pardir,os.pardir,'tools','tcp_client.py')) +Test.Setup.Copy('data') + +redirect301tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_301_HOST)) +redirect301tr.Processes.Default.StartBefore(Test.Processes.ts) +redirect301tr.StillRunningAfter = ts +redirect301tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_301_HOST)) +redirect301tr.Processes.Default.TimeOut=5 # seconds +redirect301tr.Processes.Default.ReturnCode=0 
+redirect301tr.Processes.Default.Streams.stdout="redirect301_get.gold" + +redirect302tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_302_HOST)) +redirect302tr.StillRunningBefore = ts +redirect302tr.StillRunningAfter = ts +redirect302tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_302_HOST)) +redirect302tr.Processes.Default.TimeOut=5 # seconds +redirect302tr.Processes.Default.ReturnCode=0 +redirect302tr.Processes.Default.Streams.stdout="redirect302_get.gold" + + +redirect307tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_307_HOST)) +redirect302tr.StillRunningBefore = ts +redirect307tr.StillRunningAfter = ts +redirect307tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_307_HOST)) +redirect307tr.Processes.Default.TimeOut=5 # seconds +redirect307tr.Processes.Default.ReturnCode=0 +redirect307tr.Processes.Default.Streams.stdout="redirect307_get.gold" + +redirect308tr=Test.AddTestRun("Test domain {0}".format(REDIRECT_308_HOST)) +redirect308tr.StillRunningBefore = ts +redirect308tr.StillRunningAfter = ts +redirect308tr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 'data/{0}_get.txt'.format(REDIRECT_308_HOST)) +redirect308tr.Processes.Default.TimeOut=5 # seconds +redirect308tr.Processes.Default.ReturnCode=0 +redirect308tr.Processes.Default.Streams.stdout="redirect308_get.gold" + +passthroughtr=Test.AddTestRun("Test domain {0}".format(PASSTHRU_HOST)) +passthroughtr.StillRunningBefore = ts +passthroughtr.StillRunningAfter = ts +passthroughtr.Processes.Default.Command="python tcp_client.py 127.0.0.1 {0} {1} | grep -v '^Date: '| grep -v '^Server: ATS/'".\ + format(ts.Variables.port, 
'data/{0}_get.txt'.format(PASSTHRU_HOST)) +passthroughtr.Processes.Default.TimeOut=5 # seconds +passthroughtr.Processes.Default.ReturnCode=0 +passthroughtr.Processes.Default.Streams.stdout="passthrough_get.gold" diff --git a/tests/gold_tests/headers/http408.gold b/tests/gold_tests/headers/http408.gold new file mode 100644 index 00000000000..64646016471 --- /dev/null +++ b/tests/gold_tests/headers/http408.gold @@ -0,0 +1,22 @@ +HTTP/1.1 408`` +Date:`` +Connection: close +Server:`` +Cache-Control: no-store +Content-Type: text/html; charset=utf-8 +Content-Language: en + + + +Inactivity Timeout + + + +

Inactivity Timeout

+
+ + +Description: Too much time has passed without sending any data for document. + +
+ diff --git a/tests/gold_tests/headers/http408.test.py b/tests/gold_tests/headers/http408.test.py new file mode 100644 index 00000000000..9212023f905 --- /dev/null +++ b/tests/gold_tests/headers/http408.test.py @@ -0,0 +1,58 @@ +''' +Test the 408 response header. +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess + +Test.Summary = ''' +Check 408 response header for protocol stack data. 
+''' + +Test.SkipUnless( +) +Test.ContinueOnFail = True + +# Define default ATS +ts = Test.MakeATSProcess("ts") +server = Test.MakeOriginServer("server") + +testName = "408 test" + +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +server.addResponse("sessionlog.json", request_header, response_header) + +ts.Disk.remap_config.AddLine( + 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) + ) + +ts.Disk.records_config.update({ + 'proxy.config.http.transaction_no_activity_timeout_in' : 2, + }) + +Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_408_client.py')) + +tr = Test.AddTestRun() +tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.Processes.Default.Command = 'python tcp_408_client.py 127.0.0.1 {0} 4'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.TimeOut = 10 +tr.Processes.Default.Streams.stdout = "http408.gold" diff --git a/tests/gold_tests/headers/passthrough_get.gold b/tests/gold_tests/headers/passthrough_get.gold new file mode 100644 index 00000000000..24e797d574f --- /dev/null +++ b/tests/gold_tests/headers/passthrough_get.gold @@ -0,0 +1,22 @@ +HTTP/1.1 404 Not Found +Connection: keep-alive +Cache-Control: no-store +Content-Type: text/html +Content-Language: en +Content-Length: 297 + + + +Not Found on Accelerator + + + +

Not Found on Accelerator

+
+ + +Description: Your request on the specified host was not found. +Check the location and try again. + +
+ diff --git a/tests/gold_tests/headers/redirect301_get.gold b/tests/gold_tests/headers/redirect301_get.gold new file mode 100644 index 00000000000..6d70fa39c4e --- /dev/null +++ b/tests/gold_tests/headers/redirect301_get.gold @@ -0,0 +1,22 @@ +HTTP/1.1 301 Redirect +Connection: keep-alive +Cache-Control: no-store +Location: http://www.redirect301.test/ +Content-Type: text/html +Content-Language: en +Content-Length: 310 + + + +Document Has Moved + + + +

Document Has Moved

+
+ + +Description: The document you requested has moved to a new location. The new location is "http://www.redirect301.test/". + +
+ diff --git a/tests/gold_tests/headers/redirect302_get.gold b/tests/gold_tests/headers/redirect302_get.gold new file mode 100644 index 00000000000..8baafcb3463 --- /dev/null +++ b/tests/gold_tests/headers/redirect302_get.gold @@ -0,0 +1,22 @@ +HTTP/1.1 302 Redirect +Connection: keep-alive +Cache-Control: no-store +Location: http://www.redirect302.test/ +Content-Type: text/html +Content-Language: en +Content-Length: 310 + + + +Document Has Moved + + + +

Document Has Moved

+
+ + +Description: The document you requested has moved to a new location. The new location is "http://www.redirect302.test/". + +
+ diff --git a/tests/gold_tests/headers/redirect307_get.gold b/tests/gold_tests/headers/redirect307_get.gold new file mode 100644 index 00000000000..282993bdeca --- /dev/null +++ b/tests/gold_tests/headers/redirect307_get.gold @@ -0,0 +1,22 @@ +HTTP/1.1 307 Redirect +Connection: keep-alive +Cache-Control: no-store +Location: http://www.redirect307.test/ +Content-Type: text/html +Content-Language: en +Content-Length: 310 + + + +Document Has Moved + + + +

Document Has Moved

+
+ + +Description: The document you requested has moved to a new location. The new location is "http://www.redirect307.test/". + +
+ diff --git a/tests/gold_tests/headers/redirect308_get.gold b/tests/gold_tests/headers/redirect308_get.gold new file mode 100644 index 00000000000..05cae3d4ff1 --- /dev/null +++ b/tests/gold_tests/headers/redirect308_get.gold @@ -0,0 +1,22 @@ +HTTP/1.1 308 Redirect +Connection: keep-alive +Cache-Control: no-store +Location: http://www.redirect308.test/ +Content-Type: text/html +Content-Language: en +Content-Length: 310 + + + +Document Has Moved + + + +

Document Has Moved

+
+ + +Description: The document you requested has moved to a new location. The new location is "http://www.redirect308.test/". + +
+ diff --git a/tests/gold_tests/logging/custom-log.test.py b/tests/gold_tests/logging/custom-log.test.py new file mode 100644 index 00000000000..683a251da6e --- /dev/null +++ b/tests/gold_tests/logging/custom-log.test.py @@ -0,0 +1,103 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +Test.Summary = ''' +Test custom log file format +''' +# need Curl +Test.SkipUnless( + Condition.HasProgram( + "curl", "Curl need to be installed on system for this test to work"), + Condition.IsPlatform("linux") +) + +# Define default ATS +ts = Test.MakeATSProcess("ts") + +# setup some config file for this server +ts.Disk.remap_config.AddLine( + 'map / http://www.linkedin.com/ @action=deny' +) + +ts.Disk.logging_config.AddLines( + '''custom = format { + Format = "% %" +} + +log.ascii { + Format = custom, + Filename = 'test_log_field' +}'''.split("\n") +) + +# ######################################################################### +# at the end of the different test run a custom log file should exist +# Because of this we expect the testruns to pass the real test is if the +# customlog file exists and passes the format check +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'test_log_field.log'), + exists=True, content='gold/custom.gold') + +# first test is a miss for default +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(Test.Processes.ts) + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.1.1.1:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.2.2.2:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.3.3.3:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.3.0.1:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() 
+tr.Processes.Default.Command = 'curl "http://127.43.2.1:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.213.213.132:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.123.32.243:{0}" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 + +tr = Test.AddTestRun() +tr.DelayStart = 10 +tr.Processes.Default.Command = 'echo "Delay"' +tr.Processes.Default.ReturnCode = 0 diff --git a/tests/gold_tests/logging/gold/custom.gold b/tests/gold_tests/logging/gold/custom.gold new file mode 100644 index 00000000000..4432d333358 --- /dev/null +++ b/tests/gold_tests/logging/gold/custom.gold @@ -0,0 +1,8 @@ +127.0.0.1 7F000001 +127.1.1.1 7F010101 +127.2.2.2 7F020202 +127.3.3.3 7F030303 +127.3.0.1 7F030001 +127.43.2.1 7F2B0201 +127.213.213.132 7FD5D584 +127.123.32.243 7F7B20F3 diff --git a/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-303.gold b/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-303.gold new file mode 100644 index 00000000000..94a42adfb5c --- /dev/null +++ b/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-303.gold @@ -0,0 +1,14 @@ +`` +> GET http://www.example.com`` +> Host: www.example.com`` +> User-Agent: curl/`` +> Accept: */* +`` +< HTTP/1.1 303 See Other +< Date: `` +< Age: `` +< Transfer-Encoding: chunked +< Proxy-Connection: keep-alive +< Server: ATS/`` +< +`` diff --git a/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-tag.gold b/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-tag.gold new file mode 100644 index 00000000000..eac700f5a94 --- /dev/null +++ b/tests/gold_tests/pluginTest/header_rewrite/gold/header_rewrite-tag.gold @@ -0,0 +1 @@ +``DIAG: (header_rewrite)`` diff --git a/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py 
b/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py new file mode 100644 index 00000000000..f4dfa0c3563 --- /dev/null +++ b/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py @@ -0,0 +1,66 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +Test.Summary = ''' +Test a basic remap of a http connection +''' +# need Curl +Test.SkipUnless( + Condition.HasProgram("curl", "Curl need to be installed on system for this test to work") +) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts") +server = Test.MakeOriginServer("server") + +Test.testName = "" +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +# expected response from the origin server +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + +# add response to the server dictionary +server.addResponse("sessionfile.log", request_header, response_header) +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'header.*', +}) +# The following rule changes the status code returned from origin server to 303 +ts.Setup.CopyAs('rules/rule.conf', Test.RunDirectory) +ts.Disk.plugin_config.AddLine( + 'header_rewrite.so {0}/rule.conf'.format(Test.RunDirectory) +) +ts.Disk.remap_config.AddLine( + 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) +) +ts.Disk.remap_config.AddLine( + 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port) +) + +# call localhost straight +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.Processes.Default.Streams.stderr = "gold/header_rewrite-303.gold" +tr.StillRunningAfter = server + +ts.Streams.All = "gold/header_rewrite-tag.gold" diff --git 
a/tests/gold_tests/pluginTest/header_rewrite/rules/rule.conf b/tests/gold_tests/pluginTest/header_rewrite/rules/rule.conf new file mode 100644 index 00000000000..551373b411c --- /dev/null +++ b/tests/gold_tests/pluginTest/header_rewrite/rules/rule.conf @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cond %{STATUS} =200 +set-status 303 diff --git a/tests/gold_tests/remap/gold/remap-200.gold b/tests/gold_tests/remap/gold/remap-200.gold new file mode 100644 index 00000000000..619066a4e7e --- /dev/null +++ b/tests/gold_tests/remap/gold/remap-200.gold @@ -0,0 +1,14 @@ +`` +> GET http://www.example.com`` +> Host: www.example.com`` +> User-Agent: curl/`` +> Accept: */* +`` +< HTTP/1.1 200 OK +< Date: `` +< Age: `` +< Transfer-Encoding: chunked +< Proxy-Connection: keep-alive +< Server: ATS/`` +< +`` diff --git a/tests/gold_tests/remap/gold/remap-404.gold b/tests/gold_tests/remap/gold/remap-404.gold new file mode 100644 index 00000000000..779053a2eb0 --- /dev/null +++ b/tests/gold_tests/remap/gold/remap-404.gold @@ -0,0 +1,12 @@ +`` +> GET `` HTTP/1.1 +> Host: `` +> User-Agent: curl/`` +`` +< HTTP/1.1 404 Not Found +< Date: `` +< Proxy-Connection: keep-alive +< Server: ATS/`` +`` +< Content-Type: text/html +`` diff --git a/tests/gold_tests/remap/gold/remap-hitATS-404.gold b/tests/gold_tests/remap/gold/remap-hitATS-404.gold new file mode 100644 index 00000000000..67a81bc4f9a --- /dev/null +++ b/tests/gold_tests/remap/gold/remap-hitATS-404.gold @@ -0,0 +1,11 @@ +`` +> GET / HTTP/1.1 +> Host: `` +> User-Agent: curl/`` +`` +< HTTP/1.1 404 Not Found on Accelerator +< Date: `` +< Connection: `` +< Server: ATS/`` +< Content-Type: text/html +`` diff --git a/tests/gold_tests/remap/gold/remap-https-200.gold b/tests/gold_tests/remap/gold/remap-https-200.gold new file mode 100644 index 00000000000..9cd42fc43a0 --- /dev/null +++ b/tests/gold_tests/remap/gold/remap-https-200.gold @@ -0,0 +1,13 @@ +`` +> GET / HTTP/1.1 +> Host: www.example.com`` +> User-Agent: curl/`` +`` +< HTTP/1.1 200 OK +< Date: `` +< Age: `` +< Transfer-Encoding: chunked +< Connection: keep-alive +< Server: ATS/`` +< +`` diff --git a/tests/gold_tests/remap/remap_http.test.py b/tests/gold_tests/remap/remap_http.test.py new file mode 100644 index 00000000000..9221e266ffc --- /dev/null +++ 
b/tests/gold_tests/remap/remap_http.test.py @@ -0,0 +1,94 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +Test.Summary = ''' +Test a basic remap of a http connection +''' +# need Curl +Test.SkipUnless( + Condition.HasProgram("curl", "Curl need to be installed on system for this test to work") +) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts") +server = Test.MakeOriginServer("server") + +Test.testName = "" +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +# expected response from the origin server +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + +# add response to the server dictionary +server.addResponse("sessionfile.log", request_header, response_header) +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'url.*', +}) + +ts.Disk.remap_config.AddLine( + 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) +) +ts.Disk.remap_config.AddLine( + 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port) +) + +# call localhost 
straight +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}/" --verbose'.format(ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(Test.Processes.ts) +tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold" +tr.StillRunningAfter = server + +# www.example.com host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-200.gold" + +# www.example.com:80 host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com:80/" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-200.gold" + +# www.example.com:8080 host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com:8080" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-200.gold" + +# no rule for this +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.test.com/" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-404.gold" + +# bad port +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://www.example.com:1234/" -H "Proxy-Connection: keep-alive" --verbose'.format( + ts.Variables.port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-404.gold" diff --git 
a/tests/gold_tests/remap/remap_https.test.py b/tests/gold_tests/remap/remap_https.test.py new file mode 100644 index 00000000000..ad93a22db5f --- /dev/null +++ b/tests/gold_tests/remap/remap_https.test.py @@ -0,0 +1,116 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +Test.Summary = ''' +Test a basic remap of a http connection +''' +# need Curl +Test.SkipUnless( + Condition.HasProgram("curl", "Curl need to be installed on system for this test to work") +) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts", select_ports=False) +server = Test.MakeOriginServer("server") + +#**testname is required** +testName = "" +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +# desired response form the origin server +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +server.addResponse("sessionlog.json", request_header, response_header) + +# add ssl materials like key, certificates for the server +ts.addSSLfile("ssl/server.pem") +ts.addSSLfile("ssl/server.key") + +ts.Variables.ssl_port = 4443 +ts.Disk.records_config.update({ + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'lm|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.number.threads': 0, + # enable ssl port + 'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port), + 'proxy.config.ssl.client.verify.server': 0, + 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', +}) + +ts.Disk.remap_config.AddLine( + 'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) +) +ts.Disk.remap_config.AddLine( + 'map https://www.example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, 
ts.Variables.ssl_port) +) + + +ts.Disk.ssl_multicert_config.AddLine( + 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' +) + +# call localhost straight +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} --verbose'.format(ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 + +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(server) +# Delay on readyness of our ssl ports +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port)) +tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold" +tr.StillRunningAfter = server +tr.StillRunningAfter = ts + + +# www.example.com host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com" --verbose'.format( + ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-https-200.gold" + + +# www.example.com:80 host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com:443" --verbose'.format( + ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-https-200.gold" + +# www.example.com:8080 host +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com:4443" --verbose'.format( + ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-https-200.gold" + +# no rule for this +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.test.com" --verbose'.format( + ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold" + +# bad port +tr = Test.AddTestRun() +tr.Processes.Default.Command = 'curl 
--http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com:1234" --verbose'.format( + ts.Variables.ssl_port) +tr.Processes.Default.ReturnCode = 0 +tr.Processes.Default.Streams.stderr = "gold/remap-hitATS-404.gold" diff --git a/tests/gold_tests/remap/ssl/server.key b/tests/gold_tests/remap/ssl/server.key new file mode 100644 index 00000000000..4c7a661a6bd --- /dev/null +++ b/tests/gold_tests/remap/ssl/server.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- diff --git a/tests/gold_tests/remap/ssl/server.pem b/tests/gold_tests/remap/ssl/server.pem new file mode 100644 index 00000000000..a1de94fa776 --- /dev/null +++ b/tests/gold_tests/remap/ssl/server.pem @@ -0,0 +1,32 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 
+GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICszCCAhwCCQCRJsJJ+mTsdDANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wHhcNMTYwODI1MjI1NzIxWhcNMTcwODI1MjI1NzIxWjCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYwc6JQX45GZmMDEjwxYT11 +uVvuBBInfpYJeU8WIXHrKcX5LUSRcBikiKnlfSnMNRohsu6TElQACc60wQ7Q8KDE +lBSsS1FaHzCIl1t1AkXRmz/1H65JSBvrV/6Z1NC+Gp58EbH7Gul8ByC1xaJm5ID1 +Dd++kOPlY5ZI9ZcFS7HLAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAXSVfZ5p1TkhW +QiYq9nfQlBnX2NVaf8ymA8edQR0qH/QBv4/52bNNXC7V/V+ev9LCho2iRMeYYyXB +yo1wBAGR83lS9cF/tOABcYrxjdP54Sfkyh5fomcg8SV7zap6C8mhbV8r3EujbKCx +igH3fMX5F/eRwNCzaMMyQsXaxTJ3trk= +-----END CERTIFICATE----- diff --git a/tests/gold_tests/tls/gold/ssl-post.gold b/tests/gold_tests/tls/gold/ssl-post.gold new file mode 100644 index 00000000000..b71b02512bb --- /dev/null +++ b/tests/gold_tests/tls/gold/ssl-post.gold @@ -0,0 +1,2 @@ +Sent request +All threads finished diff --git a/tests/gold_tests/tls/ssl-post.c b/tests/gold_tests/tls/ssl-post.c new file mode 100644 index 00000000000..e4c9f4f0733 --- /dev/null +++ b/tests/gold_tests/tls/ssl-post.c @@ -0,0 +1,330 @@ +/** @file + + SSL post test client + + @section 
license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NUM_THREADS 10 + +char req_buf[10000]; +char post_buf[1000]; + +pthread_mutex_t *mutex_buf = NULL; + +struct thread_info +{ + struct addrinfo *result, *rp; + SSL_SESSION *session; +}; + +void +SSL_locking_callback(int mode, int type, const char *file, int line) +{ + if (mode & CRYPTO_LOCK) { + pthread_mutex_lock(&mutex_buf[type]); + } else if (mode & CRYPTO_UNLOCK) { + pthread_mutex_unlock(&mutex_buf[type]); + } else { + printf("invalid SSL locking mode 0x%x\n", mode); + } +} + +void +SSL_pthreads_thread_id(CRYPTO_THREADID *id) +{ + CRYPTO_THREADID_set_numeric(id, (unsigned long)pthread_self()); +} + +void *spawn_same_session_send(void *arg) +{ + struct thread_info *tinfo = (struct thread_info *)arg; + + // Start again, but with the session set this time + int sfd = socket(tinfo->rp->ai_family, tinfo->rp->ai_socktype, + tinfo->rp->ai_protocol); + if (sfd == -1) + { + printf("Failed to get socket"); + perror("Failed"); + pthread_exit((void *)1); + } + if (connect(sfd, tinfo->rp->ai_addr, tinfo->rp->ai_addrlen) < 0) + { + printf("Failed to connect %d\n", sfd); 
+ perror("Failed"); + pthread_exit((void *)1); + } + + fcntl(sfd, F_SETFL, O_NONBLOCK); + // Make sure we are nagling + int one = 0; + setsockopt(sfd, SOL_TCP, TCP_NODELAY, &one, sizeof(one)); + + SSL_CTX *client_ctx = SSL_CTX_new(SSLv23_client_method()); + SSL *ssl = SSL_new(client_ctx); + SSL_set_session(ssl, tinfo->session); + + SSL_set_fd(ssl, sfd); + int ret = SSL_connect(ssl); + int read_count = 0; + int write_count = 1; + int write_ret = -1; + int post_write_ret = -1; + + while (ret < 0) { + int error = SSL_get_error(ssl, ret); + fd_set reads; + fd_set writes; + FD_ZERO(&reads); + FD_ZERO(&writes); + switch (error) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_ACCEPT: + FD_SET(sfd, &reads); + read_count++; + break; + case SSL_ERROR_WANT_CONNECT: + case SSL_ERROR_WANT_WRITE: + FD_SET(sfd, &writes); + write_count++; + break; + case SSL_ERROR_SYSCALL: + case SSL_ERROR_SSL: + case SSL_ERROR_ZERO_RETURN: + printf("Error %d\n", error); + pthread_exit((void *)1); + break; + default: + //printf("Unknown error is %d", error); + FD_SET(sfd, &reads); + FD_SET(sfd, &writes); + break; + } + ret = select(sfd+1, &reads, &writes, NULL, NULL); + if (FD_ISSET(sfd, &reads) || FD_ISSET(sfd, &writes)) { + ret = write_ret = SSL_write(ssl, req_buf, strlen(req_buf)); + if (write_ret >= 0) + post_write_ret = SSL_write(ssl, post_buf, sizeof(post_buf)); + } + } + + while (write_ret < 0) { + write_ret = SSL_write(ssl, req_buf, strlen(req_buf)); + } + while (post_write_ret < 0) { + post_write_ret = SSL_write(ssl, post_buf, sizeof(post_buf)); + } + + // Have to do the shutdown so the data packet is sent out fast enough + // so it might be read with the last handshake packet + shutdown(sfd, SHUT_WR); + + char input_buf[1024]; + int read_bytes = SSL_read(ssl, input_buf, sizeof(input_buf)); + int total_read = 0; + while (read_bytes != 0) { + fd_set reads; + fd_set writes; + FD_ZERO(&reads); + FD_ZERO(&writes); + if (read_bytes > 0) { + total_read += read_bytes; + FD_SET(sfd, &reads); 
+ } + else { + int error = SSL_get_error(ssl, read_bytes); + switch (error) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_ACCEPT: + FD_SET(sfd, &reads); + break; + case SSL_ERROR_WANT_CONNECT: + case SSL_ERROR_WANT_WRITE: + printf("Unexpected write\n"); + pthread_exit((void *)1); + break; + case SSL_ERROR_SYSCALL: + case SSL_ERROR_SSL: + case SSL_ERROR_ZERO_RETURN: + printf("Error Read\n"); + pthread_exit((void *)1); + break; + default: + FD_SET(sfd, &reads); + FD_SET(sfd, &writes); + break; + } + } + select(sfd+1, &reads, &writes, NULL, NULL); + if (FD_ISSET(sfd, &reads)) { + read_bytes = SSL_read(ssl, input_buf, sizeof(input_buf)); + } + } + if (read_bytes > 0 && read_bytes < 1024) input_buf[read_bytes] = '\0'; + else input_buf[1023] = '\0'; + //printf("total_bytes=%d Received bytes=%d handshake writes=%d handshake reads=%d\n", total_read, read_bytes, write_count, read_count); + + // Leaking the socket, so that the EOS does not wake up a potentially + // stalled ATS connection. Want to wait for the inactivity timeout + // to make it clear that there was a stalling problem + //close(sfd); + pthread_exit(NULL); +} + +/** + * Connect to a server. + * Handshake + * Exit immediatesly + */ +int +main(int argc, char *argv[]) +{ + struct addrinfo hints; + struct addrinfo *result, *rp; + int sfd, s, j; + size_t len; + ssize_t nread; + + if (argc < 4) { + fprintf(stderr, "Usage: %s host thread-count header-count [port]\n", argv[0]); + exit(EXIT_FAILURE); + } + char *host = argv[1]; + int header_count = atoi(argv[3]); + snprintf(req_buf, sizeof(req_buf), "POST /post HTTP/1.1\r\nHost: %s\r\nConnection: close\r\nContent-length:%d\r\n", host, sizeof(post_buf)); + int i; + for (i = 0; i < header_count; i++) { + sprintf(req_buf + strlen(req_buf), "header%d:%d\r\n", i, i); + } + strcat(req_buf, "\r\n"); + memset(post_buf, '0', sizeof(post_buf)); + + int thread_count = atoi(argv[2]); + + char *port = argc == 5 ? 
argv[4] : "443"; + + /* Obtain address(es) matching host/port */ + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */ + hints.ai_socktype = SOCK_STREAM; /* Datagram socket */ + hints.ai_flags = 0; + hints.ai_protocol = 0; /* Any protocol */ + + s = getaddrinfo(host, port, &hints, &result); + if (s != 0) { + fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(s)); + exit(EXIT_FAILURE); + } + + /* getaddrinfo() returns a list of address structures. + * Try each address until we successfully connect(2). + * socket(2) (or connect(2)) fails, we (close the socket + and) try the next address. */ + + for (rp = result; rp != NULL; rp = rp->ai_next) { + sfd = socket(rp->ai_family, rp->ai_socktype, + rp->ai_protocol); + if (sfd == -1) + continue; + if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) + break; /* Success */ + + close(sfd); + } + + if (rp == NULL) { /* No address succeeded */ + fprintf(stderr, "Could not connect\n"); + exit(EXIT_FAILURE); + } + + + //fcntl(sfd, F_SETFL, O_NONBLOCK); + + SSL_load_error_strings(); + SSL_library_init(); + + mutex_buf = (pthread_mutex_t *)OPENSSL_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t)); + for (i = 0; i < CRYPTO_num_locks(); i++) { + pthread_mutex_init(&mutex_buf[i], NULL); + } + + CRYPTO_set_locking_callback(SSL_locking_callback); + CRYPTO_THREADID_set_callback(SSL_pthreads_thread_id); + + + SSL_CTX *client_ctx = SSL_CTX_new(SSLv23_client_method()); + SSL *ssl = SSL_new(client_ctx); + + SSL_set_fd(ssl, sfd); + int ret = SSL_connect(ssl); + int read_count = 0; + int write_count = 1; + + printf("Sent request\n"); + if ((ret = SSL_write(ssl, req_buf, strlen(req_buf))) <= 0) { + int error = SSL_get_error(ssl, ret); + printf("SSL_write failed %d", error); + exit(1); + } + SSL_write(ssl, post_buf, sizeof(post_buf)); + + char input_buf[1024]; + int read_bytes = SSL_read(ssl, input_buf, sizeof(input_buf)); + if (read_bytes > 0 && read_bytes < 1024) input_buf[read_bytes] = '\0'; 
+ else input_buf[1023] = '\0'; + //printf("Received %d bytes %s\n", read_bytes, input_buf); + SSL_SESSION *session = SSL_get_session(ssl); + close(sfd); + struct thread_info tinfo; + tinfo.rp =rp; + tinfo.session = session; + pthread_t * threads = malloc(thread_count *sizeof(pthread_t)); + for (i= 0; i < thread_count; i++) { + pthread_create(threads + i, NULL, spawn_same_session_send, &tinfo); + } + + void *retval; + for (i = 0; i < thread_count; i++) { + retval = NULL; + pthread_join(threads[i], &retval); + if (retval != NULL) { + printf("Thread %d failed 0x%x\n", i, retval); + } + } + + printf("All threads finished\n"); + + exit(0); +} + diff --git a/tests/gold_tests/tls/ssl/server.key b/tests/gold_tests/tls/ssl/server.key new file mode 100644 index 00000000000..4c7a661a6bd --- /dev/null +++ b/tests/gold_tests/tls/ssl/server.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- diff --git a/tests/gold_tests/tls/ssl/server.pem b/tests/gold_tests/tls/ssl/server.pem new file mode 100644 index 00000000000..a1de94fa776 --- /dev/null +++ b/tests/gold_tests/tls/ssl/server.pem @@ -0,0 +1,32 @@ +-----BEGIN RSA PRIVATE 
KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICszCCAhwCCQCRJsJJ+mTsdDANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wHhcNMTYwODI1MjI1NzIxWhcNMTcwODI1MjI1NzIxWjCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYwc6JQX45GZmMDEjwxYT11 +uVvuBBInfpYJeU8WIXHrKcX5LUSRcBikiKnlfSnMNRohsu6TElQACc60wQ7Q8KDE +lBSsS1FaHzCIl1t1AkXRmz/1H65JSBvrV/6Z1NC+Gp58EbH7Gul8ByC1xaJm5ID1 +Dd++kOPlY5ZI9ZcFS7HLAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAXSVfZ5p1TkhW +QiYq9nfQlBnX2NVaf8ymA8edQR0qH/QBv4/52bNNXC7V/V+ev9LCho2iRMeYYyXB +yo1wBAGR83lS9cF/tOABcYrxjdP54Sfkyh5fomcg8SV7zap6C8mhbV8r3EujbKCx +igH3fMX5F/eRwNCzaMMyQsXaxTJ3trk= +-----END CERTIFICATE----- diff --git a/tests/gold_tests/tls/tls.test.py b/tests/gold_tests/tls/tls.test.py new file mode 100644 index 
00000000000..679209ca840 --- /dev/null +++ b/tests/gold_tests/tls/tls.test.py @@ -0,0 +1,102 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +Test.Summary = ''' +Test tls +''' + + +def Build(Test, filename, host): + tr = Test.AddTestRun("Build", "Build test file: {0}".format(filename)) + tr.Command = 'gcc -o ssl-post -O2 -g {0} -lssl -lpthread -lcrypto'.format(filename) + tr.ReturnCode = 0 + tr = Test.addTestRun("Run-Test") + tr.Command = './ssl-post {0} 40 378'.format(host) + +# ExtendTest(Build) + + +# need Curl +Test.SkipUnless( + Condition.HasProgram("curl", "Curl need to be installed on system for this test to work") +) +Test.ContinueOnFail = True +# Define default ATS +ts = Test.MakeATSProcess("ts", select_ports=False) +server = Test.MakeOriginServer("server") + + +tr = Test.AddTestRun("Build-Test", "build test file: ssl-post.c") +tr.Command = 'gcc -o ssl-post -O2 -g {0}/ssl-post.c -lssl -lpthread -lcrypto'.format(Test.RunDirectory) +tr.ReturnCode = 0 +tr.Setup.CopyAs('ssl-post.c', Test.RunDirectory) + +requestLocation = "test2" +reHost = "www.example.com" + +testName = "" + +header_count = 378 + +header_string = "POST /post HTTP/1.1\r\nHost: www.example.com\r\nContent-Length:1000\r\n" + +for i in 
range(0, 378): + header_string = "{1}header{0}:{0}\r\n".format(i, header_string) +header_string = "{0}\r\n".format(header_string) + +post_body = "" +for i in range(0, 1000): + post_body = "{0}0".format(post_body) + +# Add info the origin server responses +server.addResponse("sessionlog.json", + {"headers": header_string, "timestamp": "1469733493.993", "body": post_body}, + {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", "timestamp": "1469733493.993", "body": "ok"}) + +# add ssl materials like key, certificates for the server +ts.addSSLfile("ssl/server.pem") +ts.addSSLfile("ssl/server.key") + +ts.Variables.ssl_port = 4443 +ts.Disk.remap_config.AddLine( + 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) +) + +ts.Disk.ssl_multicert_config.AddLine( + 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' +) +ts.Disk.records_config.update({ + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # enable ssl port + 'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port), + 'proxy.config.ssl.client.verify.server': 0, + 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', +}) + +tr = Test.AddTestRun("Run-Test") +tr.Command = './ssl-post 127.0.0.1 40 378 4443' +tr.ReturnCode = 0 +# time delay as proxy.config.http.wait_for_cache could be broken +tr.Processes.Default.StartBefore(server) +tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port)) +tr.Processes.Default.Streams.stdout = "gold/ssl-post.gold" 
+tr.StillRunningAfter = server +tr.Processes.Default.TimeOut = 5 +tr.TimeOut = 5 diff --git a/tests/tools/README.md b/tests/tools/README.md new file mode 100644 index 00000000000..d6d17c021da --- /dev/null +++ b/tests/tools/README.md @@ -0,0 +1,42 @@ + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +These tools are meant to become it own repository in the future. They are here at the moment to help accelerate progress at getting everything working. + +Note these Tools require python 3.4 or better. + +# Traffic-Replay + +Replay client to replay session logs. 
+ +Usage: +python3.5 trafficreplay_v2/ -type -log_dir /path/to/log -v + +Session Log format (in JSON): + + {"version": "0.1", + "txns": [ + {"request": {"headers": "POST ……\r\n\r\n", "timestamp": "..", "body": ".."}, + "response": {"headers": "HTTP/1.1..\r\n\r\n", "timestamp": "..", "body": ".."}, + "uuid": "1"}, + {"request": {"headers": "POST ..….\r\n\r\n", "timestamp": "..", "body": ".."}, + "response": {"headers": "HTTP/1.1..\r\nr\n", "timestamp": "..", "body": ".."}, + "uuid": "2"} + ], + "timestamp": "....", + "encoding": "...."} + Configuration: The configuration required to run traffic-replay can be specified in traffic-replay/Config.py diff --git a/tests/tools/lib/result.py b/tests/tools/lib/result.py new file mode 100644 index 00000000000..664250d8321 --- /dev/null +++ b/tests/tools/lib/result.py @@ -0,0 +1,98 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + + +class TermColors: + ''' Collection of colors for printing out to terminal ''' + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + ENDC = '\033[0m' + + +ignoredFields = {'age', 'set-cookie', 'server', 'date', 'last-modified', 'via', 'expires', 'cahe-control'} # all lower case + + +class Result(object): + ''' Result encapsulates the result of a single session replay ''' + + def __init__(self, test_name, expected_response, received_response, recv_resp_body=None): + ''' expected_response and received_response can be any datatype the caller wants as long as they are the same datatype ''' + self._test_name = test_name + self._expected_response = expected_response + self._received_response = received_response + self._received_response_body = recv_resp_body + + def getTestName(self): + return self._test_name + + def getResultBool(self): + return self._expected_response == self._received_response + + def getRespBody(self): + if self._received_response_body: + return self._received_response_body + else: + return "" + + def Compare(self, received_dict, expected_dict): + global ignoredFields + try: + for key in received_dict: + # print(key) + if key.lower() in expected_dict and key.lower() not in ignoredFields: + #print("{0} ==? 
{1}".format(expected_dict[key.lower()],received_dict[key])) + if received_dict[key] != expected_dict[key.lower()]: + print("{0}Difference in the field \"{1}\": \n received:\n{2}\n expected:\n{3}{4}".format( + TermColors.FAIL, key, received_dict[key], expected_dict[key], TermColors.ENDC)) + return False + + except: + e = sys.exc_info() + print("Error in comparing key ", e, key, expected_dict[key.lower()], received_dict[key]) + return True + + def getResultString(self, received_dict, expected_dict, colorize=False): + global ignoredFields + ''' Return a nicely formatted result string with color if requested ''' + if self.getResultBool() and self.Compare(received_dict, expected_dict): + if colorize: + outstr = "{0}PASS{1}".format( + TermColors.OKGREEN, TermColors.ENDC) + + else: + outstr = "PASS" + + else: + if colorize: + outstr = "{0}FAIL{1}: expected {2}, received {3}, session file: {4}".format( + TermColors.FAIL, TermColors.ENDC, self._expected_response, self._received_response, self._test_name) + + else: + outstr = "FAIL: expected {0}, received {1}".format( + self._expected_response, self._received_response) + sys.exit(0) + + return outstr diff --git a/tests/tools/microServer/ssl/server.crt b/tests/tools/microServer/ssl/server.crt new file mode 100644 index 00000000000..0ce6ac5181a --- /dev/null +++ b/tests/tools/microServer/ssl/server.crt @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICszCCAhwCCQCRJsJJ+mTsdDANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wHhcNMTYwODI1MjI1NzIxWhcNMTcwODI1MjI1NzIxWjCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j 
+b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYwc6JQX45GZmMDEjwxYT11 +uVvuBBInfpYJeU8WIXHrKcX5LUSRcBikiKnlfSnMNRohsu6TElQACc60wQ7Q8KDE +lBSsS1FaHzCIl1t1AkXRmz/1H65JSBvrV/6Z1NC+Gp58EbH7Gul8ByC1xaJm5ID1 +Dd++kOPlY5ZI9ZcFS7HLAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAXSVfZ5p1TkhW +QiYq9nfQlBnX2NVaf8ymA8edQR0qH/QBv4/52bNNXC7V/V+ev9LCho2iRMeYYyXB +yo1wBAGR83lS9cF/tOABcYrxjdP54Sfkyh5fomcg8SV7zap6C8mhbV8r3EujbKCx +igH3fMX5F/eRwNCzaMMyQsXaxTJ3trk= +-----END CERTIFICATE----- diff --git a/tests/tools/microServer/ssl/server.pem b/tests/tools/microServer/ssl/server.pem new file mode 100644 index 00000000000..a1de94fa776 --- /dev/null +++ b/tests/tools/microServer/ssl/server.pem @@ -0,0 +1,32 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDWMHOiUF+ORmZjAxI8MWE9dblb7gQSJ36WCXlPFiFx6ynF+S1E +kXAYpIip5X0pzDUaIbLukxJUAAnOtMEO0PCgxJQUrEtRWh8wiJdbdQJF0Zs/9R+u +SUgb61f+mdTQvhqefBGx+xrpfAcgtcWiZuSA9Q3fvpDj5WOWSPWXBUuxywIDAQAB +AoGBAJPxRX2gjFAGWmQbU/YVmXfNH6navh8X/nx9sLeqrpE0AFeJI/ZPiqDKzMal +B43eSfNxwVi+ZxN0L1ICUbL9KKZvHs/QBxWLA1fGVAXrz7sRplEVvakPpTfHoEnv +sKaMWVKaK/S5WGbDhElb6zb/Lwo19DsIAPjGYqFvzFJBmobJAkEA9iSeTGkR9X26 +GywZoYrIMlRh34htOIRx1UUq88rFzdrCF21kQ4lhBIkX5OZMMy652i2gyak4OZTe +YewIv8jw9QJBAN7EQNHG8jPwXfVp91/fqxVQEfumuP2i6uiWWYQgZCmla2+0xcLZ +pMQ6sQEe10hhTrVnzHgAUVp50Ntn2jwBX78CQF09veGAI9d1Cxzj9cmmAvRd1r2Q +tp8kPOLnUsALXib+6WtqewLCdcf8DtsdClyRJMIraq85tRzK8fryKNZNzkkCQEgA +yS7FDj5JgCU15hZgFk1iPx3HCt44jZM2HaL+UUHAzRQjKxTLAl3G1rWVAWLMyQML +lORoveLvotl4HOruSsMCQQCAx9dV9JUSFoyc1CWILp/FgUH/se4cjQCThGO0DoQQ +vGTYmntY7j9WRJ9esQrjdD6Clw8zM/45GIBNwnXzqo7Z +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICszCCAhwCCQCRJsJJ+mTsdDANBgkqhkiG9w0BAQsFADCBnTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wHhcNMTYwODI1MjI1NzIxWhcNMTcwODI1MjI1NzIxWjCBnTELMAkGA1UEBhMC 
+VVMxCzAJBgNVBAgMAklMMRIwEAYDVQQHDAlDaGFtcGFpZ24xDjAMBgNVBAoMBVlh +aG9vMQ0wCwYDVQQLDARFZGdlMSgwJgYDVQQDDB9qdWljZXByb2R1Y2UuY29ycC5u +ZTEueWFob28uY29tMSQwIgYJKoZIhvcNAQkBFhVwZXJzaWEuYXppekB5YWhvby5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANYwc6JQX45GZmMDEjwxYT11 +uVvuBBInfpYJeU8WIXHrKcX5LUSRcBikiKnlfSnMNRohsu6TElQACc60wQ7Q8KDE +lBSsS1FaHzCIl1t1AkXRmz/1H65JSBvrV/6Z1NC+Gp58EbH7Gul8ByC1xaJm5ID1 +Dd++kOPlY5ZI9ZcFS7HLAgMBAAEwDQYJKoZIhvcNAQELBQADgYEAXSVfZ5p1TkhW +QiYq9nfQlBnX2NVaf8ymA8edQR0qH/QBv4/52bNNXC7V/V+ev9LCho2iRMeYYyXB +yo1wBAGR83lS9cF/tOABcYrxjdP54Sfkyh5fomcg8SV7zap6C8mhbV8r3EujbKCx +igH3fMX5F/eRwNCzaMMyQsXaxTJ3trk= +-----END CERTIFICATE----- diff --git a/tests/tools/microServer/uWServer.py b/tests/tools/microServer/uWServer.py new file mode 100644 index 00000000000..0ee4ac62189 --- /dev/null +++ b/tests/tools/microServer/uWServer.py @@ -0,0 +1,684 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import string +import http.client +import cgi +import time +import sys +import json +import os +import threading +from ipaddress import ip_address +from http.server import BaseHTTPRequestHandler, HTTPServer +from socketserver import ThreadingMixIn, ForkingMixIn, BaseServer +from http import HTTPStatus +import argparse +import ssl +import socket +import importlib.util + +test_mode_enabled = True +__version__ = "1.0" + + +sys.path.append( + os.path.normpath( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..' + ) + ) +) + +import sessionvalidation.sessionvalidation as sv + + +SERVER_PORT = 5005 # default port +HTTP_VERSION = 'HTTP/1.1' +G_replay_dict = {} + +count = 0 + +# Simple class to hold lists of callbacks associated with a key. + + +class HookSet: + # Helper class to provide controlled access to the HookSet to the loading module. + class Registrar: + def __init__(self, hook_set): + self.hooks = hook_set + + def register(self, hook, cb): + self.hooks.register(hook, cb) + + def __init__(self): + self.hooks = {} + self.modules = [] + self.registrar = HookSet.Registrar(self) + # Define all the valid hooks here. + for item in ['ReadRequestHook']: + if isinstance(item, list): + hook = item[0] + label = item[1] + else: + hook = label = item + exec("HookSet.{} = '{}'".format(label, hook)) + exec("HookSet.Registrar.{} = '{}'".format(label, hook)) + self.hooks[hook] = [] + + def load(self, source): + try: + spec = importlib.util.spec_from_file_location('Observer', source) + mod = importlib.util.module_from_spec(spec) + mod.Hooks = self.registrar + spec.loader.exec_module(mod) + except ImportError: + print("Failed to import {}".format(source)) + else: + self.modules.append(mod) + + # Add a callback cb to the hook. + # Error if the hook isn't defined. + def register(self, hook, cb): + if hook in self.hooks: + self.hooks[hook].append(cb) + else: + raise ValueError("{} is not a valid hook name".format(hook)) + + # Invoke a hook. 
Pass on any additional arguments to the callback. + def invoke(self, hook, *args, **kwargs): + cb_list = self.hooks[hook] + if cb_list == None: + raise ValueError("{} is not a valid hook name to invoke".format(hook)) + else: + for cb in cb_list: + cb(*args, **kwargs) + + +class ThreadingServer(ThreadingMixIn, HTTPServer): + '''This class forces the creation of a new thread on each connection''' + + def __init__(self, local_addr, handler_class, options): + HTTPServer.__init__(self, local_addr, handler_class) + self.hook_set = HookSet() + if (options.load): + self.hook_set.load(options.load) + + +class ForkingServer(ForkingMixIn, HTTPServer): + '''This class forces the creation of a new process on each connection''' + pass + + +class SSLServer(ThreadingMixIn, HTTPServer): + def __init__(self, server_address, HandlerClass, options): + BaseServer.__init__(self, server_address, HandlerClass) + pwd = os.path.dirname(os.path.realpath(__file__)) + keys = os.path.join(pwd, options.key) + certs = os.path.join(pwd, options.cert) + self.options = options + self.hook_set = HookSet() + + self.daemon_threads = True + self.protocol_version = 'HTTP/1.1' + + if options.load: + self.hook_set.load(options.load) + + if options.clientverify: + self.socket = ssl.wrap_socket(socket.socket(self.address_family, self.socket_type), + keyfile=keys, certfile=certs, server_side=True, cert_reqs=ssl.CERT_REQUIRED, ca_certs='/etc/ssl/certs/ca-certificates.crt') + else: + self.socket = ssl.wrap_socket(socket.socket(self.address_family, self.socket_type), + keyfile=keys, certfile=certs, server_side=True) + + self.server_bind() + self.server_activate() + + +class MyHandler(BaseHTTPRequestHandler): + def handleExpect100Continue(self, contentLength, chunked=False): + print("....expect", contentLength) + self.wfile.write(bytes('HTTP/1.1 100 Continue\r\n\r\n', 'UTF-8')) + # self.send_response(HTTPStatus.CONTINUE) + # self.send_header('Server','blablabla') + #self.send_header('Connection', 'keep-alive') + 
# self.end_headers() + if(not chunked): + message = self.rfile.read(contentLength) + else: + readChunks() + + def getTestName(self, requestline): + key = None + keys = requestline.split(" ") + # print(keys) + if keys: + rkey = keys[1] + key = rkey.split("/", 1)[1] + if key + "/" in G_replay_dict: + key = key + "/" + elif len(key) > 1 and key[:-1] in G_replay_dict: + key = key[:-1] + return key + + def parseRequestline(self, requestline): + testName = None + return testName + + def testMode(self, requestline): + print(requestline) + key = self.parseRequestline(requestline) + + self.send_response(200) + self.send_header('Connection', 'close') + self.end_headers() + + def get_response_code(self, header): + # this could totally go wrong + return int(header.split(' ')[1]) + + def generator(self): + yield 'microserver' + yield 'yahoo' + + def send_response(self, code, message=None): + ''' Override `send_response()`'s tacking on of server and date header lines. ''' + # self.log_request(code) + self.send_response_only(code, message) + + def createDummyBodywithLength(self, numberOfbytes): + if numberOfbytes == 0: + return None + body = 'a' + while numberOfbytes != 1: + body += 'b' + numberOfbytes -= 1 + return body + + def writeChunkedData(self): + for chunk in self.generator(): + response_string = bytes('%X\r\n%s\r\n' % (len(chunk), chunk), 'UTF-8') + self.wfile.write(response_string) + response_string = bytes('0\r\n\r\n', 'UTF-8') + self.wfile.write(response_string) + + def readChunks(self): + raw_data = b'' + raw_size = self.rfile.readline(65537) + size = str(raw_size, 'UTF-8').rstrip('\r\n') + # print("==========================================>",size) + size = int(size, 16) + while size > 0: + #print("reading bytes",raw_size) + chunk = self.rfile.read(size + 2) # 2 for reading /r/n + #print("cuhnk: ",chunk) + raw_data += chunk + raw_size = self.rfile.readline(65537) + size = str(raw_size, 'UTF-8').rstrip('\r\n') + size = int(size, 16) + #print("full chunk",raw_data) + 
chunk = self.rfile.readline(65537) # read the extra blank newline \r\n after the last chunk + + def send_header(self, keyword, value): + """Send a MIME header to the headers buffer.""" + if self.request_version != 'HTTP/0.9': + if not hasattr(self, '_headers_buffer'): + self._headers_buffer = [] + self._headers_buffer.append( + ("%s: %s\r\n" % (keyword, value)).encode('UTF-8', 'strict')) # original code used latin-1.. seriously? + + if keyword.lower() == 'connection': + if value.lower() == 'close': + self.close_connection = True + elif value.lower() == 'keep-alive': + self.close_connection = False + + def parse_request(self): + """Parse a request (internal). + + The request should be stored in self.raw_requestline; the results + are in self.command, self.path, self.request_version and + self.headers. + + Return True for success, False for failure; on failure, an + error is sent back. + + """ + + global count, test_mode_enabled + + self.command = None # set in case of error on the first line + self.request_version = version = self.default_request_version + self.close_connection = True + requestline = str(self.raw_requestline, 'UTF-8') + # print("request",requestline) + requestline = requestline.rstrip('\r\n') + self.requestline = requestline + + # Examine the headers and look for a Connection directive. 
+ try: + self.headers = http.client.parse_headers(self.rfile, + _class=self.MessageClass) + self.server.hook_set.invoke(HookSet.ReadRequestHook, self.headers) + + # read message body + if self.headers.get('Content-Length') != None: + bodysize = int(self.headers.get('Content-Length')) + #print("length of the body is",bodysize) + message = self.rfile.read(bodysize) + #print("message body",message) + elif self.headers.get('Transfer-Encoding', "") == 'chunked': + # print(self.headers) + self.readChunks() + except http.client.LineTooLong: + self.send_error( + HTTPStatus.BAD_REQUEST, + "Line too long") + return False + except http.client.HTTPException as err: + self.send_error( + HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE, + "Too many headers", + str(err) + ) + return False + + words = requestline.split() + if len(words) == 3: + command, path, version = words + if version[:5] != 'HTTP/': + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request version (%r)" % version) + return False + try: + base_version_number = version.split('/', 1)[1] + version_number = base_version_number.split(".") + # RFC 2145 section 3.1 says there can be only one "." and + # - major and minor numbers MUST be treated as + # separate integers; + # - HTTP/2.4 is a lower version than HTTP/2.13, which in + # turn is lower than HTTP/12.3; + # - Leading zeros MUST be ignored by recipients. 
+ if len(version_number) != 2: + raise ValueError + version_number = int(version_number[0]), int(version_number[1]) + except (ValueError, IndexError): + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request version (%r)" % version) + return False + if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": + self.close_connection = False + if version_number >= (2, 0): + self.send_error( + HTTPStatus.HTTP_VERSION_NOT_SUPPORTED, + "Invalid HTTP Version (%s)" % base_version_number) + return False + elif len(words) == 2: + command, path = words + self.close_connection = True + if command != 'GET': + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad HTTP/0.9 request type (%r)" % command) + return False + elif not words: + count += 1 + print("bla bla on 157 {0} => {1}".format(count, self.close_connection)) + return False + else: + self.send_error( + HTTPStatus.BAD_REQUEST, + "Bad request syntax (%r)" % requestline) + return False + self.command, self.path, self.request_version = command, path, version + + conntype = self.headers.get('Connection', "") + if conntype.lower() == 'close': + self.close_connection = True + elif (conntype.lower() == 'keep-alive' and + self.protocol_version >= "HTTP/1.1"): + self.close_connection = False + + return True + + def do_GET(self): + global G_replay_dict, test_mode_enabled + if test_mode_enabled: + request_hash = self.getTestName(self.requestline) + else: + request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5')) + # print("key:",request_hash) + try: + response_string = None + chunkedResponse = False + if request_hash not in G_replay_dict: + self.send_response(404) + self.send_header('Server', 'MicroServer') + self.send_header('Connection', 'close') + self.end_headers() + + else: + resp = G_replay_dict[request_hash] + headers = resp.getHeaders().split('\r\n') + + # set status codes + status_code = self.get_response_code(headers[0]) + self.send_response(status_code) + + # set headers + for header in headers[1:]: 
# skip first one b/c it's response code + if header == '': + continue + elif 'Content-Length' in header: + if 'Access-Control' in header: # skipping Access-Control-Allow-Credentials, Access-Control-Allow-Origin, Content-Length + header_parts = header.split(':', 1) + header_field = str(header_parts[0].strip()) + header_field_val = str(header_parts[1].strip()) + self.send_header(header_field, header_field_val) + continue + lengthSTR = header.split(':')[1] + length = lengthSTR.strip(' ') + if test_mode_enabled: # the length of the body is given priority in test mode rather than the value in Content-Length. But in replay mode Content-Length gets the priority + if not (resp and resp.getBody()): # Don't attach content-length yet if body is present in the response specified by tester + self.send_header('Content-Length', str(length)) + else: + self.send_header('Content-Length', str(length)) + response_string = self.createDummyBodywithLength(int(length)) + continue + if 'Transfer-Encoding' in header: + self.send_header('Transfer-Encoding', 'Chunked') + response_string = '%X\r\n%s\r\n' % (len('ats'), 'ats') + chunkedResponse = True + continue + + header_parts = header.split(':', 1) + header_field = str(header_parts[0].strip()) + header_field_val = str(header_parts[1].strip()) + #print("{0} === >{1}".format(header_field, header_field_val)) + self.send_header(header_field, header_field_val) + # End for + if test_mode_enabled: + if resp and resp.getBody(): + length = len(bytes(resp.getBody(), 'UTF-8')) + response_string = resp.getBody() + self.send_header('Content-Length', str(length)) + self.end_headers() + + if (chunkedResponse): + self.writeChunkedData() + elif response_string != None and response_string != '': + self.wfile.write(bytes(response_string, 'UTF-8')) + return + except: + e = sys.exc_info() + print("Error", e, self.headers) + self.send_response(400) + self.send_header('Connection', 'close') + self.end_headers() + + def do_HEAD(self): + global G_replay_dict, 
test_mode_enabled + if test_mode_enabled: + request_hash = self.getTestName(self.requestline) + else: + request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5')) + + if request_hash not in G_replay_dict: + self.send_response(404) + self.send_header('Connection', 'close') + self.end_headers() + + else: + resp = G_replay_dict[request_hash] + headers = resp.getHeaders().split('\r\n') + + # set status codes + status_code = self.get_response_code(headers[0]) + self.send_response(status_code) + + # set headers + for header in headers[1:]: # skip first one b/c it's response code + if header == '': + continue + elif 'Content-Length' in header: + self.send_header('Content-Length', '0') + continue + + header_parts = header.split(':', 1) + header_field = str(header_parts[0].strip()) + header_field_val = str(header_parts[1].strip()) + #print("{0} === >{1}".format(header_field, header_field_val)) + self.send_header(header_field, header_field_val) + + self.end_headers() + + def do_POST(self): + response_string = None + chunkedResponse = False + global G_replay_dict, test_mode_enabled + if test_mode_enabled: + request_hash = self.getTestName(self.requestline) + else: + request_hash, __ = cgi.parse_header(self.headers.get('Content-MD5')) + try: + + if request_hash not in G_replay_dict: + self.send_response(404) + self.send_header('Connection', 'close') + self.end_headers() + resp = None + else: + resp = G_replay_dict[request_hash] + resp_headers = resp.getHeaders().split('\r\n') + # set status codes + status_code = self.get_response_code(resp_headers[0]) + #print("response code",status_code) + self.send_response(status_code) + #print("reposen is ",resp_headers) + # set headers + for header in resp_headers[1:]: # skip first one b/c it's response code + + if header == '': + continue + elif 'Content-Length' in header: + if 'Access-Control' in header: # skipping Access-Control-Allow-Credentials, Access-Control-Allow-Origin, Content-Length + header_parts = header.split(':', 
1) + header_field = str(header_parts[0].strip()) + header_field_val = str(header_parts[1].strip()) + self.send_header(header_field, header_field_val) + continue + + lengthSTR = header.split(':')[1] + length = lengthSTR.strip(' ') + if test_mode_enabled: # the length of the body is given priority in test mode rather than the value in Content-Length. But in replay mode Content-Length gets the priority + if not (resp and resp.getBody()): # Don't attach content-length yet if body is present in the response specified by tester + self.send_header('Content-Length', str(length)) + else: + self.send_header('Content-Length', str(length)) + response_string = self.createDummyBodywithLength(int(length)) + continue + if 'Transfer-Encoding' in header: + self.send_header('Transfer-Encoding', 'Chunked') + response_string = '%X\r\n%s\r\n' % (len('microserver'), 'microserver') + chunkedResponse = True + continue + + header_parts = header.split(':', 1) + header_field = str(header_parts[0].strip()) + header_field_val = str(header_parts[1].strip()) + #print("{0} === >{1}".format(header_field, header_field_val)) + self.send_header(header_field, header_field_val) + # End for loop + if test_mode_enabled: + if resp and resp.getBody(): + length = len(bytes(resp.getBody(), 'UTF-8')) + response_string = resp.getBody() + self.send_header('Content-Length', str(length)) + self.end_headers() + + if (chunkedResponse): + self.writeChunkedData() + elif response_string != None and response_string != '': + self.wfile.write(bytes(response_string, 'UTF-8')) + return + except: + e = sys.exc_info() + print("Error", e, self.headers) + self.send_response(400) + self.send_header('Connection', 'close') + self.end_headers() + + +def populate_global_replay_dictionary(sessions): + ''' Populates the global dictionary of {uuid (string): reponse (Response object)} ''' + global G_replay_dict + for session in sessions: + for txn in session.getTransactionIter(): + G_replay_dict[txn._uuid] = txn.getResponse() + + 
print("size", len(G_replay_dict)) + +# tests will add responses to the dictionary where key is the testname + + +def addResponseHeader(key, response_header): + G_replay_dict[key] = response_header + + +def _path(exists, arg): + path = os.path.abspath(arg) + if not os.path.exists(path) and exists: + msg = '"{0}" is not a valid path'.format(path) + raise argparse.ArgumentTypeError(msg) + return path + + +def _bool(arg): + + opt_true_values = set(['y', 'yes', 'true', 't', '1', 'on', 'all']) + opt_false_values = set(['n', 'no', 'false', 'f', '0', 'off', 'none']) + + tmp = arg.lower() + if tmp in opt_true_values: + return True + elif tmp in opt_false_values: + return False + else: + msg = 'Invalid value Boolean value : "{0}"\n Valid options are {0}'.format(arg, + opt_true_values | opt_false_values) + raise argparse.ArgumentTypeError(msg) + + +def main(): + global test_mode_enabled + parser = argparse.ArgumentParser() + + parser.add_argument("--data-dir", "-d", + type=lambda x: _path(True, x), + required=True, + help="Directory with data file" + ) + + parser.add_argument("--public", "-P", + type=_bool, + default=False, + help="Bind server to public IP 0.0.0.0 vs private IP of 127.0.0.1" + ) + + parser.add_argument("--ip_address", "-ip", + type=str, + default='', + help="IP address of the interface to serve on" + ) + + parser.add_argument("--port", "-p", + type=int, + default=SERVER_PORT, + help="Port to use") + + parser.add_argument("--timeout", "-t", + type=float, + default=None, + help="socket time out in seconds") + + parser.add_argument('-V', '--version', action='version', version='%(prog)s {0}'.format(__version__)) + + parser.add_argument("--mode", "-m", + type=str, + default="test", + help="Mode of operation") + parser.add_argument("--connection", "-c", + type=str, + default="nonSSL", + help="use SSL") + parser.add_argument("--key", "-k", + type=str, + default="ssl/server.pem", + help="key for ssl connnection") + parser.add_argument("--cert", "-cert", + type=str, + 
default="ssl/server.crt", + help="certificate") + parser.add_argument("--clientverify", "-cverify", + type=bool, + default=False, + help="verify client cert") + parser.add_argument("--load", + dest='load', + type=str, + default='', + help="A file which will install observers on hooks") + + args = parser.parse_args() + options = args + + # set up global dictionary of {uuid (string): response (Response object)} + s = sv.SessionValidator(args.data_dir) + populate_global_replay_dictionary(s.getSessionIter()) + print("Dropped {0} sessions for being malformed".format(len(s.getBadSessionList()))) + + # start server + try: + socket_timeout = args.timeout + test_mode_enabled = args.mode == "test" + + MyHandler.protocol_version = HTTP_VERSION + if options.connection == 'ssl': + server = SSLServer((options.ip_address, options.port), MyHandler, options) + else: + server = ThreadingServer((options.ip_address, options.port), MyHandler, options) + server.timeout = 5 + print("started server") + server_thread = threading.Thread(target=server.serve_forever()) + server_thread.daemon = True + server_thread.start() + + except KeyboardInterrupt: + print("\n=== ^C received, shutting down httpserver ===") + server.socket.close() + # s_server.socket.close() + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/tests/tools/plugins/custom204plugin.cc b/tests/tools/plugins/custom204plugin.cc new file mode 100644 index 00000000000..98fde5891fc --- /dev/null +++ b/tests/tools/plugins/custom204plugin.cc @@ -0,0 +1,153 @@ +/** @file + + A plugin that sets custom 204 response bodies. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +#include "ts/ts.h" +#include "string.h" + +#define PLUGIN_NAME "custom204plugintest" + +static int +local_handler(TSCont contp, TSEvent event, void *edata) { + + const char *msg = +"\n" +"\n" +"Spec-breaking 204!\n" +"\n" +"\n" +"\n" +"

This is body content for a 204.

\n" +"
\n" +"\n" +"Description: According to rfc7231 I should not have been sent to you!
\n" +"This response was sent via the custom204plugin via a call to TSHttpTxnErrorBodySet.\n" +"
\n" +""; + TSHttpTxn txnp = (TSHttpTxn) edata; + TSMBuffer bufp = nullptr; + TSMLoc hdr_loc = nullptr; + TSMLoc url_loc = nullptr;; + const char *host = nullptr; + int host_length; + const char *test_host = "www.customplugin204.test"; + + switch (event) { + case TS_EVENT_HTTP_PRE_REMAP: + TSDebug(PLUGIN_NAME, "event TS_EVENT_HTTP_PRE_REMAP received"); + TSDebug(PLUGIN_NAME, "running plugin logic."); + if (TSHttpTxnClientReqGet(txnp, &bufp, &hdr_loc) != TS_SUCCESS) { + TSDebug(PLUGIN_NAME, "Couldn't retrieve client request header"); + TSError("[%s] Couldn't retrieve client request header", PLUGIN_NAME); + goto done; + } + TSDebug(PLUGIN_NAME, "got client request"); + + if (TSHttpHdrUrlGet(bufp, hdr_loc, &url_loc) != TS_SUCCESS) { + TSError("[%s] Couldn't retrieve request url", PLUGIN_NAME); + TSDebug(PLUGIN_NAME, "Couldn't retrieve request url"); + TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); + goto done; + } + TSDebug(PLUGIN_NAME, "got client request url"); + + host = TSUrlHostGet(bufp, url_loc, &host_length); + if (!host) { + TSError("[%s] Couldn't retrieve request hostname", PLUGIN_NAME); + TSDebug(PLUGIN_NAME, "Couldn't retrieve request hostname"); + TSHandleMLocRelease(bufp, hdr_loc, url_loc); + TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); + goto done; + } + TSDebug(PLUGIN_NAME, "request's host was retrieved"); + + if (strncmp(host, test_host, strlen(test_host)) == 0) { + TSDebug(PLUGIN_NAME, "host matches, hook TS_HTTP_SEND_RESPONSE_HDR_HOOK"); + TSHttpTxnHookAdd(txnp, TS_HTTP_SEND_RESPONSE_HDR_HOOK, contp); + TSHandleMLocRelease(bufp, hdr_loc, url_loc); + TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); + TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); + return 0; + } + TSDebug(PLUGIN_NAME, "Host != expected host '%s'", test_host); + break; + case TS_EVENT_HTTP_SEND_RESPONSE_HDR: + TSDebug(PLUGIN_NAME, "Returning 204 with custom response body."); + TSHttpTxnSetHttpRetStatus(txnp, TS_HTTP_STATUS_NO_CONTENT); + TSHttpTxnErrorBodySet(txnp, 
TSstrdup(msg), strlen(msg), + TSstrdup("text/html")); + break; + + case TS_EVENT_HTTP_TXN_CLOSE: + TSDebug(PLUGIN_NAME, "event TS_EVENT_HTTP_TXN_CLOSE received"); + TSContDestroy(contp); + break; + + default: + TSAssert(!"Unexpected event"); + break; + + } + +done: + TSHandleMLocRelease(bufp, hdr_loc, url_loc); + TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); + TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); + return 1; +} + +static int +global_handler(TSCont contp, TSEvent event, void *edata) { + TSHttpTxn txnp = (TSHttpTxn) edata; + TSCont txn_contp = nullptr; + + switch(event) { + case TS_EVENT_HTTP_TXN_START: + txn_contp = TSContCreate(local_handler, TSMutexCreate()); + TSHttpTxnHookAdd(txnp, TS_HTTP_PRE_REMAP_HOOK, txn_contp); + TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, txn_contp); + TSDebug(PLUGIN_NAME, "hooked TS_HTTP_OS_DNS_HOOK and TS_EVENT_HTTP_TXN_CLOSE_HOOK"); + break; + default: + TSAssert(!"Unexpected event"); + break; + } + TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); + return 1; +} + +void +TSPluginInit(int argc, const char *argv[]) +{ + TSPluginRegistrationInfo info; + + info.plugin_name = PLUGIN_NAME; + info.vendor_name = "Apache Software Foundation"; + info.support_email = "dev@trafficserver.apache.org"; + + if (TSPluginRegister(&info) != TS_SUCCESS) { + TSError("[%s] Plugin registration failed", PLUGIN_NAME); + } + + TSCont contp = TSContCreate(global_handler, TSMutexCreate()); + TSHttpHookAdd(TS_HTTP_TXN_START_HOOK, contp); +} diff --git a/tests/tools/sessionvalidation/__init__.py b/tests/tools/sessionvalidation/__init__.py new file mode 100644 index 00000000000..bcbf6855425 --- /dev/null +++ b/tests/tools/sessionvalidation/__init__.py @@ -0,0 +1,17 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/tools/sessionvalidation/badsession.py b/tests/tools/sessionvalidation/badsession.py new file mode 100644 index 00000000000..69b36167f86 --- /dev/null +++ b/tests/tools/sessionvalidation/badsession.py @@ -0,0 +1,35 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class BadSession(object): + ''' + Session encapsulates a single BAD user session. Bad meaning that for some reason the session is invalid. 
+ + _filename is the filename of the bad JSON session + _reason is a string with some kind of explanation on why the session was bad + ''' + + def __repr__(self): + return "".format( + self._filename, self._reason + ) + + def __init__(self, filename, reason): + self._filename = filename + self._reason = reason diff --git a/tests/tools/sessionvalidation/request.py b/tests/tools/sessionvalidation/request.py new file mode 100644 index 00000000000..39598d7962b --- /dev/null +++ b/tests/tools/sessionvalidation/request.py @@ -0,0 +1,48 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import hashlib + + +class Request(object): + ''' Request encapsulates a single request from the UA ''' + + def getTimestamp(self): + return self._timestamp + + def getHeaders(self): + return self._headers + + def getBody(self): + return self._body + + def getHeaderMD5(self): + ''' Returns the MD5 hash of the headers + + This is used to do a unique mapping to a request/response transaction ''' + return hashlib.md5(self._headers.encode()).hexdigest() + + def __repr__(self): + # return str(self._timestamp) + return "".format( + str(self._timestamp), str(self._headers), str(self._body) + ) + + def __init__(self, timestamp, headers, body): + self._timestamp = timestamp + self._headers = headers + self._body = body diff --git a/tests/tools/sessionvalidation/response.py b/tests/tools/sessionvalidation/response.py new file mode 100644 index 00000000000..b8438e2f254 --- /dev/null +++ b/tests/tools/sessionvalidation/response.py @@ -0,0 +1,40 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class Response(object): + ''' Response encapsulates a single request from the UA ''' + + def getTimestamp(self): + return self._timestamp + + def getHeaders(self): + return self._headers + + def getBody(self): + return self._body + + def __repr__(self): + return "".format( + self._timestamp, self._headers, self._body + ) + + def __init__(self, timestamp, headers, body): + self._timestamp = timestamp + self._headers = headers + self._body = body diff --git a/tests/tools/sessionvalidation/session.py b/tests/tools/sessionvalidation/session.py new file mode 100644 index 00000000000..e8bb0e2e463 --- /dev/null +++ b/tests/tools/sessionvalidation/session.py @@ -0,0 +1,45 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sessionvalidation.transaction as transaction + + +class Session(object): + ''' Session encapsulates a single user session ''' + + def getTransactionList(self): + ''' Returns a list of transaction objects ''' + return self._transaction_list + + def getTransactionIter(self): + ''' Returns an iterator of transaction objects ''' + return iter(self._transaction_list) + + def returnFirstTransaction(self): + return self._transaction_list[0] + + def __repr__(self): + return "".format( + self._filename, self._version, self._timestamp, self._encoding, repr(self._transaction_list) + ) + + def __init__(self, filename, version, timestamp, transaction_list, encoding=None): + self._filename = filename + self._version = version + self._timestamp = timestamp + self._encoding = encoding + self._transaction_list = transaction_list diff --git a/tests/tools/sessionvalidation/sessionvalidation.py b/tests/tools/sessionvalidation/sessionvalidation.py new file mode 100644 index 00000000000..e8ddc21981e --- /dev/null +++ b/tests/tools/sessionvalidation/sessionvalidation.py @@ -0,0 +1,260 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +import os + +import sessionvalidation.session as session +import sessionvalidation.transaction as transaction +import sessionvalidation.request as request +import sessionvalidation.response as response + +valid_HTTP_request_methods = ['GET', 'POST', 'HEAD'] +G_VERBOSE_LOG = True + + +def _verbose_print(msg, verbose_on=False): + ''' Print msg if verbose_on is set to True or G_VERBOSE_LOG is set to True''' + if verbose_on or G_VERBOSE_LOG: + print(msg) + + +class SessionValidator(object): + ''' + SessionValidator parses, validates, and exports an API for a given set of JSON sessions generated from Apache Traffic Server + + SessionValidator is initialized with a path to a directory of JSON sessions. It then automatically parses and validates all the + session in the directory. After initialization, the user may use the provided API + + TODO : + Provide a list of guaranteed fields for each type of object (ie a Transaction has a request and a response, a request has ...) + ''' + + def parse(self): + ''' + Constructs Session objects from JSON files on disk and stores objects into _sessions + + All sessions missing required fields (ie. 
a session timestamp, a response for every request, etc) are + dropped and the filename is stored inside _bad_sessions + ''' + + log_filenames = [os.path.join(self._json_log_dir, f) for f in os.listdir( + self._json_log_dir) if os.path.isfile(os.path.join(self._json_log_dir, f))] + + for fname in log_filenames: + with open(fname) as f: + # first attempt to load the JSON + try: + sesh = json.load(f) + except: + self._bad_sessions.append(fname) + _verbose_print("Warning: JSON parse error on file={0}".format(fname)) + print("Warning: JSON parse error on file={0}".format(fname)) + continue + + # then attempt to extract all the required fields from the JSON + try: + session_timestamp = sesh['timestamp'] + session_version = sesh['version'] + session_txns = list() + for txn in sesh['txns']: + # print("PERSIA____________________________________________________________",txn) + # create transaction Request object + txn_request = txn['request'] + + txn_request_body = '' + if 'body' in txn_request: + txn_request_body = txn_request['body'] + txn_request_obj = request.Request(txn_request['timestamp'], txn_request['headers'], txn_request_body) + # Create transaction Response object + txn_response = txn['response'] + txn_response_body = '' + if 'body' in txn_response: + txn_response_body = txn_response['body'] + txn_response_obj = response.Response(txn_response['timestamp'], txn_response['headers'], txn_response_body) + + # create Transaction object + txn_obj = transaction.Transaction(txn_request_obj, txn_response_obj, txn['uuid']) + session_txns.append(txn_obj) + # print(txn_request['timestamp']) + session_obj = session.Session(fname, session_version, session_timestamp, session_txns) + + except KeyError as e: + self._bad_sessions.append(fname) + print("Warning: parse error on key={0} for file={1}".format(e, fname)) + _verbose_print("Warning: parse error on key={0} for file={1}".format(e, fname)) + continue + + self._sessions.append(session_obj) + + def validate(self): + ''' Prunes 
out all the invalid Sessions in _sessions ''' + + good_sessions = list() + + for sesh in self._sessions: + if SessionValidator.validateSingleSession(sesh): + good_sessions.append(sesh) + else: + self._bad_sessions.append(sesh._filename) + + self._sessions = good_sessions + + @staticmethod + def validateSingleSession(sesh): + ''' Takes in a single Session object as input, returns whether or not the Session is valid ''' + + retval = True + + try: + # first validate fields + if not sesh._filename: + _verbose_print("bad session filename") + retval = False + elif not sesh._version: + _verbose_print("bad session version") + retval = False + elif float(sesh._timestamp) <= 0: + _verbose_print("bad session timestamp") + retval = False + elif not bool(sesh.getTransactionList()): + _verbose_print("session has no transaction list") + retval = False + + # validate Transactions now + for txn in sesh.getTransactionIter(): + if not SessionValidator.validateSingleTransaction(txn): + retval = False + + except ValueError as e: + _verbose_print("most likely an invalid session timestamp") + retval = False + + return retval + + @staticmethod + def validateSingleTransaction(txn): + ''' Takes in a single Transaction object as input, and returns whether or not the Transaction is valid ''' + + txn_req = txn.getRequest() + txn_resp = txn.getResponse() + retval = True + + #valid_HTTP_request_methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS', 'CONNECT', 'PATCH'] + # we can later uncomment the previous line to support more HTTP methods + valid_HTTP_versions = ['HTTP/1.0', 'HTTP/1.1', 'HTTP/2.0'] + + try: + # validate request first + if not txn_req: + _verbose_print("no transaction request") + retval = False + elif txn_req.getBody() == None: + _verbose_print("transaction body is set to None") + retval = False + elif float(txn_req.getTimestamp()) <= 0: + _verbose_print("invalid transaction request timestamp") + retval = False + elif txn_req.getHeaders().split()[0] not in 
valid_HTTP_request_methods: + _verbose_print("invalid HTTP method for transaction {0}".format(txn_req.getHeaders().split()[0])) + retval = False + elif not txn_req.getHeaders().endswith("\r\n\r\n"): + _verbose_print("transaction request headers didn't end with \\r\\n\\r\\n") + retval = False + elif txn_req.getHeaders().split()[2] not in valid_HTTP_versions: + _verbose_print("invalid HTTP version in request") + retval = False + + # if the Host header is not present and vaild we reject this transaction + found_host = False + for header in txn_req.getHeaders().split('\r\n'): + split_header = header.split(' ') + if split_header[0] == 'Host:': + found_host = True + host_header_no_space = len(split_header) == 1 + host_header_with_space = len(split_header) == 2 and split_header[1] == '' + if host_header_no_space or host_header_with_space: + found_host = False + if not found_host: + print("missing host", txn_req) + _verbose_print("transaction request Host header doesn't have specified host") + retval = False + + # reject if the host is localhost (since ATS seems to ignore remap rules for localhost requests) + if "127.0.0.1" in txn_req.getHeaders() or "localhost" in txn_req.getHeaders(): + _verbose_print("transaction request Host is localhost, we must reject because ATS ignores remap rules for localhost requests") + retval = False + + # now validate response + if not txn_resp: + _verbose_print("no transaction response") + retval = False + elif txn_resp.getBody() == None: + _verbose_print("transaction response body set to None") + retval = False + elif float(txn_resp.getTimestamp()) <= 0: + _verbose_print("invalid transaction response timestamp") + retval = False + elif txn_resp.getHeaders().split()[0] not in valid_HTTP_versions: + _verbose_print("invalid HTTP response header") + retval = False + elif not txn_resp.getHeaders().endswith("\r\n\r\n"): + _verbose_print("transaction response headers didn't end with \\r\\n\\r\\n") + retval = False + + # if any of the 3xx responses 
have bodies, then the must reject this transaction, since 3xx + # errors by definition can't have bodies + response_line = txn_resp.getHeaders().split('\r\n')[0] + response_code = response_line.split(' ')[1] + if response_code.startswith('3') and txn_resp.getBody(): + _verbose_print("transaction response was 3xx and had a body") + retval = False + + except ValueError as e: + _verbose_print("most likely an invalid transaction timestamp") + retval = False + + except IndexError as e: + _verbose_print("most likely a bad transaction header") + retval = False + + return retval + + def getSessionList(self): + ''' Returns the list of Session objects ''' + return self._sessions + + def getSessionIter(self): + ''' Returns an iterator of the Session objects ''' + return iter(self._sessions) + + def getBadSessionList(self): + ''' Returns a list of bad session filenames (list of strings) ''' + return self._bad_sessions + + def getBadSessionListIter(self): + ''' Returns an iterator of bad session filenames (iterator of strings) ''' + return iter(self._bad_sessions) + + def __init__(self, json_log_dir): + global valid_HTTP_request_methods + self._json_log_dir = json_log_dir + self._bad_sessions = list() # list of filenames + self._sessions = list() # list of _good_ session objects + + self.parse() + self.validate() diff --git a/tests/tools/sessionvalidation/transaction.py b/tests/tools/sessionvalidation/transaction.py new file mode 100644 index 00000000000..19950abea69 --- /dev/null +++ b/tests/tools/sessionvalidation/transaction.py @@ -0,0 +1,40 @@ +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sessionvalidation.request as request +import sessionvalidation.response as response + + +class Transaction(object): + ''' Tranaction encapsulates a single UA transaction ''' + + def getRequest(self): + return self._request + + def getResponse(self): + return self._response + + def __repr__(self): + return "".format( + self._uuid, self._request, self._response + ) + + def __init__(self, request, response, uuid): + self._request = request + self._response = response + self._uuid = uuid diff --git a/tests/tools/tcp_408_client.py b/tests/tools/tcp_408_client.py new file mode 100644 index 00000000000..eb1d72150c8 --- /dev/null +++ b/tests/tools/tcp_408_client.py @@ -0,0 +1,63 @@ +''' +A simple command line interface to send/receive bytes over TCP. +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import socket +import sys +import time + +def tcp_client(host, port, sleep, header, data): + pass + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((host, port)) + s.sendall(header.encode()) + s.sendall(data[0].encode()) + time.sleep(sleep) + s.shutdown(socket.SHUT_WR) + while True: + output = s.recv(4096) # suggested bufsize from docs.python.org + if len(output) <= 0: + break + else: + sys.stdout.write(output.decode()) + s.close() + + +DESCRIPTION =\ + """A simple command line interface to send/receive bytes over TCP. + +The full contents of the given file are sent via a TCP connection to the given +host and port. Then data is read from the connection and printed to standard +output. Streaming is not supported.""" + + +def main(argv): + parser = argparse.ArgumentParser(description=DESCRIPTION) + parser.add_argument('host', help='the target host') + parser.add_argument('port', type=int, help='the target port') + parser.add_argument('sleep', type=int, help='timeout') + args = parser.parse_args() + + header = 'POST / HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 2\r\n\r\n' + data = "aa" + tcp_client(args.host, args.port, args.sleep, header, data) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/tests/tools/tcp_client.py b/tests/tools/tcp_client.py new file mode 100644 index 00000000000..2fb0c00f854 --- /dev/null +++ b/tests/tools/tcp_client.py @@ -0,0 +1,59 @@ +''' +A simple command line interface to send/receive bytes over TCP. +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import socket +import sys + +def tcp_client(host, port, data): + pass + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((host, port)) + s.sendall(data.encode()) + s.shutdown(socket.SHUT_WR) + while True: + output = s.recv(4096) # suggested bufsize from docs.python.org + if len(output) <= 0: + break + else: + sys.stdout.write(output.decode()) + s.close() + +DESCRIPTION=\ +"""A simple command line interface to send/receive bytes over TCP. + +The full contents of the given file are sent via a TCP connection to the given +host and port. Then data is read from the connection and printed to standard +output. Streaming is not supported.""" + +def main(argv): + parser = argparse.ArgumentParser(description=DESCRIPTION) + parser.add_argument('host', help='the target host') + parser.add_argument('port', type=int, help='the target port') + parser.add_argument('file', help='the file with content to be sent') + args = parser.parse_args() + + data = '' + with open(args.file, 'r') as f: + data = f.read() + + tcp_client(args.host, args.port, data) + +if __name__ == "__main__": + main(sys.argv) diff --git a/tests/tools/traffic-replay/Config.py b/tests/tools/traffic-replay/Config.py new file mode 100644 index 00000000000..62826142706 --- /dev/null +++ b/tests/tools/traffic-replay/Config.py @@ -0,0 +1,31 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# SSL config +ca_certs = "/path/to/certfile" +keyfile = "/path/to/keyfile" + +# Proxy config +proxy_host = "127.0.0.1" +proxy_ssl_port = 443 +proxy_nonssl_port = 8080 + +# process and thread config +nProcess = 4 +nThread = 4 diff --git a/tests/tools/traffic-replay/RandomReplay.py b/tests/tools/traffic-replay/RandomReplay.py new file mode 100644 index 00000000000..ad12e0a95fb --- /dev/null +++ b/tests/tools/traffic-replay/RandomReplay.py @@ -0,0 +1,175 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gevent +import socket +import requests +import os +from threading import Thread +import sys +from multiprocessing import current_process +import sessionvalidation.sessionvalidation as sv +import lib.result as result +import extractHeader +import mainProcess +import json +import gzip +bSTOP = False + + +def createDummyBodywithLength(numberOfbytes): + if numberOfbytes <= 0: + return None + body = 'a' + while numberOfbytes != 1: + body += 'b' + numberOfbytes -= 1 + return body + + +def handleResponse(response, *args, **kwargs): + print(response.status_code) + # resp=args[0] + #expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2) + #expected_output = (int(expected_output_split[1]), str( expected_output_split[2])) + #r = result.Result(session_filename, expected_output[0], response.status_code) + # print(r.getResultString(colorize=True)) +# make sure len of the message body is greater than length + + +def gen(): + yield 'pforpersia,champaignurbana'.encode('utf-8') + yield 'there'.encode('utf-8') + + +def txn_replay(session_filename, txn, proxy, result_queue, request_session): + """ Replays a single transaction + :param request_session: has to be a valid requests session""" + req = txn.getRequest() + resp = txn.getResponse() + + # Construct HTTP request & fire it off + txn_req_headers = req.getHeaders() + txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers) + txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier + if 'body' in txn_req_headers_dict: + del txn_req_headers_dict['body'] + + #print("Replaying session") + try: + # response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers), + # 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + # headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? 
kill me + method = extractHeader.extract_txn_req_method(txn_req_headers) + response = None + body = None + content = None + if 'Transfer-Encoding' in txn_req_headers_dict: + # deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header + # This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this + # BUT, this is not a problem if the data is not chunked encoded.. Strange, huh? + del txn_req_headers_dict['Host'] + if 'Content-Length' in txn_req_headers_dict: + #print("ewww !") + del txn_req_headers_dict['Content-Length'] + body = gen() + if 'Content-Length' in txn_req_headers_dict: + nBytes = int(txn_req_headers_dict['Content-Length']) + body = createDummyBodywithLength(nBytes) + #print("request session is",id(request_session)) + if method == 'GET': + response = request_session.get('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, stream=False, allow_redirects=False, data=body) + if 'Content-Length' in response.headers: + content = response.raw + #print("len: {0} received {1}".format(response.headers['Content-Length'],content)) + + elif method == 'POST': + response = request_session.post('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, stream=False, data=body, allow_redirects=False) + + if 'Content-Length' in response.headers: + content = response.raw + # print("reading==========>>>>>>>>>>>>>.") + # print(content.data) + #print("len: {0} received {1}".format(response.headers['Content-Length'],content)) + elif method == 'HEAD': + response = request_session.head('http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, stream=False) + + #gzip_file = gzip.GzipFile(fileobj=content) + 
#shutil.copyfileobj(gzip_file, f) + expected = extractHeader.responseHeader_to_dict(resp.getHeaders()) + # print(expected) + if mainProcess.verbose: + expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2) + expected_output = (int(expected_output_split[1]), str(expected_output_split[2])) + r = result.Result(session_filename, expected_output[0], response.status_code) + print(r.getResultString(response.headers, expected, colorize=True)) + r.Compare(response.headers, expected) + # result_queue.put(r) + except UnicodeEncodeError as e: + # these unicode errors are due to the interaction between Requests and our wiretrace data. + # TODO fix + print("UnicodeEncodeError exception") + + except requests.exceptions.ContentDecodingError as e: + print("ContentDecodingError", e) + except: + e = sys.exc_info() + print("ERROR in requests: ", e, response, session_filename) + + +def session_replay(input, proxy, result_queue): + global bSTOP + ''' Replay all transactions in session + + This entire session will be replayed in one requests.Session (so one socket / TCP connection)''' + # if timing_control: + # time.sleep(float(session._timestamp)) # allow other threads to run + while bSTOP == False: + for session in iter(input.get, 'STOP'): + # print(bSTOP) + if session == 'STOP': + print("Queue is empty") + bSTOP = True + break + with requests.Session() as request_session: + request_session.proxies = proxy + for txn in session.getTransactionIter(): + try: + txn_replay(session._filename, txn, proxy, result_queue, request_session) + except: + e = sys.exc_info() + print("ERROR in replaying: ", e, txn.getRequest().getHeaders()) + bSTOP = True + print("Queue is empty") + input.put('STOP') + break + + +def client_replay(input, proxy, result_queue, nThread): + Threads = [] + for i in range(nThread): + t = Thread(target=session_replay, args=[input, proxy, result_queue]) + t.start() + Threads.append(t) + + for t1 in Threads: + t1.join() diff --git 
a/tests/tools/traffic-replay/SSLReplay.py b/tests/tools/traffic-replay/SSLReplay.py new file mode 100644 index 00000000000..a37924bc066 --- /dev/null +++ b/tests/tools/traffic-replay/SSLReplay.py @@ -0,0 +1,219 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import http.client +import socket +import ssl +import pprint +import gevent +import requests +import os +#import threading +import sys +from multiprocessing import current_process +import sessionvalidation.sessionvalidation as sv +import lib.result as result +import extractHeader +from gevent import monkey, sleep +from threading import Thread +import mainProcess +import json +import extractHeader +import time +import Config +bSTOP = False + + +class ProxyHTTPSConnection(http.client.HTTPSConnection): + "This class allows communication via SSL." + + default_port = http.client.HTTPS_PORT + + # XXX Should key_file and cert_file be deprecated in favour of context? 
+ + def __init__(self, host, port=None, key_file=None, cert_file=None, + timeout=socket._GLOBAL_DEFAULT_TIMEOUT, + source_address=None, *, context=None, + check_hostname=None, server_name=None): + # http.client.HTTPSConnection.__init__(self) + super().__init__(host, port, key_file, cert_file, timeout, source_address, context=context, check_hostname=check_hostname) + ''' + self.key_file = key_file + self.cert_file = cert_file + if context is None: + context = ssl._create_default_https_context() + will_verify = context.verify_mode != ssl.CERT_NONE + if check_hostname is None: + check_hostname = context.check_hostname + if check_hostname and not will_verify: + raise ValueError("check_hostname needs a SSL context with " + "either CERT_OPTIONAL or CERT_REQUIRED") + if key_file or cert_file: + context.load_cert_chain(cert_file, key_file) + self._context = context + self._check_hostname = check_hostname + ''' + self.server_name = server_name + + def connect(self): + "Connect to a host on a given (SSL) port." 
+ http.client.HTTPConnection.connect(self) + + if self._tunnel_host: + server_hostname = self._tunnel_host + else: + server_hostname = self.server_name + self.sock = self._context.wrap_socket(self.sock, + do_handshake_on_connect=True, + server_side=False, + server_hostname=server_hostname) + if not self._context.check_hostname and self._check_hostname: + try: + ssl.match_hostname(self.sock.getpeercert(), server_hostname) + except Exception: + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + raise + + +def txn_replay(session_filename, txn, proxy, result_queue, request_session): + """ Replays a single transaction + :param request_session: has to be a valid requests session""" + req = txn.getRequest() + resp = txn.getResponse() + responseDict = {} + # Construct HTTP request & fire it off + txn_req_headers = req.getHeaders() + txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers) + txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier + if 'body' in txn_req_headers_dict: + del txn_req_headers_dict['body'] + + #print("Replaying session") + try: + # response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers), + # 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + # headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me + method = extractHeader.extract_txn_req_method(txn_req_headers) + response = None + body = None + content = None + if 'Transfer-Encoding' in txn_req_headers_dict: + # deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header + # This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this + # BUT, this is not a problem if the data is not chunked encoded.. Strange, huh? 
+ del txn_req_headers_dict['Host'] + if 'Content-Length' in txn_req_headers_dict: + #print("ewww !") + del txn_req_headers_dict['Content-Length'] + body = gen() + if 'Content-Length' in txn_req_headers_dict: + nBytes = int(txn_req_headers_dict['Content-Length']) + body = createDummyBodywithLength(nBytes) + #print("request session is",id(request_session)) + if method == 'GET': + request_session.request('GET', 'https://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, body=body) + r1 = request_session.getresponse() + responseHeaders = r1.getheaders() + responseContent = r1.read() + + elif method == 'POST': + request_session.request('POST', 'https://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, body=body) + r1 = request_session.getresponse() + responseHeaders = r1.getheaders() + responseContent = r1.read() + + elif method == 'HEAD': + request_session.request('HEAD', 'https://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + headers=txn_req_headers_dict, body=body) + r1 = request_session.getresponse() + responseHeaders = r1.getheaders() + responseContent = r1.read() + for key, value in responseHeaders: + responseDict[key.lower()] = value + expected = extractHeader.responseHeader_to_dict(resp.getHeaders()) + # print(responseDict) + if mainProcess.verbose: + expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2) + expected_output = (int(expected_output_split[1]), str(expected_output_split[2])) + r = result.Result(session_filename, expected_output[0], r1.status) + print(r.getResultString(responseDict, expected, colorize=True)) + r.Compare(responseDict, expected) + # result_queue.put(r) + except UnicodeEncodeError as e: + # these unicode errors are due to the interaction between Requests and our wiretrace data. 
+ # TODO fix + print("UnicodeEncodeError exception") + + except requests.exceptions.ContentDecodingError as e: + print("ContentDecodingError", e) + except: + e = sys.exc_info() + print("ERROR in requests: ", e, response, session_filename) + + +def client_replay(input, proxy, result_queue, nThread): + Threads = [] + for i in range(nThread): + t = Thread(target=session_replay, args=[input, proxy, result_queue]) + t.start() + Threads.append(t) + + for t1 in Threads: + t1.join() + + +def session_replay(input, proxy, result_queue): + ''' Replay all transactions in session + + This entire session will be replayed in one requests.Session (so one socket / TCP connection)''' + # if timing_control: + # time.sleep(float(session._timestamp)) # allow other threads to run + global bSTOP + sslSocks = [] + while bSTOP == False: + for session in iter(input.get, 'STOP'): + txn = session.returnFirstTransaction() + req = txn.getRequest() + # Construct HTTP request & fire it off + txn_req_headers = req.getHeaders() + txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers) + sc = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23) + sc.load_cert_chain(Config.ca_certs, keyfile=Config.keyfile) + conn = ProxyHTTPSConnection(Config.proxy_host, Config.proxy_ssl_port, cert_file=Config.ca_certs, + key_file=Config.keyfile, context=sc, server_name=txn_req_headers_dict['Host']) + for txn in session.getTransactionIter(): + try: + # print(txn._uuid) + txn_replay(session._filename, txn, proxy, result_queue, conn) + except: + e = sys.exc_info() + print("ERROR in replaying: ", e, txn.getRequest().getHeaders()) + #sslSocket.bStop = False + + bSTOP = True + print("stopping now") + input.put('STOP') + break + + # time.sleep(0.5) + for sslSock in sslSocks: + sslSock.ssl_sock.close() diff --git a/tests/tools/traffic-replay/Scheduler.py b/tests/tools/traffic-replay/Scheduler.py new file mode 100644 index 00000000000..438fce6e70d --- /dev/null +++ b/tests/tools/traffic-replay/Scheduler.py @@ -0,0 +1,63 
@@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import random +import json +from multiprocessing import Process, Queue, current_process +from progress.bar import Bar +import sessionvalidation.sessionvalidation as sv +import WorkerTask +import time + + +def LaunchWorkers(path, nProcess, proxy, replay_type, nThread): + ms1 = time.time() + s = sv.SessionValidator(path) + sessions = s.getSessionList() + sessions.sort(key=lambda session: session._timestamp) + Processes = [] + Qsize = 25000 # int (1.5 * len(sessions)/(nProcess)) + QList = [Queue(Qsize) for i in range(nProcess)] + print("Dropped {0} sessions for being malformed. 
Number of correct sessions {1}".format( + len(s.getBadSessionList()), len(sessions))) + print(range(nProcess)) + OutputQ = Queue() + #======================================== Pre-load queues + for session in sessions: + # if nProcess == 1: + # QList[0].put(session) + # else: + QList[random.randint(0, nProcess - 1)].put(session) + # if QList[0].qsize() > 10 : + # break + #=============================================== Launch Processes + print("size", QList[0].qsize()) + for i in range(nProcess): + QList[i].put('STOP') + for i in range(nProcess): + p = Process(target=WorkerTask.worker, args=[QList[i], OutputQ, proxy, replay_type, nThread]) + p.daemon = False + Processes.append(p) + p.start() + + for p in Processes: + p.join() + ms2 = time.time() + print("OK enough, it is time to exit, running time in seconds", (ms2 - ms1)) diff --git a/tests/tools/traffic-replay/WorkerTask.py b/tests/tools/traffic-replay/WorkerTask.py new file mode 100644 index 00000000000..6ff9d1fffcb --- /dev/null +++ b/tests/tools/traffic-replay/WorkerTask.py @@ -0,0 +1,46 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import socket +import requests +import os +#import threading +import sys +from multiprocessing import current_process +import sessionvalidation.sessionvalidation as sv +import lib.result as result +from progress.bar import Bar +import extractHeader +import RandomReplay +import SSLReplay +import h2Replay + + +def worker(input, output, proxy, replay_type, nThread): + #progress_bar = Bar(" Replaying sessions {0}".format(current_process().name), max=input.qsize()) + #print("playing {0}=>{1}:{2}".format(current_process().name,session._timestamp,proxy)) + if replay_type == 'random': + RandomReplay.client_replay(input, proxy, output, nThread) + elif replay_type == 'ssl': + SSLReplay.client_replay(input, proxy, output, nThread) + elif replay_type == 'h2': + h2Replay.client_replay(input, proxy, output, nThread) + # progress_bar.next() + # progress_bar.finish() + print("process{0} has exited".format(current_process().name)) diff --git a/tests/tools/traffic-replay/__main__.py b/tests/tools/traffic-replay/__main__.py new file mode 100644 index 00000000000..8bf3e13dab0 --- /dev/null +++ b/tests/tools/traffic-replay/__main__.py @@ -0,0 +1,35 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function +import mainProcess +import argparse + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("-type", action='store', dest='replay_type', help="Replay type: ssl/random/h2") + parser.add_argument("-log_dir", type=str, help="directory of JSON replay files") + parser.add_argument("-v", dest="verbose", help="verify response status code", action="store_true") + + args = parser.parse_args() + + # Let 'er loose + #main(args.log_dir, args.hostname, int(args.port), args.threads, args.timing, args.verbose) + mainProcess.main(args.log_dir, args.replay_type, args.verbose) diff --git a/tests/tools/traffic-replay/extractHeader.py b/tests/tools/traffic-replay/extractHeader.py new file mode 100644 index 00000000000..69fed043fd3 --- /dev/null +++ b/tests/tools/traffic-replay/extractHeader.py @@ -0,0 +1,70 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sessionvalidation + + +def extract_txn_req_method(headers): + ''' Extracts the HTTP request method from the header in a string format ''' + line = (headers.split('\r\n'))[0] + return (line.split(' '))[0] + + +def extract_host(headers): + ''' Returns the host header from the given headers ''' + lines = headers.split('\r\n') + for line in lines: + if 'Host:' in line: + return line.split(' ')[1] + return "notfound" + + +def responseHeader_to_dict(header): + headerFields = header.split('\r\n', 1)[1] + fields = headerFields.split('\r\n') + header = [x for x in fields if (x != u'')] + headers = {} + for line in header: + split_here = line.find(":") + headers[line[:split_here].lower()] = line[(split_here + 1):].strip() + + return headers + + +def header_to_dict(header): + ''' Convert a HTTP header in string format to a python dictionary + Returns a dictionary of header values + ''' + header = header.split('\r\n') + header = [x for x in header if (x != u'')] + headers = {} + for line in header: + if 'GET' in line or 'POST' in line or 'HEAD' in line: # ignore initial request line + continue + + split_here = line.find(":") + headers[line[:split_here]] = line[(split_here + 1):].strip() + + return headers + + +def extract_GET_path(headers): + ''' Extracts the HTTP request URL from the header in a string format ''' + line = (headers.split('\r\n'))[0] + return (line.split(' '))[1] diff --git a/tests/tools/traffic-replay/h2Replay.py b/tests/tools/traffic-replay/h2Replay.py new file mode 100644 index 00000000000..7850aa08a00 --- /dev/null +++ b/tests/tools/traffic-replay/h2Replay.py @@ -0,0 +1,338 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gevent +import os +from threading import Thread +import sys +from multiprocessing import current_process +import sessionvalidation.sessionvalidation as sv +import lib.result as result +import extractHeader +import mainProcess +import json +from hyper import HTTP20Connection +from hyper.tls import wrap_socket, H2_NPN_PROTOCOLS, H2C_PROTOCOL +from hyper.common.bufsocket import BufferedSocket +import hyper +import socket +import logging +import h2 +from h2.connection import H2Configuration +import threading +import Config + +log = logging.getLogger(__name__) +bSTOP = False +hyper.tls._context = hyper.tls.init_context() +hyper.tls._context.check_hostname = False +hyper.tls._context.verify_mode = hyper.compat.ssl.CERT_NONE + + +class _LockedObject(object): + """ + A wrapper class that hides a specific object behind a lock. + + The goal here is to provide a simple way to protect access to an object + that cannot safely be simultaneously accessed from multiple threads. The + intended use of this class is simple: take hold of it with a context + manager, which returns the protected object. 
+ """ + + def __init__(self, obj): + self.lock = threading.RLock() + self._obj = obj + + def __enter__(self): + self.lock.acquire() + return self._obj + + def __exit__(self, _exc_type, _exc_val, _exc_tb): + self.lock.release() + + +class h2ATS(HTTP20Connection): + + def __init_state(self): + """ + Initializes the 'mutable state' portions of the HTTP/2 connection + object. + + This method exists to enable HTTP20Connection objects to be reused if + they're closed, by resetting the connection object to its basic state + whenever it ends up closed. Any situation that needs to recreate the + connection can call this method and it will be done. + + This is one of the only methods in hyper that is truly private, as + users should be strongly discouraged from messing about with connection + objects themselves. + """ + + config1 = H2Configuration( + client_side=True, + header_encoding='utf-8', + validate_outbound_headers=False, + validate_inbound_headers=False, + + ) + self._conn = _LockedObject(h2.connection.H2Connection(config=config1)) + + # Streams are stored in a dictionary keyed off their stream IDs. We + # also save the most recent one for easy access without having to walk + # the dictionary. + # + # We add a set of all streams that we or the remote party forcefully + # closed with RST_STREAM, to avoid encountering issues where frames + # were already in flight before the RST was processed. + # + # Finally, we add a set of streams that recently received data. When + # using multiple threads, this avoids reading on threads that have just + # acquired the I/O lock whose streams have already had their data read + # for them by prior threads. + self.streams = {} + self.recent_stream = None + self.next_stream_id = 1 + self.reset_streams = set() + self.recent_recv_streams = set() + + # The socket used to send data. + self._sock = None + + # Instantiate a window manager. 
+ #self.window_manager = self.__wm_class(65535) + + return + + def __init__(self, host, **kwargs): + HTTP20Connection.__init__(self, host, **kwargs) + self.__init_state() + + def connect(self): + """ + Connect to the server specified when the object was created. This is a + no-op if we're already connected. + + Concurrency + ----------- + + This method is thread-safe. It may be called from multiple threads, and + is a noop for all threads apart from the first. + + :returns: Nothing. + + """ + #print("connecting to ATS") + with self._lock: + if self._sock is not None: + return + sni = self.host + if not self.proxy_host: + host = self.host + port = self.port + else: + host = self.proxy_host + port = self.proxy_port + + sock = socket.create_connection((host, port)) + + if self.secure: + #assert not self.proxy_host, "Proxy with HTTPS not supported." + sock, proto = wrap_socket(sock, sni, self.ssl_context, + force_proto=self.force_proto) + else: + proto = H2C_PROTOCOL + + log.debug("Selected NPN protocol: %s", proto) + assert proto in H2_NPN_PROTOCOLS or proto == H2C_PROTOCOL + + self._sock = BufferedSocket(sock, self.network_buffer_size) + + self._send_preamble() + + +def createDummyBodywithLength(numberOfbytes): + if numberOfbytes == 0: + return None + body = 'a' + while numberOfbytes != 1: + body += 'b' + numberOfbytes -= 1 + return body + + +def handleResponse(response, *args, **kwargs): + print(response.status_code) + # resp=args[0] + #expected_output_split = resp.getHeaders().split('\r\n')[ 0].split(' ', 2) + #expected_output = (int(expected_output_split[1]), str( expected_output_split[2])) + #r = result.Result(session_filename, expected_output[0], response.status_code) + # print(r.getResultString(colorize=True)) +# make sure len of the message body is greater than length + + +def gen(): + yield 'pforpersia,champaignurbana'.encode('utf-8') + yield 'there'.encode('utf-8') + + +def txn_replay(session_filename, txn, proxy, result_queue, h2conn, request_IDs): + """ 
Replays a single transaction + :param request_session: has to be a valid requests session""" + req = txn.getRequest() + resp = txn.getResponse() + # Construct HTTP request & fire it off + txn_req_headers = req.getHeaders() + txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers) + txn_req_headers_dict['Content-MD5'] = txn._uuid # used as unique identifier + if 'body' in txn_req_headers_dict: + del txn_req_headers_dict['body'] + responseID = -1 + #print("Replaying session") + try: + # response = request_session.request(extractHeader.extract_txn_req_method(txn_req_headers), + # 'http://' + extractHeader.extract_host(txn_req_headers) + extractHeader.extract_GET_path(txn_req_headers), + # headers=txn_req_headers_dict,stream=False) # making stream=False raises contentdecoding exception? kill me + method = extractHeader.extract_txn_req_method(txn_req_headers) + response = None + mbody = None + #txn_req_headers_dict['Host'] = "localhost" + if 'Transfer-Encoding' in txn_req_headers_dict: + # deleting the host key, since the STUPID post/get functions are going to add host field anyway, so there will be multiple host fields in the header + # This confuses the ATS and it returns 400 "Invalid HTTP request". I don't believe this + # BUT, this is not a problem if the data is not chunked encoded.. Strange, huh? 
+ #del txn_req_headers_dict['Host'] + if 'Content-Length' in txn_req_headers_dict: + #print("ewww !") + del txn_req_headers_dict['Content-Length'] + mbody = gen() + if 'Content-Length' in txn_req_headers_dict: + nBytes = int(txn_req_headers_dict['Content-Length']) + mbody = createDummyBodywithLength(nBytes) + if 'Connection' in txn_req_headers_dict: + del txn_req_headers_dict['Connection'] + #str2 = extractHeader.extract_host(txn_req_headers)+ extractHeader.extract_GET_path(txn_req_headers) + # print(str2) + if method == 'GET': + responseID = h2conn.request('GET', url=extractHeader.extract_GET_path( + txn_req_headers), headers=txn_req_headers_dict, body=mbody) + #print("get response", responseID) + return responseID + # request_IDs.append(responseID) + #response = h2conn.get_response(id) + # print(response.headers) + # if 'Content-Length' in response.headers: + # content = response.read() + #print("len: {0} received {1}".format(response.headers['Content-Length'],content)) + + elif method == 'POST': + responseID = h2conn.request('POST', url=extractHeader.extract_GET_path( + txn_req_headers), headers=txn_req_headers_dict, body=mbody) + print("get response", responseID) + return responseID + + elif method == 'HEAD': + responseID = h2conn.request('HEAD', url=extractHeader.extract_GET_path(txn_req_headers), headers=txn_req_headers_dict) + print("get response", responseID) + return responseID + + # print(response.headers) + #print("logged respose") + expected = extractHeader.responseHeader_to_dict(resp.getHeaders()) + # print(expected) + if mainProcess.verbose: + expected_output_split = resp.getHeaders().split('\r\n')[0].split(' ', 2) + expected_output = (int(expected_output_split[1]), str(expected_output_split[2])) + r = result.Result(session_filename, expected_output[0], response.status_code) + print(r.getResultString(response.headers, expected, colorize=True)) + + # return responseID + + except UnicodeEncodeError as e: + # these unicode errors are due to the 
interaction between Requests and our wiretrace data. + # TODO fix + print("UnicodeEncodeError exception") + + except: + e = sys.exc_info() + print("ERROR in requests: ", e, response, session_filename) + + +def session_replay(input, proxy, result_queue): + global bSTOP + ''' Replay all transactions in session + + This entire session will be replayed in one requests.Session (so one socket / TCP connection)''' + # if timing_control: + # time.sleep(float(session._timestamp)) # allow other threads to run + while bSTOP == False: + for session in iter(input.get, 'STOP'): + print(bSTOP) + if session == 'STOP': + print("Queue is empty") + bSTOP = True + break + txn = session.returnFirstTransaction() + req = txn.getRequest() + # Construct HTTP request & fire it off + txn_req_headers = req.getHeaders() + txn_req_headers_dict = extractHeader.header_to_dict(txn_req_headers) + with h2ATS(txn_req_headers_dict['Host'], secure=True, proxy_host=Config.proxy_host, proxy_port=Config.proxy_ssl_port) as h2conn: + request_IDs = [] + respList = [] + for txn in session.getTransactionIter(): + try: + ret = txn_replay(session._filename, txn, proxy, result_queue, h2conn, request_IDs) + respList.append(txn.getResponse()) + request_IDs.append(ret) + #print("txn return value is ",ret) + except: + e = sys.exc_info() + print("ERROR in replaying: ", e, txn.getRequest().getHeaders()) + for id in request_IDs: + expectedH = respList.pop(0) + # print("extracting",id) + response = h2conn.get_response(id) + #print("code {0}:{1}".format(response.status,response.headers)) + response_dict = {} + if mainProcess.verbose: + for field, value in response.headers.items(): + response_dict[field.decode('utf-8')] = value.decode('utf-8') + + expected_output_split = expectedH.getHeaders().split('\r\n')[0].split(' ', 2) + expected_output = (int(expected_output_split[1]), str(expected_output_split[2])) + r = result.Result("", expected_output[0], response.status) + expected_Dict = 
extractHeader.responseHeader_to_dict(expectedH.getHeaders()) + print(r.getResultString(response_dict, expected_Dict, colorize=True)) + # r.Compare(response_dict,expected_Dict) + + bSTOP = True + print("Queue is empty") + input.put('STOP') + break + + +def client_replay(input, proxy, result_queue, nThread): + Threads = [] + for i in range(nThread): + t = Thread(target=session_replay, args=[input, proxy, result_queue]) + t.start() + Threads.append(t) + + for t1 in Threads: + t1.join() diff --git a/tests/tools/traffic-replay/mainProcess.py b/tests/tools/traffic-replay/mainProcess.py new file mode 100644 index 00000000000..0c392300a07 --- /dev/null +++ b/tests/tools/traffic-replay/mainProcess.py @@ -0,0 +1,63 @@ +#!/bin/env python3 +''' +''' +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import sys +import json +import socket +import os +import threading +import time +import argparse +import subprocess +import shlex +from multiprocessing import Pool, Process +from collections import deque +from progress.bar import Bar +import sessionvalidation.sessionvalidation as sv +import lib.result as result +import WorkerTask +import Scheduler +import Config +verbose = False + + +def check_for_ats(hostname, port): + ''' Checks to see if ATS is running on `hostname` and `port` + If not running, this function will terminate the script + ''' + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + result = sock.connect_ex((hostname, port)) + if result != 0: + # hostname:port is not being listened to + print('==========') + print('Error: Apache Traffic Server is not running on {0}:{1}'.format(hostname, port)) + print('Aborting') + print('==========') + sys.exit() +# Note: this function can't handle multi-line (ie wrapped line) headers +# Hopefully this isn't an issue because multi-line headers are deprecated now + + +def main(path, replay_type, Bverbose): + global verbose + verbose = Bverbose + check_for_ats(Config.proxy_host, Config.proxy_nonssl_port) + proxy = {"http": "http://{0}:{1}".format(Config.proxy_host, Config.proxy_nonssl_port)} + Scheduler.LaunchWorkers(path, Config.nProcess, proxy, replay_type, Config.nThread) diff --git a/tools/changelog.pl b/tools/changelog.pl index 8ac7fc98d89..b151a6964d7 100755 --- a/tools/changelog.pl +++ b/tools/changelog.pl @@ -22,36 +22,64 @@ use WWW::Curl::Easy; use JSON; -my $fixversion = shift; -my $url = "https://issues.apache.org/jira"; -my $jql = "project = TS AND status in (Resolved, Closed) AND fixVersion = $fixversion ORDER BY key ASC"; +my $owner = shift; +my $repo = shift; +my $milestone = shift; +my $url = "https://api.github.com"; -sub jira_search +sub milestone_lookup { my $url = shift; - my $jql = shift; - my $index = shift; - my $endpoint = "/rest/api/2/search"; - - my $query = { - jql => $jql, - 
startAt => $index, - fields => [ - "summary", - "issuetype" - ] - }; - - my $req_body = to_json($query); + my $owner = shift; + my $repo = shift; + my $milestone_title = shift; + my $endpoint = "/repos/$owner/$repo/milestones"; + + my $params = "state=all"; + my $resp_body; my $curl = WWW::Curl::Easy->new; - $curl->setopt(CURLOPT_POST, 1); - $curl->setopt(CURLOPT_POSTFIELDS, $req_body); - $curl->setopt(CURLOPT_HTTPHEADER, ['Content-Type: application/json']); - open(my $fileb, ">", \$resp_body); - $curl->setopt(CURLOPT_WRITEDATA, $fileb); - $curl->setopt(CURLOPT_URL, $url . $endpoint); + #$curl->setopt(CURLOPT_VERBOSE, 1); + $curl->setopt(CURLOPT_HTTPHEADER, ['Accept: application/vnd.github.v3+json', 'User-Agent: Awesome-Octocat-App']); + $curl->setopt(CURLOPT_WRITEDATA, \$resp_body); + $curl->setopt(CURLOPT_URL, $url . $endpoint . '?' . $params); + + my $retcode = $curl->perform(); + if ($retcode == 0 && $curl->getinfo(CURLINFO_HTTP_CODE) == 200) + { + my $milestones = from_json($resp_body); + foreach my $milestone (@{ $milestones }) + { + if ($milestone->{title} eq $milestone_title) + { + return $milestone->{number}; + } + } + } + + return undef; +} + +sub issue_search +{ + my $url = shift; + my $owner = shift; + my $repo = shift; + my $milestone_id = shift; + my $page = shift; + my $endpoint = "/repos/$owner/$repo/issues"; + + my $params = "milestone=$milestone_id&state=closed&page=$page"; + + my $resp_body; + my $curl = WWW::Curl::Easy->new; + + #$curl->setopt(CURLOPT_VERBOSE, 1); + $curl->setopt(CURLOPT_HTTPHEADER, ['Accept: application/vnd.github.v3+json', 'User-Agent: Awesome-Octocat-App']); + $curl->setopt(CURLOPT_WRITEDATA, \$resp_body); + $curl->setopt(CURLOPT_URL, $url . $endpoint . '?' . 
$params); + my $retcode = $curl->perform(); if ($retcode == 0 && $curl->getinfo(CURLINFO_HTTP_CODE) == 200) { return from_json($resp_body); @@ -60,49 +88,35 @@ sub jira_search undef; } -my $count = 0; -my $changelog; -my $issues; +my $milestone_id = milestone_lookup($url, $owner, $repo, $milestone); -do +if (!defined($milestone_id)) { - $issues = jira_search($url, $jql, $count); + exit 1; +} - if (!defined($issues)) - { - exit 1; - } +my $issues; +my $changelog; +my $page = 1; - foreach my $issue (@{ $issues->{issues} }) +do { + $issues = issue_search($url, $owner, $repo, $milestone_id, $page); + foreach my $issue (@{ $issues }) { if (defined($issue)) { - push @{ $changelog->{$issue->{fields}{issuetype}{name}} }, {key => $issue->{key}, summary => $issue->{fields}{summary}}; - $count++; + push @{ $changelog }, {number => $issue->{number}, title => $issue->{title}}; } } -} -while ($count < $issues->{total}); + $page++; +} while (scalar @{ $issues }); -if (!defined($changelog)) +if (defined($changelog)) { - exit 1; -} - -print "Changes with Apache Traffic Server $fixversion\n"; + print "Changes with Apache Traffic Server $milestone\n"; -foreach my $key (sort keys %{ $changelog }) -{ - print "\n$key:\n"; - foreach my $issue (@{ $changelog->{$key} }) + foreach my $issue (sort {$a->{number} <=> $b->{number}} @{ $changelog }) { - chomp $issue->{summary}; - $issue->{summary} =~ s/\s+$//; # Trim trailing whitespace - print " *) [$issue->{key}] "; - if (length($issue->{summary}) <= (131 - 15)) { - print "$issue->{summary}\n"; - } else { - print substr($issue->{summary}, 0, (131 - 18)), "...\n"; - } + print " #$issue->{number} - $issue->{title}\n"; } } diff --git a/tools/clang-format.sh b/tools/clang-format.sh index 293965bedbb..5eba5adff7e 100755 --- a/tools/clang-format.sh +++ b/tools/clang-format.sh @@ -20,12 +20,13 @@ set -e # exit on error +PKGDATE="20160415" DIR=${1:-.} -ROOT=${ROOT:-$(cd $(dirname $0) && git rev-parse --show-toplevel)/.git/fmt} 
-PACKAGE="clang-format-20160415.tar.bz2" +ROOT=${ROOT:-$(cd $(dirname $0) && git rev-parse --show-toplevel)/.git/fmt/${PKGDATE}} +PACKAGE="clang-format-${PKGDATE}.tar.bz2" VERSION="clang-format version 3.9.0 (trunk 265913)" -URL=${URL:-https://bintray.com/artifact/download/apache/trafficserver/${PACKAGE}} +URL=${URL:-https://ci.trafficserver.apache.org/bintray/${PACKAGE}} TAR=${TAR:-tar} CURL=${CURL:-curl} diff --git a/tools/jtest/jtest.cc b/tools/jtest/jtest.cc index ef9bdd08cc5..774a6ddff9b 100644 --- a/tools/jtest/jtest.cc +++ b/tools/jtest/jtest.cc @@ -41,9 +41,9 @@ #include #include #include -#include #include #include +#include #include