Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions doc/admin-guide/monitoring/statistics/core/cache.en.rst
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,6 @@
Cache
*****

.. ts:stat:: global proxy.node.cache.contents.num_docs integer
:ungathered:

Represents the number of documents currently residing in the cache.

.. ts:stat:: global proxy.process.cache_total_hits counter

Represents the total number of cache lookups which have been satisfied by
Expand Down
26 changes: 5 additions & 21 deletions doc/admin-guide/monitoring/statistics/core/general.en.rst
Original file line number Diff line number Diff line change
Expand Up @@ -26,35 +26,19 @@ General

Human-readable version number string for the currently running |TS| instance.

.. ts:stat:: global proxy.node.config.reconfigure_required integer
.. ts:stat:: global proxy.process.proxy.reconfigure_required integer
:type: flag

.. ts:stat:: global proxy.node.config.reconfigure_time integer
.. ts:stat:: global proxy.process.proxy.reconfigure_time integer

.. ts:stat:: global proxy.node.config.restart_required.proxy integer
.. ts:stat:: global proxy.process.proxy.restart_required integer
:type: flag

.. ts:stat:: global proxy.node.hostname_FQ string ats-host.example.com

Fully-qualified domain name for the host on which |TS| is running.

.. ts:stat:: global proxy.node.hostname string ats-host

The hostname only, without domain, for the host on which |TS| is running.

.. ts:stat:: global proxy.node.proxy_running integer
:type: flag

Indicates whether any form of HTTP proxying is currently enabled in the
running instance of |TS|.

.. ts:stat:: global proxy.node.restarts.proxy.cache_ready_time integer
.. ts:stat:: global proxy.process.proxy.cache_ready_time integer
:type: gauge
:units: seconds

.. ts:stat:: global proxy.node.restarts.proxy.restart_count integer
.. ts:stat:: global proxy.node.restarts.proxy.start_time integer
.. ts:stat:: global proxy.node.restarts.proxy.stop_time integer
.. ts:stat:: global proxy.process.proxy.start_time integer
.. ts:stat:: global proxy.process.user_agent_total_bytes integer
.. ts:stat:: global proxy.process.http.tunnels integer
.. ts:stat:: global proxy.process.update.fails integer
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,6 @@ Hierarchical Cache
:type: counter
:units: bytes

.. ts:stat:: global proxy.node.http.parent_proxy_total_response_bytes integer
:type: counter
:units: bytes

.. ts:stat:: global proxy.process.http.current_parent_proxy_connections integer
:type: counter

Expand All @@ -47,4 +43,3 @@ Hierarchical Cache

.. ts:stat:: global proxy.process.http.total_parent_proxy_connections integer
:type: counter

2 changes: 1 addition & 1 deletion doc/appendices/command-line/traffic_ctl.en.rst
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ Display the current value of a configuration record.
modified since the previous configuration load, this command is a no-op.

The timestamp of the last reconfiguration event (in seconds since epoch) is published in the
`proxy.node.config.reconfigure_time` metric.
`proxy.process.proxy.reconfigure_time` metric.

.. program:: traffic_ctl config
.. option:: set RECORD VALUE
Expand Down
2 changes: 1 addition & 1 deletion doc/developer-guide/jsonrpc/jsonrpc-api.en.rst
Original file line number Diff line number Diff line change
Expand Up @@ -965,7 +965,7 @@ The response will contain the default `success_response` or a proper rpc error,

Validation:

You can request the record `proxy.process.proxy.reconfigure_time`, which will be updated with the time of the requested update.
You can request for the record `proxy.process.proxy.reconfigure_time` which will be updated with the time of the requested update.


.. _jsonrpc-api-management-metrics:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,6 @@ msgid ""
"cache hits, over the previous 10 seconds,"
msgstr ""

#: ../../admin-guide/monitoring/statistics/core/bandwidth.en.rst:54
msgid ""
"The difference of :ts:stat:`proxy.process.user_agent_total_bytes` and :ts:stat:"
"`proxy.process.origin_server_total_bytes`, divided by :ts:stat:`proxy.node."
"user_agent_total_bytes`."
msgstr ""

#: ../../admin-guide/monitoring/statistics/core/bandwidth.en.rst:58
msgid ""
"Represents the ratio of bytes served to user agents which were satisfied by "
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -185,7 +185,7 @@ msgstr ""
#: ../../../appendices/command-line/traffic_ctl.en.rst:151
msgid ""
"The timestamp of the last reconfiguration event (in seconds since epoch) is "
"published in the `proxy.node.config.reconfigure_time` metric."
"published in the `proxy.process.proxy.reconfigure_time` metric."
msgstr ""

#: ../../../appendices/command-line/traffic_ctl.en.rst:158
Expand Down
2 changes: 1 addition & 1 deletion include/api/Metrics.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class Metrics
{
_blobs[0] = new MetricStorage();
ink_release_assert(_blobs[0]);
ink_release_assert(0 == newMetric("proxy.node.api.metrics.bad_id")); // Reserve slot 0 for errors, this should always be 0
ink_release_assert(0 == newMetric("proxy.process.api.metrics.bad_id")); // Reserve slot 0 for errors, this should always be 0
}

// Singleton
Expand Down
2 changes: 1 addition & 1 deletion include/tscore/TSSystemState.h
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class TSSystemState
return unlikely(_instance().event_system_shut_down);
}

// Keeps track if the server is in draining state, follows the proxy.node.config.draining metric.
// Keeps track if the server is in draining state, follows the proxy.process.proxy.draining metric.
//
static bool
is_draining()
Expand Down
11 changes: 7 additions & 4 deletions mgmt/rpc/handlers/config/Configuration.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
#include "config/FileManager.h"

#include "rpc/handlers/common/RecordsUtils.h"
#include "api/Metrics.h"

namespace utils = rpc::handlers::records::utils;

Expand Down Expand Up @@ -183,6 +184,9 @@ set_config_records(std::string_view const &id, YAML::Node const &params)
ts::Rv<YAML::Node>
reload_config(std::string_view const &id, YAML::Node const &params)
{
ts::Metrics &intm = ts::Metrics::getInstance();
static auto reconf_time = intm.lookup("proxy.process.proxy.reconfigure_time");
static auto reconf_req = intm.lookup("proxy.process.proxy.reconfigure_required");
ts::Rv<YAML::Node> resp;
Debug("RPC", "invoke plugin callbacks");
// if there is any error, report it back.
Expand All @@ -191,10 +195,9 @@ reload_config(std::string_view const &id, YAML::Node const &params)
}
// If any callback was registered (TSMgmtUpdateRegister) for config notifications, then it will eventually be notified.
FileManager::instance().invokeConfigPluginCallbacks();
// save config time.
RecSetRecordInt("proxy.node.config.reconfigure_time", time(nullptr), REC_SOURCE_DEFAULT);
// TODO: we may not need this any more
RecSetRecordInt("proxy.node.config.reconfigure_required", 0, REC_SOURCE_DEFAULT);

intm[reconf_time] = time(nullptr);
intm[reconf_req] = 0;

return resp;
}
Expand Down
17 changes: 10 additions & 7 deletions mgmt/rpc/handlers/server/Server.cc
Original file line number Diff line number Diff line change
Expand Up @@ -63,17 +63,20 @@ namespace err = rpc::handlers::errors;
static bool
is_server_draining()
{
RecInt draining = 0;
if (RecGetRecordInt("proxy.node.config.draining", &draining) != REC_ERR_OKAY) {
return false;
}
return draining != 0;
ts::Metrics &intm = ts::Metrics::getInstance();
static auto drain_id = intm.lookup("proxy.process.proxy.draining");

return (intm[drain_id] != 0);
}

static void inline set_server_drain(bool drain)
static void
set_server_drain(bool drain)
{
ts::Metrics &intm = ts::Metrics::getInstance();
static auto drain_id = intm.lookup("proxy.process.proxy.draining");

TSSystemState::drain(drain);
RecSetRecordInt("proxy.node.config.draining", TSSystemState::is_draining() ? 1 : 0, REC_SOURCE_DEFAULT);
intm[drain_id] = TSSystemState::is_draining() ? 1 : 0;
}

ts::Rv<YAML::Node>
Expand Down
4 changes: 2 additions & 2 deletions plugins/lua/ts_lua_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@ ts_lua_script_registered(lua_State *L, char *script)
// first check the reconfigure_time for the script. if it is not found, then it is new
// if it matches the current reconfigure_time, then it is loaded already
// And we return the conf pointer of it. Otherwise it can be loaded again.
if (TS_SUCCESS == TSMgmtIntGet("proxy.node.config.reconfigure_time", &curr_time)) {
if (TS_SUCCESS == TSMgmtIntGet("proxy.process.proxy.reconfigure_time", &curr_time)) {
lua_pushliteral(L, "__scriptTime");
lua_pushstring(L, script);
lua_concat(L, 2);
Expand Down Expand Up @@ -260,7 +260,7 @@ ts_lua_script_register(lua_State *L, char *script, ts_lua_instance_conf *conf)
TSDebug(TS_LUA_DEBUG_TAG, "[%s] registering script [%s]", __FUNCTION__, script);

// we recorded the script reconfigure_time and its conf pointer in registry
if (TS_SUCCESS == TSMgmtIntGet("proxy.node.config.reconfigure_time", &time)) {
if (TS_SUCCESS == TSMgmtIntGet("proxy.process.proxy.reconfigure_time", &time)) {
lua_pushliteral(L, "__scriptTime");
lua_pushstring(L, script);
lua_concat(L, 2);
Expand Down
2 changes: 1 addition & 1 deletion src/api/test_Metrics.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ TEST_CASE("Metrics", "[libtsapi][Metrics]")
{
auto [name, value] = *m.begin();
REQUIRE(value == 0);
REQUIRE(name == "proxy.node.api.metrics.bad_id");
REQUIRE(name == "proxy.process.api.metrics.bad_id");

REQUIRE(m.begin() != m.end());
REQUIRE(++m.begin() == m.end());
Expand Down
25 changes: 0 additions & 25 deletions src/records/RecordsConfig.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1240,31 +1240,6 @@ static const RecordElement RecordsConfig[] =
{RECT_CONFIG, "proxy.config.plugin.load_elevated", RECD_INT, "0", RECU_RESTART_TS, RR_NULL, RECC_INT, "[0-1]", RECA_READ_ONLY}
,

//##############################################################################
//#
//# Local Manager Specific Records File
//#
//# <RECORD-TYPE> <NAME> <TYPE> <VALUE (till end of line)>
//#
//# *NOTE*: All NODE Records must be placed continuously!
//#
//# Add NODE Records Here
//##############################################################################
{RECT_NODE, "proxy.node.hostname_FQ", RECD_STRING, nullptr, RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_NODE, "proxy.node.hostname", RECD_STRING, nullptr, RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
//#
//# Restart Stats
//#
{RECT_NODE, "proxy.node.restarts.proxy.start_time", RECD_INT, "0", RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_NODE, "proxy.node.restarts.proxy.cache_ready_time", RECD_INT, "0", RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_NODE, "proxy.node.restarts.proxy.stop_time", RECD_INT, "0", RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
{RECT_NODE, "proxy.node.restarts.proxy.restart_count", RECD_INT, "0", RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL}
,
//#
//# SSL parent proxying info
//#
Expand Down
8 changes: 4 additions & 4 deletions src/traffic_ctl/CtrlPrinters.cc
Original file line number Diff line number Diff line change
Expand Up @@ -215,13 +215,13 @@ ConfigStatusPrinter::write_output(YAML::Node const &result)
recordName = recordInfo.name;
if (recordName == "proxy.process.version.server.long") {
std::cout << "Version: " << recordInfo.currentValue << "\n";
} else if (recordName == "proxy.node.restarts.proxy.start_time") {
} else if (recordName == "proxy.process.proxy.start_time") {
std::cout << swoc::bwprint(text, "{}: {}\n", "Started at", FloatDate(recordInfo.currentValue, "%a %d %b %Y %H:%M:%S"));
} else if (recordName == "proxy.node.config.reconfigure_time") {
} else if (recordName == "proxy.process.proxy.reconfigure_time") {
std::cout << swoc::bwprint(text, "{}: {}\n", "Reconfigured at", FloatDate(recordInfo.currentValue, "%a %d %b %Y %H:%M:%S"));
} else if (recordName == "proxy.node.config.reconfigure_required") {
} else if (recordName == "proxy.process.proxy.reconfigure_required") {
std::cout << "Reconfigure required: " << ((recordInfo.currentValue == "1") ? "yes" : "no") << "\n";
} else if (recordName == "proxy.node.config.restart_required.proxy") {
} else if (recordName == "proxy.process.proxy.restart_required") {
std::cout << "Restart required: " << ((recordInfo.currentValue == "1") ? "yes" : "no") << "\n";
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/traffic_ctl/jsonrpc/CtrlRPCRequests.h
Original file line number Diff line number Diff line change
Expand Up @@ -244,8 +244,8 @@ struct ConfigStatusRequest : shared::rpc::RecordLookupRequest {
ConfigStatusRequest() : super()
{
static const std::array<std::string, 5> statusFieldsNames = {
"proxy.process.version.server.long", "proxy.node.restarts.proxy.start_time", "proxy.node.config.reconfigure_time",
"proxy.node.config.reconfigure_required", "proxy.node.config.restart_required.proxy"};
"proxy.process.version.server.long", "proxy.process.proxy.start_time", "proxy.process.proxy.reconfigure_time",
"proxy.process.proxy.reconfigure_required", "proxy.process.proxy.restart_required"};
for (auto &&recordName : statusFieldsNames) {
super::emplace_rec(recordName, shared::rpc::NOT_REGEX, shared::rpc::METRIC_REC_TYPES);
}
Expand Down
37 changes: 26 additions & 11 deletions src/traffic_server/traffic_server.cc
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,9 @@ class SignalContinuation : public Continuation
int
periodic(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
{
ts::Metrics &intm = ts::Metrics::getInstance();
static auto drain_id = intm.lookup("proxy.process.proxy.draining");

if (signal_received[SIGUSR1]) {
signal_received[SIGUSR1] = false;

Expand Down Expand Up @@ -306,7 +309,7 @@ class SignalContinuation : public Continuation

RecInt timeout = 0;
if (RecGetRecordInt("proxy.config.stop.shutdown_timeout", &timeout) == REC_ERR_OKAY && timeout) {
RecSetRecordInt("proxy.node.config.draining", 1, REC_SOURCE_DEFAULT);
intm[drain_id] = 1;
TSSystemState::drain(true);
// Close listening sockets here only if TS is running standalone
RecInt close_sockets = 0;
Expand Down Expand Up @@ -424,9 +427,11 @@ class MemoryLimit : public Continuation
public:
MemoryLimit() : Continuation(new_ProxyMutex())
{
ts::Metrics &intm = ts::Metrics::getInstance();

memset(&_usage, 0, sizeof(_usage));
SET_HANDLER(&MemoryLimit::periodic);
RecRegisterStatInt(RECT_PROCESS, "proxy.process.traffic_server.memory.rss", static_cast<RecInt>(0), RECP_NON_PERSISTENT);
memory_rss = intm.newMetricPtr("proxy.process.traffic_server.memory.rss");
}

~MemoryLimit() override { mutex = nullptr; }
Expand All @@ -446,7 +451,7 @@ class MemoryLimit : public Continuation
_memory_limit = _memory_limit >> 10; // divide by 1024

if (getrusage(RUSAGE_SELF, &_usage) == 0) {
RecSetRecordInt("proxy.process.traffic_server.memory.rss", _usage.ru_maxrss << 10, REC_SOURCE_DEFAULT); // * 1024
ts::Metrics::write(memory_rss, _usage.ru_maxrss << 10); // * 1024
Debug("server", "memory usage - ru_maxrss: %ld memory limit: %" PRId64, _usage.ru_maxrss, _memory_limit);
if (_memory_limit > 0) {
if (_usage.ru_maxrss > _memory_limit) {
Expand Down Expand Up @@ -474,6 +479,7 @@ class MemoryLimit : public Continuation
private:
int64_t _memory_limit = 0;
struct rusage _usage;
ts::Metrics::IntType *memory_rss;
};

/** Gate the emission of the "Traffic Server is fully initialized" log message.
Expand Down Expand Up @@ -799,8 +805,10 @@ CB_After_Cache_Init()
emit_fully_initialized_message();
}

time_t cache_ready_at = time(nullptr);
RecSetRecordInt("proxy.node.restarts.proxy.cache_ready_time", cache_ready_at, REC_SOURCE_DEFAULT);
ts::Metrics &intm = ts::Metrics::getInstance();
auto id = intm.lookup("proxy.process.proxy.cache_ready_time");

intm[id].store(time(nullptr));

// Alert the plugins the cache is initialized.
hook = lifecycle_hooks->get(TS_LIFECYCLE_CACHE_READY_HOOK);
Expand Down Expand Up @@ -1836,12 +1844,19 @@ main(int /* argc ATS_UNUSED */, const char **argv)
syslog_log_configure();

// Register stats
RecRegisterStatInt(RECT_NODE, "proxy.node.config.reconfigure_time", time(nullptr), RECP_NON_PERSISTENT);
RecRegisterStatInt(RECT_NODE, "proxy.node.config.reconfigure_required", 0, RECP_NON_PERSISTENT);
RecRegisterStatInt(RECT_NODE, "proxy.node.config.restart_required.proxy", 0, RECP_NON_PERSISTENT);
RecRegisterStatInt(RECT_NODE, "proxy.node.config.draining", 0, RECP_NON_PERSISTENT);
RecRegisterStatInt(RECT_NODE, "proxy.node.proxy_running", 1, RECP_NON_PERSISTENT);
RecSetRecordInt("proxy.node.restarts.proxy.start_time", time(nullptr), REC_SOURCE_DEFAULT);
ts::Metrics &intm = ts::Metrics::getInstance();
int32_t id;

id = intm.newMetric("proxy.process.proxy.reconfigure_time");
intm[id] = time(nullptr);
id = intm.newMetric("proxy.process.proxy.start_time");
intm[id] = time(nullptr);
// These all get initialized to 0
intm.newMetric("proxy.process.proxy.reconfigure_required");
intm.newMetric("proxy.process.proxy.restart_required");
intm.newMetric("proxy.process.proxy.draining");
// This gets updated later (in the callback)
intm.newMetric("proxy.process.proxy.cache_ready_time");

// init huge pages
int enabled;
Expand Down