From f06f2cbb540ff554b78add73337bead309558e94 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 5 Sep 2016 10:58:14 -0500 Subject: [PATCH 1/3] TS-4532: Static type checking for time. Rough first pass just for iocore/eventsystem to get a feel for the changes. Changed lib/ts, iocore/eventsystem to compile. Partial conversion in iocore/net. --- cmd/traffic_cop/traffic_cop.cc | 14 +- cmd/traffic_manager/metrics.cc | 12 +- iocore/aio/AIO.cc | 8 +- iocore/aio/P_AIO.h | 4 +- iocore/aio/test_AIO.cc | 2 +- iocore/cache/CacheDir.cc | 2 +- iocore/cache/P_CacheDir.h | 2 +- iocore/cache/P_CacheInternal.h | 4 +- iocore/cache/P_CacheTest.h | 2 +- iocore/cluster/ClusterCache.cc | 12 +- iocore/cluster/ClusterConfig.cc | 2 +- iocore/cluster/ClusterHandler.cc | 20 +- iocore/cluster/ClusterHandlerBase.cc | 2 +- iocore/cluster/ClusterLoadMonitor.cc | 16 +- iocore/cluster/P_ClusterCache.h | 18 +- iocore/cluster/P_ClusterCacheInternal.h | 2 +- iocore/cluster/P_ClusterHandler.h | 22 +- iocore/cluster/P_ClusterLoadMonitor.h | 12 +- iocore/cluster/P_TimeTrace.h | 2 +- iocore/dns/DNS.cc | 6 +- iocore/dns/P_DNSProcessor.h | 14 +- iocore/eventsystem/I_EThread.h | 12 +- iocore/eventsystem/I_Event.h | 16 +- iocore/eventsystem/I_EventProcessor.h | 12 +- iocore/eventsystem/I_Lock.h | 6 +- iocore/eventsystem/I_PriorityEventQueue.h | 22 +- iocore/eventsystem/I_ProtectedQueue.h | 2 +- iocore/eventsystem/I_Thread.h | 12 +- iocore/eventsystem/PQ-List.cc | 19 +- iocore/eventsystem/P_Freer.h | 8 +- iocore/eventsystem/P_UnixEThread.h | 30 +-- iocore/eventsystem/P_UnixEvent.h | 8 +- iocore/eventsystem/P_UnixEventProcessor.h | 22 +- iocore/eventsystem/ProtectedQueue.cc | 4 +- iocore/eventsystem/Thread.cc | 2 +- iocore/eventsystem/UnixEThread.cc | 32 +-- iocore/eventsystem/UnixEvent.cc | 22 +- iocore/eventsystem/UnixEventProcessor.cc | 2 +- iocore/hostdb/HostDB.cc | 4 +- iocore/hostdb/P_RefCountCacheSerializer.h | 8 +- iocore/net/I_NetVConnection.h | 8 +- iocore/net/I_UDPPacket.h | 8 +- 
iocore/net/OCSPStapling.cc | 6 +- iocore/net/P_NetAccept.h | 2 +- iocore/net/P_SSLNetVConnection.h | 4 +- iocore/net/P_UDPNet.h | 32 +-- iocore/net/P_UDPPacket.h | 14 +- iocore/net/P_UnixNet.h | 36 +-- iocore/net/P_UnixNetVConnection.h | 42 ++-- iocore/net/SSLConfig.cc | 2 +- iocore/net/SSLNetVConnection.cc | 6 +- iocore/net/UnixNet.cc | 71 +++--- iocore/net/UnixNetAccept.cc | 2 +- iocore/net/UnixNetPages.cc | 2 +- iocore/net/UnixNetProcessor.cc | 2 +- iocore/net/UnixNetVConnection.cc | 50 ++-- iocore/net/UnixUDPNet.cc | 22 +- lib/ts/Diags.cc | 5 +- lib/ts/EventNotify.cc | 2 +- lib/ts/ink_hrtime.cc | 56 +---- lib/ts/ink_hrtime.h | 255 +++++--------------- plugins/experimental/memcache/tsmemcache.cc | 26 +- plugins/experimental/memcache/tsmemcache.h | 2 +- proxy/ICP.cc | 2 +- proxy/ICP.h | 12 +- proxy/ICPConfig.cc | 2 +- proxy/ICPlog.h | 2 +- proxy/InkAPI.cc | 10 +- proxy/InkIOCoreAPI.cc | 2 +- proxy/Milestones.h | 16 +- proxy/PluginVC.cc | 12 +- proxy/PluginVC.h | 16 +- proxy/ProxyClientSession.h | 8 +- proxy/ProxyClientTransaction.h | 4 +- proxy/RegressionSM.cc | 2 +- proxy/RegressionSM.h | 2 +- proxy/TestClock.cc | 4 +- proxy/TestClusterHash.cc | 2 +- proxy/TestDNS.cc | 6 +- proxy/TimeTrace.h | 2 +- proxy/congest/Congestion.cc | 8 +- proxy/congest/Congestion.h | 22 +- proxy/congest/CongestionDB.cc | 6 +- proxy/http/Http1ClientSession.h | 4 +- proxy/http/Http1ClientTransaction.h | 4 +- proxy/http/HttpSM.cc | 20 +- proxy/http/HttpSM.h | 2 +- proxy/http/HttpTransact.cc | 14 +- proxy/http/HttpTransact.h | 10 +- proxy/http2/Http2Stream.cc | 6 +- proxy/http2/Http2Stream.h | 12 +- proxy/logging/Log.cc | 2 +- proxy/logging/LogAccessHttp.cc | 22 +- proxy/logging/LogAccessICP.cc | 4 +- tools/jtest/jtest.cc | 14 +- 95 files changed, 587 insertions(+), 743 deletions(-) diff --git a/cmd/traffic_cop/traffic_cop.cc b/cmd/traffic_cop/traffic_cop.cc index d5537a117a9..0eb6579476c 100644 --- a/cmd/traffic_cop/traffic_cop.cc +++ b/cmd/traffic_cop/traffic_cop.cc @@ -121,8 +121,8 @@ 
static int init_sleep_time = cop_sleep_time; // 10 sec #define MANAGER_FLAP_RETRY_MSEC 60000 // if flapping, don't try to restart until after this retry duration static bool manager_flapping = false; // is the manager flapping? static int manager_flap_count = 0; // how many times has the manager flapped? -static ink_hrtime manager_flap_interval_start_time = 0; // first time we attempted to start the manager in past little while) -static ink_hrtime manager_flap_retry_start_time = 0; // first time we attempted to start the manager in past little while) +static ts_hrtick manager_flap_interval_start_time = 0; // first time we attempted to start the manager in past little while) +static ts_hrtick manager_flap_retry_start_time = 0; // first time we attempted to start the manager in past little while) #endif // transient syscall error timeout @@ -406,22 +406,22 @@ safe_kill(const char *lockfile_name, const char *pname, bool group) cop_log_trace("Leaving safe_kill(%s, %s, %d)\n", lockfile_name, pname, group); } -// ink_hrtime milliseconds() +// ts_hrtick milliseconds() // // Returns the result of gettimeofday converted to // one 64bit int // -static ink_hrtime +static ts_hrtick milliseconds(void) { struct timeval now; cop_log_trace("Entering milliseconds()\n"); now = ink_gettimeofday(); - // Make liberal use of casting to ink_hrtime to ensure the + // Make liberal use of casting to ts_hrtick to ensure the // compiler does not truncate our result cop_log_trace("Leaving milliseconds()\n"); - return ((ink_hrtime)now.tv_sec * 1000) + ((ink_hrtime)now.tv_usec / 1000); + return ((ts_hrtick)now.tv_sec * 1000) + ((ts_hrtick)now.tv_usec / 1000); } static void @@ -1392,7 +1392,7 @@ check_programs() safe_kill(server_lockfile, server_binary, false); } // Spawn the manager (check for flapping manager too) - ink_hrtime now = milliseconds(); + ts_hrtick now = milliseconds(); if (!manager_flapping) { if ((manager_flap_interval_start_time == 0) || (now - manager_flap_interval_start_time > 
MANAGER_FLAP_INTERVAL_MSEC)) { // either: diff --git a/cmd/traffic_manager/metrics.cc b/cmd/traffic_manager/metrics.cc index 8e3d19dea11..6c5f9f98c47 100644 --- a/cmd/traffic_manager/metrics.cc +++ b/cmd/traffic_manager/metrics.cc @@ -141,12 +141,12 @@ struct EvaluatorList { void evaluate(lua_State *L) const { - ink_hrtime start = ink_get_hrtime_internal(); - ink_hrtime elapsed; + ts_hrtick start = ink_get_hrtime_internal(); + ts_hrtick elapsed; forv_Vec(Evaluator, e, this->evaluators) { e->eval(L); } - elapsed = ink_hrtime_diff(ink_get_hrtime_internal(), start); - Debug("lua", "evaluated %u metrics in %fmsec", evaluators.length(), ink_hrtime_to_usec(elapsed) / 1000.0); + elapsed = ts_hrtick_diff(ink_get_hrtime_internal(), start); + Debug("lua", "evaluated %u metrics in %fmsec", evaluators.length(), ts_hrtick_to_usec(elapsed) / 1000.0); } bool update; @@ -169,8 +169,8 @@ update_metrics_namespace(lua_State *L) static int64_t timestamp_now_msec() { - ink_hrtime now = ink_get_hrtime_internal(); - return ink_hrtime_to_msec(now); + ts_hrtick now = ink_get_hrtime_internal(); + return ts_hrtick_to_msec(now); } static int diff --git a/iocore/aio/AIO.cc b/iocore/aio/AIO.cc index 430755ce4f6..4d1c029fe99 100644 --- a/iocore/aio/AIO.cc +++ b/iocore/aio/AIO.cc @@ -70,7 +70,7 @@ aio_stats_cb(const char * /* name ATS_UNUSED */, RecDataT data_type, RecData *da int64_t new_val = 0; int64_t diff = 0; int64_t count, sum; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); // The RecGetGlobalXXX stat functions are cheaper than the // RecGetXXX functions. The Global ones are expensive // for increments and decrements. 
But for AIO stats we @@ -78,7 +78,7 @@ aio_stats_cb(const char * /* name ATS_UNUSED */, RecDataT data_type, RecData *da RecGetGlobalRawStatSum(aio_rsb, id, &sum); RecGetGlobalRawStatCount(aio_rsb, id, &count); - int64_t time_diff = ink_hrtime_to_msec(now - count); + int64_t time_diff = ts_hrtick_to_msec(now - count); if (time_diff == 0) { data->rec_float = 0.0; return 0; @@ -115,7 +115,7 @@ static AIOTestData *data; int AIOTestData::ink_aio_stats(int event, void *d) { - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); double time_msec = (double)(now - start) / (double)HRTIME_MSECOND; int i = (aio_reqs[0] == NULL) ? 1 : 0; for (; i < num_filedes; ++i) @@ -510,7 +510,7 @@ aio_thread_main(void *arg) op->thread->schedule_imm_signal(op); ink_mutex_acquire(&my_aio_req->aio_mutex); } while (1); - timespec timedwait_msec = ink_hrtime_to_timespec(Thread::get_hrtime_updated() + HRTIME_MSECONDS(net_config_poll_timeout)); + timespec timedwait_msec = ts_hrtick_to_timespec(Thread::get_hrtime_updated() + HRTIME_MSECONDS(net_config_poll_timeout)); ink_cond_timedwait(&my_aio_req->aio_cond, &my_aio_req->aio_mutex, &timedwait_msec); } return 0; diff --git a/iocore/aio/P_AIO.h b/iocore/aio/P_AIO.h index 259dfec6fd2..27b69021c67 100644 --- a/iocore/aio/P_AIO.h +++ b/iocore/aio/P_AIO.h @@ -98,7 +98,7 @@ struct AIO_Reqs; struct AIOCallbackInternal : public AIOCallback { AIOCallback *first; AIO_Reqs *aio_req; - ink_hrtime sleep_time; + ts_hrtick sleep_time; int io_complete(int event, void *data); AIOCallbackInternal() { @@ -141,7 +141,7 @@ class AIOTestData : public Continuation int num_req; int num_temp; int num_queue; - ink_hrtime start; + ts_hrtick start; int ink_aio_stats(int event, void *data); diff --git a/iocore/aio/test_AIO.cc b/iocore/aio/test_AIO.cc index ae90b52c103..8d743f08ce1 100644 --- a/iocore/aio/test_AIO.cc +++ b/iocore/aio/test_AIO.cc @@ -89,7 +89,7 @@ struct AIO_Device : public Continuation { int fd; int id; char *buf; - ink_hrtime 
time_start, time_end; + ts_hrtick time_start, time_end; int seq_reads; int seq_writes; int rand_reads; diff --git a/iocore/cache/CacheDir.cc b/iocore/cache/CacheDir.cc index d8ba85dddce..0d9fd5bed7b 100644 --- a/iocore/cache/CacheDir.cc +++ b/iocore/cache/CacheDir.cc @@ -1357,7 +1357,7 @@ dir_corrupt_bucket(Dir *b, int s, Vol *d) EXCLUSIVE_REGRESSION_TEST(Cache_dir)(RegressionTest *t, int /* atype ATS_UNUSED */, int *status) { - ink_hrtime ttime; + ts_hrtick ttime; int ret = REGRESSION_TEST_PASSED; if ((CacheProcessor::IsCacheEnabled() != CACHE_INITIALIZED) || gnvol < 1) { diff --git a/iocore/cache/P_CacheDir.h b/iocore/cache/P_CacheDir.h index 951d7087a8a..53729677929 100644 --- a/iocore/cache/P_CacheDir.h +++ b/iocore/cache/P_CacheDir.h @@ -267,7 +267,7 @@ struct CacheSync : public Continuation { off_t writepos; AIOCallbackInternal io; Event *trigger; - ink_hrtime start_time; + ts_hrtick start_time; int mainEvent(int event, Event *e); void aio_write(int fd, char *b, int n, off_t o); diff --git a/iocore/cache/P_CacheInternal.h b/iocore/cache/P_CacheInternal.h index 40a6fd7094d..691ad486961 100644 --- a/iocore/cache/P_CacheInternal.h +++ b/iocore/cache/P_CacheInternal.h @@ -93,7 +93,7 @@ struct EvacuationBlock; do { \ ink_assert(!trigger); \ writer_lock_retry++; \ - ink_hrtime _t = HRTIME_MSECONDS(cache_read_while_writer_retry_delay); \ + ts_hrtick _t = HRTIME_MSECONDS(cache_read_while_writer_retry_delay); \ if (writer_lock_retry > 2) \ _t = HRTIME_MSECONDS(cache_read_while_writer_retry_delay) * 2; \ trigger = mutex->thread_holding->schedule_in_local(this, _t); \ @@ -452,7 +452,7 @@ struct CacheVC : public CacheVConnection { CacheKey *read_key; ContinuationHandler save_handler; uint32_t pin_in_cache; - ink_hrtime start_time; + ts_hrtick start_time; int base_stat; int recursive; int closed; diff --git a/iocore/cache/P_CacheTest.h b/iocore/cache/P_CacheTest.h index dd8c32d53d3..2df89926d68 100644 --- a/iocore/cache/P_CacheTest.h +++ b/iocore/cache/P_CacheTest.h @@ 
-69,7 +69,7 @@ struct CacheTestSM : public RegressionSM { Action *timeout; Action *cache_action; - ink_hrtime start_time; + ts_hrtick start_time; CacheVConnection *cache_vc; VIO *cvio; MIOBuffer *buffer; diff --git a/iocore/cluster/ClusterCache.cc b/iocore/cluster/ClusterCache.cc index 8c9399e18d4..d128a23c95b 100644 --- a/iocore/cluster/ClusterCache.cc +++ b/iocore/cluster/ClusterCache.cc @@ -66,9 +66,9 @@ static Ptr remoteCacheContQueueMutex[REMOTE_CONNECT_HASH]; static int cluster_sequence_number = 1; #ifdef CLUSTER_TEST_DEBUG -static ink_hrtime cache_cluster_timeout = HRTIME_SECONDS(65536); +static ts_hrtick cache_cluster_timeout = HRTIME_SECONDS(65536); #else -static ink_hrtime cache_cluster_timeout = CACHE_CLUSTER_TIMEOUT; +static ts_hrtick cache_cluster_timeout = CACHE_CLUSTER_TIMEOUT; #endif /////////////////// @@ -831,7 +831,7 @@ CacheContinuation::localVCsetupEvent(int event, ClusterVConnection *vc) } else if (((event == CLUSTER_EVENT_OPEN) || (event == CLUSTER_EVENT_OPEN_EXISTS)) && (((ptrdiff_t)timeout & (ptrdiff_t)1) == 0)) { - ink_hrtime now; + ts_hrtick now; now = Thread::get_hrtime(); CLUSTER_SUM_DYN_STAT(CLUSTER_OPEN_DELAY_TIME_STAT, now - start_time); LOG_EVENT_TIME(start_time, open_delay_time_dist, open_delay_events); @@ -1554,7 +1554,7 @@ CacheContinuation::replyOpEvent(int event, VConnection *cvc) { ink_assert(magicno == (int)MagicNo); Debug("cache_proto", "replyOpEvent(this=%p,event=%d,VC=%p)", this, event, cvc); - ink_hrtime now; + ts_hrtick now; now = Thread::get_hrtime(); CLUSTER_SUM_DYN_STAT(CLUSTER_CACHE_CALLBACK_TIME_STAT, now - start_time); LOG_EVENT_TIME(start_time, callback_time_dist, cache_callbacks); @@ -2080,7 +2080,7 @@ CacheContinuation::remoteOpEvent(int event_code, Event *e) { ink_assert(magicno == (int)MagicNo); int event = event_code; - ink_hrtime now; + ts_hrtick now; if (start_time) { int res; if (event != EVENT_INTERVAL) { @@ -2556,7 +2556,7 @@ cache_lookup_ClusterFunction(ClusterHandler *ch, void *data, int len) int 
CacheContinuation::replyLookupEvent(int event, void * /* d ATS_UNUSED */) { - ink_hrtime now; + ts_hrtick now; now = Thread::get_hrtime(); CLUSTER_SUM_DYN_STAT(CLUSTER_CACHE_CALLBACK_TIME_STAT, now - start_time); LOG_EVENT_TIME(start_time, callback_time_dist, cache_callbacks); diff --git a/iocore/cluster/ClusterConfig.cc b/iocore/cluster/ClusterConfig.cc index e9b789661a1..6be84d8a7d9 100644 --- a/iocore/cluster/ClusterConfig.cc +++ b/iocore/cluster/ClusterConfig.cc @@ -365,7 +365,7 @@ cluster_machine_at_depth(unsigned int hash, int *pprobe_depth, ClusterMachine ** #endif ClusterConfiguration *cc = this_cluster()->current_configuration(); ClusterConfiguration *next_cc = cc; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); int fake_probe_depth = 0; int &probe_depth = pprobe_depth ? (*pprobe_depth) : fake_probe_depth; int tprobe_depth = probe_depth; diff --git a/iocore/cluster/ClusterHandler.cc b/iocore/cluster/ClusterHandler.cc index 476bf70d706..1db0ce30239 100644 --- a/iocore/cluster/ClusterHandler.cc +++ b/iocore/cluster/ClusterHandler.cc @@ -340,7 +340,7 @@ ClusterHandler::close_ClusterVConnection(ClusterVConnection *vc) } clusterProcessor.invoke_remote(vc->ch, CLOSE_CHANNEL_CLUSTER_FUNCTION, data, len); } - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); CLUSTER_DECREMENT_DYN_STAT(CLUSTER_CONNECTIONS_OPEN_STAT); CLUSTER_SUM_DYN_STAT(CLUSTER_CON_TOTAL_TIME_STAT, now - vc->start_time); if (!local_channel(channel)) { @@ -450,7 +450,7 @@ ClusterHandler::build_initial_vector(bool read_flag) // This isn't used. // MIOBuffer *w; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); ClusterState &s = (read_flag ? 
read : write); OutgoingControl *oc = s.msg.outgoing_control.head; IncomingControl *ic = incoming_control.head; @@ -935,7 +935,7 @@ ClusterHandler::process_small_control_msgs() read.msg.did_small_control_msgs = 1; } - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); char *p = (char *)&read.msg.descriptor[read.msg.count] + read.msg.control_data_offset; char *endp = (char *)&read.msg.descriptor[read.msg.count] + read.msg.control_bytes; @@ -1149,7 +1149,7 @@ ClusterHandler::update_channels_read() int ClusterHandler::process_incoming_callouts(ProxyMutex *mutex) { - ink_hrtime now; + ts_hrtick now; // // Atomically dequeue all active requests from the external queue and // move them to the local working queue. Insertion queue order is @@ -1429,7 +1429,7 @@ ClusterHandler::update_channels_written() // Lower the priority of those with too little data and raise that of // those with too much data. // - ink_hrtime now; + ts_hrtick now; for (int i = 0; i < write.msg.count; i++) { if (write.msg.descriptor[i].type == CLUSTER_SEND_DATA) { if (write.msg.descriptor[i].channel != CLUSTER_CONTROL_CHANNEL) { @@ -1857,7 +1857,7 @@ ClusterHandler::add_small_controlmsg_descriptors() c->free_data(); c->mutex = NULL; p += c->len; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); CLUSTER_SUM_DYN_STAT(CLUSTER_CTRL_MSGS_SEND_TIME_STAT, now - c->submit_time); LOG_EVENT_TIME(c->submit_time, cluster_send_time_dist, cluster_send_events); c->freeall(); @@ -2488,7 +2488,7 @@ ClusterHandler::mainClusterEvent(int event, Event *e) return EVENT_CONT; } -int ClusterHandler::process_read(ink_hrtime /* now ATS_UNUSED */) +int ClusterHandler::process_read(ts_hrtick /* now ATS_UNUSED */) { #ifdef CLUSTER_STATS _process_read_calls++; @@ -2762,7 +2762,7 @@ int ClusterHandler::process_read(ink_hrtime /* now ATS_UNUSED */) #ifdef CLUSTER_STATS _n_read_complete++; #endif - ink_hrtime rdmsg_end_time = Thread::get_hrtime(); + ts_hrtick rdmsg_end_time = 
Thread::get_hrtime(); CLUSTER_SUM_DYN_STAT(CLUSTER_RDMSG_ASSEMBLE_TIME_STAT, rdmsg_end_time - read.start_time); read.start_time = HRTIME_MSECONDS(0); if (dump_msgs) @@ -2786,7 +2786,7 @@ int ClusterHandler::process_read(ink_hrtime /* now ATS_UNUSED */) } int -ClusterHandler::process_write(ink_hrtime now, bool only_write_control_msgs) +ClusterHandler::process_write(ts_hrtick now, bool only_write_control_msgs) { #ifdef CLUSTER_STATS _process_write_calls++; @@ -2952,7 +2952,7 @@ ClusterHandler::process_write(ink_hrtime now, bool only_write_control_msgs) _n_write_complete++; #endif write.state = ClusterState::WRITE_START; - ink_hrtime curtime = Thread::get_hrtime(); + ts_hrtick curtime = Thread::get_hrtime(); if (!on_stolen_thread) { // diff --git a/iocore/cluster/ClusterHandlerBase.cc b/iocore/cluster/ClusterHandlerBase.cc index b366f7ed296..b9c04fb1409 100644 --- a/iocore/cluster/ClusterHandlerBase.cc +++ b/iocore/cluster/ClusterHandlerBase.cc @@ -1217,7 +1217,7 @@ ClusterHandler::protoZombieEvent(int /* event ATS_UNUSED */, Event *e) // after NO_RACE_DELAY // bool failed = false; - ink_hrtime delay = CLUSTER_MEMBER_DELAY * 5; + ts_hrtick delay = CLUSTER_MEMBER_DELAY * 5; EThread *t = e ? 
e->ethread : this_ethread(); head_p item; diff --git a/iocore/cluster/ClusterLoadMonitor.cc b/iocore/cluster/ClusterLoadMonitor.cc index 205a9217a16..ab9fba17248 100644 --- a/iocore/cluster/ClusterLoadMonitor.cc +++ b/iocore/cluster/ClusterLoadMonitor.cc @@ -82,8 +82,8 @@ ClusterLoadMonitor::ClusterLoadMonitor(ClusterHandler *ch) ping_response_buckets = (int *)ats_malloc(nbytes); memset((char *)ping_response_buckets, 0, nbytes); - nbytes = sizeof(ink_hrtime) * ping_history_buf_length; - ping_response_history_buf = (ink_hrtime *)ats_malloc(nbytes); + nbytes = sizeof(ts_hrtick) * ping_history_buf_length; + ping_response_history_buf = (ts_hrtick *)ats_malloc(nbytes); memset((char *)ping_response_history_buf, 0, nbytes); last_ping_message_sent = HRTIME_SECONDS(0); @@ -153,7 +153,7 @@ ClusterLoadMonitor::compute_cluster_load() } else { n_bucket = 1; } - ink_hrtime current_ping_latency = HRTIME_MSECONDS(n_bucket * msecs_per_ping_response_bucket); + ts_hrtick current_ping_latency = HRTIME_MSECONDS(n_bucket * msecs_per_ping_response_bucket); // Invalidate messages associated with this sample interval cluster_load_msg_start_sequence_number = cluster_load_msg_sequence_number; @@ -177,7 +177,7 @@ ClusterLoadMonitor::compute_cluster_load() // exceed the threshold. int start, end; - ink_hrtime ping_latency_threshold = HRTIME_MSECONDS(ping_latency_threshold_msecs); + ts_hrtick ping_latency_threshold = HRTIME_MSECONDS(ping_latency_threshold_msecs); start = ping_history_buf_head - 1; if (start < 0) @@ -215,7 +215,7 @@ ClusterLoadMonitor::compute_cluster_load() } void -ClusterLoadMonitor::note_ping_response_time(ink_hrtime response_time, int sequence_number) +ClusterLoadMonitor::note_ping_response_time(ts_hrtick response_time, int sequence_number) { #ifdef CLUSTER_TOMCAT ProxyMutex *mutex = this->ch->mutex.get(); // hack for stats @@ -234,7 +234,7 @@ void ClusterLoadMonitor::recv_cluster_load_msg(cluster_load_ping_msg *m) { // We have received back our ping message. 
- ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); if ((now >= m->send_time) && ((m->sequence_number >= cluster_load_msg_start_sequence_number) && (m->sequence_number < cluster_load_msg_sequence_number))) { @@ -263,7 +263,7 @@ ClusterLoadMonitor::cluster_load_ping_rethandler(ClusterHandler *ch, void *data, } void -ClusterLoadMonitor::send_cluster_load_msg(ink_hrtime current_time) +ClusterLoadMonitor::send_cluster_load_msg(ts_hrtick current_time) { // Build and send cluster load ping message. @@ -290,7 +290,7 @@ ClusterLoadMonitor::cluster_load_periodic(int /* event ATS_UNUSED */, Event * /* } // Generate periodic ping messages. - ink_hrtime current_time = Thread::get_hrtime(); + ts_hrtick current_time = Thread::get_hrtime(); if ((current_time - last_ping_message_sent) > HRTIME_MSECONDS(ping_message_send_msec_interval)) { send_cluster_load_msg(current_time); last_ping_message_sent = current_time; diff --git a/iocore/cluster/P_ClusterCache.h b/iocore/cluster/P_ClusterCache.h index eeca3f16bf3..ca354959392 100644 --- a/iocore/cluster/P_ClusterCache.h +++ b/iocore/cluster/P_ClusterCache.h @@ -172,7 +172,7 @@ struct ClusterConfiguration { // ClusterConfiguration(); unsigned char hash_table[CLUSTER_HASH_TABLE_SIZE]; - ink_hrtime changed; + ts_hrtick changed; SLINK(ClusterConfiguration, link); }; @@ -363,8 +363,8 @@ struct ClusterVConnectionBase : public CacheVConnection { // ClusterVConnectionBase, or the ClusterVConnectionBase // creation callback. 
// - void set_active_timeout(ink_hrtime timeout_in); - void set_inactivity_timeout(ink_hrtime timeout_in); + void set_active_timeout(ts_hrtick timeout_in); + void set_inactivity_timeout(ts_hrtick timeout_in); void cancel_active_timeout(); void cancel_inactivity_timeout(); @@ -381,8 +381,8 @@ struct ClusterVConnectionBase : public CacheVConnection { ClusterVConnState write; LINKM(ClusterVConnectionBase, read, link) LINKM(ClusterVConnectionBase, write, link) - ink_hrtime inactivity_timeout_in; - ink_hrtime active_timeout_in; + ts_hrtick inactivity_timeout_in; + ts_hrtick active_timeout_in; Event *inactivity_timeout; Event *active_timeout; @@ -495,8 +495,8 @@ struct ClusterVConnection : public ClusterVConnectionBase { // // NOT Thread-safe (see Net.h for details) // - // void set_active_timeout(ink_hrtime timeout_in); - // void set_inactivity_timeout(ink_hrtime timeout_in); + // void set_active_timeout(ts_hrtick timeout_in); + // void set_inactivity_timeout(ts_hrtick timeout_in); // // Private @@ -546,8 +546,8 @@ struct ClusterVConnection : public ClusterVConnectionBase { void allow_remote_close(); bool schedule_write(); void set_type(int); - ink_hrtime start_time; - ink_hrtime last_activity_time; + ts_hrtick start_time; + ts_hrtick last_activity_time; Queue byte_bank_q; // done awaiting completion int n_set_data_msgs; // # pending set_data() msgs on VC int n_recv_set_data_msgs; // # set_data() msgs received on VC diff --git a/iocore/cluster/P_ClusterCacheInternal.h b/iocore/cluster/P_ClusterCacheInternal.h index 15b90d1fc1b..91b22be7741 100644 --- a/iocore/cluster/P_ClusterCacheInternal.h +++ b/iocore/cluster/P_ClusterCacheInternal.h @@ -99,7 +99,7 @@ struct CacheContinuation : public Continuation { ClusterMachine *target_machine; int probe_depth; ClusterMachine *past_probes[CONFIGURATION_HISTORY_PROBE_DEPTH]; - ink_hrtime start_time; + ts_hrtick start_time; ClusterMachine *from; ClusterHandler *ch; VConnection *cache_vc; diff --git 
a/iocore/cluster/P_ClusterHandler.h b/iocore/cluster/P_ClusterHandler.h index 00bd77090a9..13794d6cad5 100644 --- a/iocore/cluster/P_ClusterHandler.h +++ b/iocore/cluster/P_ClusterHandler.h @@ -83,7 +83,7 @@ struct ClusterControl : public Continuation { struct OutgoingControl : public ClusterControl { ClusterHandler *ch; - ink_hrtime submit_time; + ts_hrtick submit_time; static OutgoingControl *alloc(); @@ -128,7 +128,7 @@ struct OutgoingControl : public ClusterControl { // incoming control messsage are received by this machine // struct IncomingControl : public ClusterControl { - ink_hrtime recognized_time; + ts_hrtick recognized_time; static IncomingControl *alloc(); @@ -365,8 +365,8 @@ struct ClusterState : public Continuation { int missed; bool missed_msg; - ink_hrtime last_time; - ink_hrtime start_time; + ts_hrtick last_time; + ts_hrtick start_time; Ptr block[MAX_TCOUNT]; class MIOBuffer *mbuf; @@ -486,12 +486,12 @@ struct ClusterHandler : public ClusterHandlerBase { ClusterState read; ClusterState write; - ink_hrtime current_time; - ink_hrtime last; - ink_hrtime last_report; + ts_hrtick current_time; + ts_hrtick last; + ts_hrtick last_report; int n_since_last_report; - ink_hrtime last_cluster_op_enable; - ink_hrtime last_trace_dump; + ts_hrtick last_cluster_op_enable; + ts_hrtick last_trace_dump; DLL delayed_reads; ClusterLoadMonitor *clm; @@ -653,8 +653,8 @@ struct ClusterHandler : public ClusterHandlerBase { bool vc_ok_write(ClusterVConnection *); int do_open_local_requests(); void swap_descriptor_bytes(); - int process_read(ink_hrtime); - int process_write(ink_hrtime, bool); + int process_read(ts_hrtick); + int process_write(ts_hrtick, bool); void dump_write_msg(int); void dump_read_msg(); diff --git a/iocore/cluster/P_ClusterLoadMonitor.h b/iocore/cluster/P_ClusterLoadMonitor.h index c499c07633e..236a4ba9e59 100644 --- a/iocore/cluster/P_ClusterLoadMonitor.h +++ b/iocore/cluster/P_ClusterLoadMonitor.h @@ -55,7 +55,7 @@ class ClusterLoadMonitor : public 
Continuation int magicno; int version; int sequence_number; - ink_hrtime send_time; + ts_hrtick send_time; ClusterLoadMonitor *monitor; enum { @@ -79,9 +79,9 @@ class ClusterLoadMonitor : public Continuation private: void compute_cluster_load(); - void note_ping_response_time(ink_hrtime, int); + void note_ping_response_time(ts_hrtick, int); void recv_cluster_load_msg(cluster_load_ping_msg *); - void send_cluster_load_msg(ink_hrtime); + void send_cluster_load_msg(ts_hrtick); int cluster_load_periodic(int, Event *); private: @@ -101,14 +101,14 @@ class ClusterLoadMonitor : public Continuation // Class specific data ClusterHandler *ch; int *ping_response_buckets; - ink_hrtime *ping_response_history_buf; + ts_hrtick *ping_response_history_buf; int ping_history_buf_head; Action *periodic_action; int cluster_overloaded; int cancel_periodic; - ink_hrtime last_ping_message_sent; - ink_hrtime last_cluster_load_compute; + ts_hrtick last_ping_message_sent; + ts_hrtick last_cluster_load_compute; int cluster_load_msg_sequence_number; int cluster_load_msg_start_sequence_number; }; diff --git a/iocore/cluster/P_TimeTrace.h b/iocore/cluster/P_TimeTrace.h index 1bf3c89c077..fce5158006f 100644 --- a/iocore/cluster/P_TimeTrace.h +++ b/iocore/cluster/P_TimeTrace.h @@ -66,7 +66,7 @@ extern int cluster_send_events; #ifdef ENABLE_TIME_TRACE #define LOG_EVENT_TIME(_start_time, _time_dist, _time_cnt) \ do { \ - ink_hrtime now = ink_get_hrtime(); \ + ts_hrtick now = ink_get_hrtime(); \ unsigned int bucket = (now - _start_time) / HRTIME_MSECONDS(10); \ if (bucket > TIME_DIST_BUCKETS) \ bucket = TIME_DIST_BUCKETS; \ diff --git a/iocore/dns/DNS.cc b/iocore/dns/DNS.cc index c2fa8a11e3a..cf8d24872b2 100644 --- a/iocore/dns/DNS.cc +++ b/iocore/dns/DNS.cc @@ -553,7 +553,7 @@ DNSHandler::recover() } void -DNSHandler::retry_named(int ndx, ink_hrtime t, bool reopen) +DNSHandler::retry_named(int ndx, ts_hrtick t, bool reopen) { if (reopen && ((t - last_primary_reopen) > DNS_PRIMARY_REOPEN_PERIOD)) { 
Debug("dns", "retry_named: reopening DNS connection for index %d", ndx); @@ -576,7 +576,7 @@ DNSHandler::retry_named(int ndx, ink_hrtime t, bool reopen) void DNSHandler::try_primary_named(bool reopen) { - ink_hrtime t = Thread::get_hrtime(); + ts_hrtick t = Thread::get_hrtime(); if (reopen && ((t - last_primary_reopen) > DNS_PRIMARY_REOPEN_PERIOD)) { Debug("dns", "try_primary_named: reopening primary DNS connection"); last_primary_reopen = t; @@ -793,7 +793,7 @@ DNSHandler::mainEvent(int event, Event *e) { recv_dns(event, e); if (dns_ns_rr) { - ink_hrtime t = Thread::get_hrtime(); + ts_hrtick t = Thread::get_hrtime(); if (t - last_primary_retry > DNS_PRIMARY_RETRY_PERIOD) { for (int i = 0; i < n_con; i++) { if (ns_down[i]) { diff --git a/iocore/dns/P_DNSProcessor.h b/iocore/dns/P_DNSProcessor.h index ed7acbb7ab5..694863330de 100644 --- a/iocore/dns/P_DNSProcessor.h +++ b/iocore/dns/P_DNSProcessor.h @@ -141,8 +141,8 @@ struct DNSEntry : public Continuation { HostResStyle host_res_style; ///< Preferred IP address family. 
int retries; int which_ns; - ink_hrtime submit_time; - ink_hrtime send_time; + ts_hrtick submit_time; + ts_hrtick send_time; char qname[MAXDNAME]; int qname_len; int orig_qname_len; @@ -215,9 +215,9 @@ struct DNSHandler : public Continuation { int ns_down[MAX_NAMED]; int failover_number[MAX_NAMED]; int failover_soon_number[MAX_NAMED]; - ink_hrtime crossed_failover_number[MAX_NAMED]; - ink_hrtime last_primary_retry; - ink_hrtime last_primary_reopen; + ts_hrtick crossed_failover_number[MAX_NAMED]; + ts_hrtick last_primary_retry; + ts_hrtick last_primary_reopen; ink_res_state m_res; int txn_lookup_timeout; @@ -246,7 +246,7 @@ struct DNSHandler : public Continuation { { if (is_debug_tag_set("dns")) { Debug("dns", "failover_now: Considering immediate failover, target time is %" PRId64 "", - (ink_hrtime)HRTIME_SECONDS(dns_failover_period)); + (ts_hrtick)HRTIME_SECONDS(dns_failover_period)); Debug("dns", "\tdelta time is %" PRId64 "", (Thread::get_hrtime() - crossed_failover_number[i])); } return (crossed_failover_number[i] && @@ -270,7 +270,7 @@ struct DNSHandler : public Continuation { void failover(); void rr_failure(int ndx); void recover(); - void retry_named(int ndx, ink_hrtime t, bool reopen = true); + void retry_named(int ndx, ts_hrtick t, bool reopen = true); void try_primary_named(bool reopen = true); void switch_named(int ndx); uint16_t get_query_id(); diff --git a/iocore/eventsystem/I_EThread.h b/iocore/eventsystem/I_EThread.h index cf0999d067d..ab5d1ce334f 100644 --- a/iocore/eventsystem/I_EThread.h +++ b/iocore/eventsystem/I_EThread.h @@ -133,7 +133,7 @@ class EThread : public Thread of this callback. 
*/ - Event *schedule_at(Continuation *c, ink_hrtime atimeout_at, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_at(Continuation *c, ts_hrtick atimeout_at, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** Schedules the continuation on this EThread to receive an event @@ -153,7 +153,7 @@ class EThread : public Thread of this callback. */ - Event *schedule_in(Continuation *c, ink_hrtime atimeout_in, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_in(Continuation *c, ts_nanoseconds atimeout_in, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** Schedules the continuation on this EThread to receive an event @@ -174,7 +174,7 @@ class EThread : public Thread of this callback. */ - Event *schedule_every(Continuation *c, ink_hrtime aperiod, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_every(Continuation *c, ts_nanoseconds aperiod, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** Schedules the continuation on this EThread to receive an event @@ -213,7 +213,7 @@ class EThread : public Thread of this callback. */ - Event *schedule_at_local(Continuation *c, ink_hrtime atimeout_at, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_at_local(Continuation *c, ts_hrtick atimeout_at, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** Schedules the continuation on this EThread to receive an event @@ -234,7 +234,7 @@ class EThread : public Thread of this callback. */ - Event *schedule_in_local(Continuation *c, ink_hrtime atimeout_in, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_in_local(Continuation *c, ts_nanoseconds atimeout_in, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** Schedules the continuation on this EThread to receive an event @@ -254,7 +254,7 @@ class EThread : public Thread of this callback. 
*/ - Event *schedule_every_local(Continuation *c, ink_hrtime aperiod, int callback_event = EVENT_INTERVAL, void *cookie = NULL); + Event *schedule_every_local(Continuation *c, ts_nanoseconds aperiod, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /* private */ diff --git a/iocore/eventsystem/I_Event.h b/iocore/eventsystem/I_Event.h index a73eed7a464..ea23b59818b 100644 --- a/iocore/eventsystem/I_Event.h +++ b/iocore/eventsystem/I_Event.h @@ -138,7 +138,7 @@ class EThread; Time values: - The schedulling functions use a time parameter typed as ink_hrtime + The schedulling functions use a time parameter typed as ts_hrtick for specifying the timeouts or periods. This is a nanosecond value supported by libts and you should use the time functions and macros defined in ink_hrtime.h. @@ -176,7 +176,7 @@ class Event : public Action @param callback_event Event code to return at the completion of this event. See the Remarks section. */ - void schedule_at(ink_hrtime atimeout_at, int callback_event = EVENT_INTERVAL); + void schedule_at(ts_hrtick atimeout_at, int callback_event = EVENT_INTERVAL); /** Reschedules this event to callback at time 'atimeout_at'. @@ -187,7 +187,7 @@ class Event : public Action @param callback_event Event code to return at the completion of this event. See the Remarks section. */ - void schedule_in(ink_hrtime atimeout_in, int callback_event = EVENT_INTERVAL); + void schedule_in(ts_nanoseconds atimeout_in, int callback_event = EVENT_INTERVAL); /** Reschedules this event to callback every 'aperiod'. Instructs @@ -198,7 +198,7 @@ class Event : public Action @param callback_event Event code to return at the completion of this event. See the Remarks section. 
*/ - void schedule_every(ink_hrtime aperiod, int callback_event = EVENT_INTERVAL); + void schedule_every(ts_nanoseconds aperiod, int callback_event = EVENT_INTERVAL); // inherited from Action::cancel // virtual void cancel(Continuation * c = NULL); @@ -214,8 +214,8 @@ class Event : public Action unsigned int in_heap : 4; int callback_event; - ink_hrtime timeout_at; - ink_hrtime period; + ts_hrtick timeout_at; + ts_nanoseconds period; /** This field can be set when an event is created. It is returned @@ -229,10 +229,10 @@ class Event : public Action Event(); - Event *init(Continuation *c, ink_hrtime atimeout_at = 0, ink_hrtime aperiod = 0); + Event *init(Continuation *c, ts_hrtick atimeout_at = TS_HRTICK_ZERO, ts_nanoseconds aperiod = ts_nanoseconds::zero()); #ifdef ENABLE_TIME_TRACE - ink_hrtime start_time; + ts_hrtick start_time; #endif private: diff --git a/iocore/eventsystem/I_EventProcessor.h b/iocore/eventsystem/I_EventProcessor.h index 03364d68373..58017e74da4 100644 --- a/iocore/eventsystem/I_EventProcessor.h +++ b/iocore/eventsystem/I_EventProcessor.h @@ -167,7 +167,7 @@ class EventProcessor : public Processor this callback. */ - Event *schedule_at(Continuation *c, ink_hrtime atimeout_at, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, + Event *schedule_at(Continuation *c, ts_hrtick atimeout_at, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** @@ -189,7 +189,7 @@ class EventProcessor : public Processor this callback. */ - Event *schedule_in(Continuation *c, ink_hrtime atimeout_in, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, + Event *schedule_in(Continuation *c, ts_nanoseconds atimeout_in, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, void *cookie = NULL); /** @@ -211,7 +211,7 @@ class EventProcessor : public Processor this callback. 
*/ - Event *schedule_every(Continuation *c, ink_hrtime aperiod, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, + Event *schedule_every(Continuation *c, ts_nanoseconds aperiod, EventType event_type = ET_CALL, int callback_event = EVENT_INTERVAL, void *cookie = NULL); //////////////////////////////////////////// @@ -223,9 +223,9 @@ class EventProcessor : public Processor //////////////////////////////////////////// Event *reschedule_imm(Event *e, int callback_event = EVENT_IMMEDIATE); - Event *reschedule_at(Event *e, ink_hrtime atimeout_at, int callback_event = EVENT_INTERVAL); - Event *reschedule_in(Event *e, ink_hrtime atimeout_in, int callback_event = EVENT_INTERVAL); - Event *reschedule_every(Event *e, ink_hrtime aperiod, int callback_event = EVENT_INTERVAL); + Event *reschedule_at(Event *e, ts_hrtick atimeout_at, int callback_event = EVENT_INTERVAL); + Event *reschedule_in(Event *e, ts_hrtick atimeout_in, int callback_event = EVENT_INTERVAL); + Event *reschedule_every(Event *e, ts_hrtick aperiod, int callback_event = EVENT_INTERVAL); EventProcessor(); diff --git a/iocore/eventsystem/I_Lock.h b/iocore/eventsystem/I_Lock.h index bb55cc07e67..1407b19473a 100644 --- a/iocore/eventsystem/I_Lock.h +++ b/iocore/eventsystem/I_Lock.h @@ -28,7 +28,7 @@ #include "ts/Diags.h" #include "I_Thread.h" -#define MAX_LOCK_TIME HRTIME_MSECONDS(200) +static const ts_nanoseconds MAX_LOCK_TIME(ts_milliseconds(200)); #define THREAD_MUTEX_THREAD_HOLDING (-1024 * 1024) /*------------------------------------------------------*\ @@ -208,7 +208,7 @@ class ProxyMutex : public RefCountObj int nthread_holding; #ifdef DEBUG - ink_hrtime hold_time; + ts_hrtick hold_time; SourceLocation srcloc; const char *handler; @@ -241,7 +241,7 @@ class ProxyMutex : public RefCountObj thread_holding = NULL; nthread_holding = 0; #ifdef DEBUG - hold_time = 0; + hold_time = hold_time.min(); handler = NULL; #ifdef MAX_LOCK_TAKEN taken = 0; diff --git 
a/iocore/eventsystem/I_PriorityEventQueue.h b/iocore/eventsystem/I_PriorityEventQueue.h index 886dea692e2..50aa163b744 100644 --- a/iocore/eventsystem/I_PriorityEventQueue.h +++ b/iocore/eventsystem/I_PriorityEventQueue.h @@ -28,20 +28,24 @@ #include "I_Event.h" // <5ms, 10, 20, 40, 80, 160, 320, 640, 1280, 2560, 5120 -#define N_PQ_LIST 10 -#define PQ_BUCKET_TIME(_i) (HRTIME_MSECONDS(5) << (_i)) +// Subtle - return type is in ns while the computation is ms. This works because conversion to +// faster clock is automatic and compile time available. +constexpr ts_nanoseconds PQ_BUCKET_TIME(int i) { return ts_milliseconds(5 << i); } +static const int N_PQ_LIST = 10; +//#define N_PQ_LIST 10 +//#define PQ_BUCKET_TIME(_i) (HRTIME_MSECONDS(5) << (_i)) class EThread; struct PriorityEventQueue { Que(Event, link) after[N_PQ_LIST]; - ink_hrtime last_check_time; + ts_hrtick last_check_time; uint32_t last_check_buckets; void - enqueue(Event *e, ink_hrtime now) + enqueue(Event *e, ts_hrtick now) { - ink_hrtime t = e->timeout_at - now; + ts_nanoseconds t = e->timeout_at - now; int i = 0; // equivalent but faster if (t <= PQ_BUCKET_TIME(3)) { @@ -95,7 +99,7 @@ struct PriorityEventQueue { } Event * - dequeue_ready(ink_hrtime t) + dequeue_ready(ts_hrtick t) { (void)t; Event *e = after[0].dequeue(); @@ -106,16 +110,16 @@ struct PriorityEventQueue { return e; } - void check_ready(ink_hrtime now, EThread *t); + void check_ready(ts_hrtick now, EThread *t); - ink_hrtime + ts_hrtick earliest_timeout() { for (int i = 0; i < N_PQ_LIST; i++) { if (after[i].head) return last_check_time + (PQ_BUCKET_TIME(i) / 2); } - return last_check_time + HRTIME_FOREVER; + return ts_hrtick::max(); } PriorityEventQueue(); diff --git a/iocore/eventsystem/I_ProtectedQueue.h b/iocore/eventsystem/I_ProtectedQueue.h index 7dfba985406..9c26b30337d 100644 --- a/iocore/eventsystem/I_ProtectedQueue.h +++ b/iocore/eventsystem/I_ProtectedQueue.h @@ -44,7 +44,7 @@ struct ProtectedQueue { void enqueue_local(Event *e); // 
Safe when called from the same thread void remove(Event *e); Event *dequeue_local(); - void dequeue_timed(ink_hrtime cur_time, ink_hrtime timeout, bool sleep); + void dequeue_timed(ts_hrtick cur_time, ts_hrtick timeout, bool sleep); InkAtomicList al; ink_mutex lock; diff --git a/iocore/eventsystem/I_Thread.h b/iocore/eventsystem/I_Thread.h index 5acccee0b0f..055daa513f1 100644 --- a/iocore/eventsystem/I_Thread.h +++ b/iocore/eventsystem/I_Thread.h @@ -117,7 +117,7 @@ class Thread Thread(); virtual ~Thread(); - static ink_hrtime cur_time; + static ts_hrtick cur_time; inkcoreapi static ink_thread_key thread_data_key; // For THREAD_ALLOC @@ -159,7 +159,7 @@ class Thread @note The cached copy shared among threads which means the cached copy is udpated for all threads if any thread updates it. */ - static ink_hrtime get_hrtime(); + static ts_hrtick get_hrtime(); /** Get the operating system high resolution time. @@ -168,21 +168,21 @@ class Thread @note This also updates the cached time. */ - static ink_hrtime get_hrtime_updated(); + static ts_hrtick get_hrtime_updated(); }; extern Thread *this_thread(); -TS_INLINE ink_hrtime +TS_INLINE ts_hrtick Thread::get_hrtime() { return cur_time; } -TS_INLINE ink_hrtime +TS_INLINE ts_hrtick Thread::get_hrtime_updated() { - return cur_time = ink_get_hrtime_internal(); + return cur_time = ts_hrtick::clock::now(); } #endif /*_I_Thread_h*/ diff --git a/iocore/eventsystem/PQ-List.cc b/iocore/eventsystem/PQ-List.cc index 5710bf61783..f2b223d7f16 100644 --- a/iocore/eventsystem/PQ-List.cc +++ b/iocore/eventsystem/PQ-List.cc @@ -22,18 +22,29 @@ */ #include "P_EventSystem.h" +// This makes a duration type with ticks the size of PQ_BUCKET_TIME(0). +// When a duration is assigned to an instance of this, @c count is the number of PQ_BUCKET_TIME(0) periods. 
+typedef std::chrono::duration, decltype(PQ_BUCKET_TIME(0))::period>> bucket_time; + +namespace { + // Get the number of PQ_BUCKET_TIME(0) periods since the epoch for time @a tick + inline uint32_t Get_Bucket_Tick(ts_hrtick tick) + { + return std::chrono::duration_cast(tick.time_since_epoch()).count(); + } +} PriorityEventQueue::PriorityEventQueue() { last_check_time = Thread::get_hrtime_updated(); - last_check_buckets = last_check_time / PQ_BUCKET_TIME(0); + last_check_buckets = Get_Bucket_Tick(last_check_time); } void -PriorityEventQueue::check_ready(ink_hrtime now, EThread *t) +PriorityEventQueue::check_ready(ts_hrtick now, EThread *t) { int i, j, k = 0; - uint32_t check_buckets = (uint32_t)(now / PQ_BUCKET_TIME(0)); + uint32_t check_buckets = Get_Bucket_Tick(now); uint32_t todo_buckets = check_buckets ^ last_check_buckets; last_check_time = now; last_check_buckets = check_buckets; @@ -52,7 +63,7 @@ PriorityEventQueue::check_ready(ink_hrtime now, EThread *t) e->cancelled = 0; EVENT_FREE(e, eventAllocator, t); } else { - ink_hrtime tt = e->timeout_at - now; + ts_nanoseconds tt = e->timeout_at - now; for (j = i; j > 0 && tt <= PQ_BUCKET_TIME(j - 1);) j--; e->in_heap = j; diff --git a/iocore/eventsystem/P_Freer.h b/iocore/eventsystem/P_Freer.h index b9d17eab4d7..650476204e4 100644 --- a/iocore/eventsystem/P_Freer.h +++ b/iocore/eventsystem/P_Freer.h @@ -49,7 +49,7 @@ template struct DeleterContinuation : public Continuation { template TS_INLINE void -new_Deleter(C *ap, ink_hrtime t) +new_Deleter(C *ap, ts_hrtick t) { eventProcessor.schedule_in(new DeleterContinuation(ap), t, ET_TASK); } @@ -71,7 +71,7 @@ template struct FreeCallContinuation : public Continuation { template TS_INLINE void -new_FreeCaller(C *ap, ink_hrtime t) +new_FreeCaller(C *ap, ts_nanoseconds t) { eventProcessor.schedule_in(new FreeCallContinuation(ap), t, ET_TASK); } @@ -96,7 +96,7 @@ struct FreerContinuation : public Continuation { }; TS_INLINE void -new_Freer(void *ap, ink_hrtime t) 
+new_Freer(void *ap, ts_nanoseconds t) { eventProcessor.schedule_in(new FreerContinuation(ap), t, ET_TASK); } @@ -121,7 +121,7 @@ template struct DereferContinuation : public Continuation { template TS_INLINE void -new_Derefer(C *ap, ink_hrtime t) +new_Derefer(C *ap, ts_hrtick t) { eventProcessor.schedule_in(new DereferContinuation(ap), t, ET_TASK); } diff --git a/iocore/eventsystem/P_UnixEThread.h b/iocore/eventsystem/P_UnixEThread.h index dd696c33290..3cc8ba690a2 100644 --- a/iocore/eventsystem/P_UnixEThread.h +++ b/iocore/eventsystem/P_UnixEThread.h @@ -34,13 +34,13 @@ #include "I_EThread.h" #include "I_EventProcessor.h" -const int DELAY_FOR_RETRY = HRTIME_MSECONDS(10); +static const ts_nanoseconds DELAY_FOR_RETRY(ts_milliseconds(10)); TS_INLINE Event * EThread::schedule_spawn(Continuation *cont) { Event *e = EVENT_ALLOC(eventAllocator, this); - return schedule(e->init(cont, 0, 0)); + return schedule(e->init(cont)); } TS_INLINE Event * @@ -49,7 +49,7 @@ EThread::schedule_imm(Continuation *cont, int callback_event, void *cookie) Event *e = ::eventAllocator.alloc(); e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, 0, 0)); + return schedule(e->init(cont)); } TS_INLINE Event * @@ -58,29 +58,29 @@ EThread::schedule_imm_signal(Continuation *cont, int callback_event, void *cooki Event *e = ::eventAllocator.alloc(); e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, 0, 0), true); + return schedule(e->init(cont), true); } TS_INLINE Event * -EThread::schedule_at(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) +EThread::schedule_at(Continuation *cont, ts_hrtick t, int callback_event, void *cookie) { Event *e = ::eventAllocator.alloc(); e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, t, 0)); + return schedule(e->init(cont, t)); } TS_INLINE Event * -EThread::schedule_in(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) 
+EThread::schedule_in(Continuation *cont, ts_nanoseconds t, int callback_event, void *cookie) { Event *e = ::eventAllocator.alloc(); e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, get_hrtime() + t, 0)); + return schedule(e->init(cont, get_hrtime() + t)); } TS_INLINE Event * -EThread::schedule_every(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) +EThread::schedule_every(Continuation *cont, ts_nanoseconds t, int callback_event, void *cookie) { Event *e = ::eventAllocator.alloc(); e->callback_event = callback_event; @@ -108,29 +108,29 @@ EThread::schedule_imm_local(Continuation *cont, int callback_event, void *cookie Event *e = EVENT_ALLOC(eventAllocator, this); e->callback_event = callback_event; e->cookie = cookie; - return schedule_local(e->init(cont, 0, 0)); + return schedule_local(e->init(cont)); } TS_INLINE Event * -EThread::schedule_at_local(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) +EThread::schedule_at_local(Continuation *cont, ts_hrtick t, int callback_event, void *cookie) { Event *e = EVENT_ALLOC(eventAllocator, this); e->callback_event = callback_event; e->cookie = cookie; - return schedule_local(e->init(cont, t, 0)); + return schedule_local(e->init(cont, t)); } TS_INLINE Event * -EThread::schedule_in_local(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) +EThread::schedule_in_local(Continuation *cont, ts_nanoseconds t, int callback_event, void *cookie) { Event *e = EVENT_ALLOC(eventAllocator, this); e->callback_event = callback_event; e->cookie = cookie; - return schedule_local(e->init(cont, get_hrtime() + t, 0)); + return schedule_local(e->init(cont, get_hrtime() + t)); } TS_INLINE Event * -EThread::schedule_every_local(Continuation *cont, ink_hrtime t, int callback_event, void *cookie) +EThread::schedule_every_local(Continuation *cont, ts_nanoseconds t, int callback_event, void *cookie) { Event *e = EVENT_ALLOC(eventAllocator, this); 
e->callback_event = callback_event; diff --git a/iocore/eventsystem/P_UnixEvent.h b/iocore/eventsystem/P_UnixEvent.h index a79cb88ddbf..f1b29a3771c 100644 --- a/iocore/eventsystem/P_UnixEvent.h +++ b/iocore/eventsystem/P_UnixEvent.h @@ -25,12 +25,12 @@ #define _P_UnixEvent_h_ TS_INLINE Event * -Event::init(Continuation *c, ink_hrtime atimeout_at, ink_hrtime aperiod) +Event::init(Continuation *c, ts_hrtick atimeout_at, ts_nanoseconds aperiod) { continuation = c; timeout_at = atimeout_at; period = aperiod; - immediate = !period && !atimeout_at; + immediate = period == period.zero() && atimeout_at == TS_HRTICK_ZERO; cancelled = false; return this; } @@ -49,9 +49,7 @@ Event::Event() in_the_priority_queue(false), immediate(false), globally_allocated(true), - in_heap(false), - timeout_at(0), - period(0) + in_heap(false) { } diff --git a/iocore/eventsystem/P_UnixEventProcessor.h b/iocore/eventsystem/P_UnixEventProcessor.h index 5ee486761d7..316a25ac632 100644 --- a/iocore/eventsystem/P_UnixEventProcessor.h +++ b/iocore/eventsystem/P_UnixEventProcessor.h @@ -92,7 +92,7 @@ EventProcessor::schedule_imm_signal(Continuation *cont, EventType et, int callba #endif e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, 0, 0), et, true); + return schedule(e->init(cont), et, true); } TS_INLINE Event * @@ -106,43 +106,43 @@ EventProcessor::schedule_imm(Continuation *cont, EventType et, int callback_even #endif e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, 0, 0), et); + return schedule(e->init(cont), et); } TS_INLINE Event * -EventProcessor::schedule_at(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie) +EventProcessor::schedule_at(Continuation *cont, ts_hrtick t, EventType et, int callback_event, void *cookie) { Event *e = eventAllocator.alloc(); - ink_assert(t > 0); + ink_assert(t > TS_HRTICK_ZERO); ink_assert(et < MAX_EVENT_TYPES); e->callback_event = callback_event; e->cookie
= cookie; - return schedule(e->init(cont, t, 0), et); + return schedule(e->init(cont, t), et); } TS_INLINE Event * -EventProcessor::schedule_in(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie) +EventProcessor::schedule_in(Continuation *cont, ts_nanoseconds t, EventType et, int callback_event, void *cookie) { Event *e = eventAllocator.alloc(); ink_assert(et < MAX_EVENT_TYPES); e->callback_event = callback_event; e->cookie = cookie; - return schedule(e->init(cont, Thread::get_hrtime() + t, 0), et); + return schedule(e->init(cont, Thread::get_hrtime() + t), et); } TS_INLINE Event * -EventProcessor::schedule_every(Continuation *cont, ink_hrtime t, EventType et, int callback_event, void *cookie) +EventProcessor::schedule_every(Continuation *cont, ts_nanoseconds t, EventType et, int callback_event, void *cookie) { Event *e = eventAllocator.alloc(); - ink_assert(t != 0); + ink_assert(t != t.zero()); ink_assert(et < MAX_EVENT_TYPES); e->callback_event = callback_event; e->cookie = cookie; - if (t < 0) - return schedule(e->init(cont, t, t), et); + if (t < t.zero()) + return schedule(e->init(cont, TS_HRTICK_ZERO + t, t), et); else return schedule(e->init(cont, Thread::get_hrtime() + t, t), et); } diff --git a/iocore/eventsystem/ProtectedQueue.cc b/iocore/eventsystem/ProtectedQueue.cc index bd56e981eed..5773ae8fe8d 100644 --- a/iocore/eventsystem/ProtectedQueue.cc +++ b/iocore/eventsystem/ProtectedQueue.cc @@ -137,14 +137,14 @@ flush_signals(EThread *thr) } void -ProtectedQueue::dequeue_timed(ink_hrtime cur_time, ink_hrtime timeout, bool sleep) +ProtectedQueue::dequeue_timed(ts_hrtick cur_time, ts_hrtick timeout, bool sleep) { (void)cur_time; Event *e; if (sleep) { ink_mutex_acquire(&lock); if (INK_ATOMICLIST_EMPTY(al)) { - timespec ts = ink_hrtime_to_timespec(timeout); + timespec ts = ts_hrtick_to_timespec(timeout); ink_cond_timedwait(&might_have_data, &lock, &ts); } ink_mutex_release(&lock); diff --git a/iocore/eventsystem/Thread.cc 
b/iocore/eventsystem/Thread.cc index e8c8a4453bb..1e018e18995 100644 --- a/iocore/eventsystem/Thread.cc +++ b/iocore/eventsystem/Thread.cc @@ -37,7 +37,7 @@ static ink_thread_key init_thread_key(); -ink_hrtime Thread::cur_time = 0; +ts_hrtick Thread::cur_time; inkcoreapi ink_thread_key Thread::thread_data_key = init_thread_key(); Thread::Thread() diff --git a/iocore/eventsystem/UnixEThread.cc b/iocore/eventsystem/UnixEThread.cc index cfb12a13a11..a6b6a8ceeb5 100644 --- a/iocore/eventsystem/UnixEThread.cc +++ b/iocore/eventsystem/UnixEThread.cc @@ -36,13 +36,13 @@ struct AIOCallback; #define MAX_HEARTBEATS_MISSED 10 #define NO_HEARTBEAT -1 -#define THREAD_MAX_HEARTBEAT_MSECONDS 60 +static const ts_nanoseconds THREAD_MAX_HEARTBEAT(ts_milliseconds(60)); #define NO_ETHREAD_ID -1 bool shutdown_event_system = false; EThread::EThread() - : generator((uint64_t)Thread::get_hrtime_updated() ^ (uint64_t)(uintptr_t)this), + : generator(static_cast(Thread::get_hrtime_updated().time_since_epoch().count()) ^ reinterpret_cast(this)), ethreads_to_be_signalled(NULL), n_ethreads_to_be_signalled(0), main_accept_index(-1), @@ -55,7 +55,7 @@ EThread::EThread() } EThread::EThread(ThreadType att, int anid) - : generator((uint64_t)Thread::get_hrtime_updated() ^ (uint64_t)(uintptr_t)this), + : generator(static_cast(Thread::get_hrtime_updated().time_since_epoch().count()) ^ reinterpret_cast(this)), ethreads_to_be_signalled(NULL), n_ethreads_to_be_signalled(0), main_accept_index(-1), @@ -147,10 +147,10 @@ EThread::process_event(Event *e, int calling_code) ink_assert(!e->in_the_priority_queue); ink_assert(c_temp == e->continuation); MUTEX_RELEASE(lock); - if (e->period) { + if (e->period.count()) { if (!e->in_the_prot_queue && !e->in_the_priority_queue) { - if (e->period < 0) - e->timeout_at = e->period; + if (e->period.count() < 0) + e->timeout_at = TS_HRTICK_ZERO + e->period; else { this->get_hrtime_updated(); e->timeout_at = cur_time + e->period; @@ -182,7 +182,7 @@ EThread::execute() case 
REGULAR: { Event *e; Que(Event, link) NegativeQueue; - ink_hrtime next_time = 0; + ts_hrtick next_time; // give priority to immediate events for (;;) { @@ -195,10 +195,10 @@ EThread::execute() while ((e = EventQueueExternal.dequeue_local())) { if (e->cancelled) free_event(e); - else if (!e->timeout_at) { // IMMEDIATE - ink_assert(e->period == 0); + else if (e->timeout_at == TS_HRTICK_ZERO) { // IMMEDIATE + ink_assert(e->period.count() == 0); process_event(e, e->callback_event); - } else if (e->timeout_at > 0) // INTERVAL + } else if (e->timeout_at > TS_HRTICK_ZERO) // INTERVAL EventQueue.enqueue(e, cur_time); else { // NEGATIVE Event *p = NULL; @@ -220,7 +220,7 @@ EThread::execute() EventQueue.check_ready(cur_time, this); while ((e = EventQueue.dequeue_ready(cur_time))) { ink_assert(e); - ink_assert(e->timeout_at > 0); + ink_assert(e->timeout_at > TS_HRTICK_ZERO); if (e->cancelled) free_event(e); else { @@ -239,7 +239,7 @@ EThread::execute() if (!INK_ATOMICLIST_EMPTY(EventQueueExternal.al)) EventQueueExternal.dequeue_timed(cur_time, next_time, false); while ((e = EventQueueExternal.dequeue_local())) { - if (!e->timeout_at) + if (e->timeout_at == TS_HRTICK_ZERO) process_event(e, e->callback_event); else { if (e->cancelled) @@ -252,7 +252,7 @@ EThread::execute() // be executed in this round (because you can't have // more than one poll between two executions of a // negative event) - if (e->timeout_at < 0) { + if (e->timeout_at < TS_HRTICK_ZERO) { Event *p = NULL; Event *a = NegativeQueue.head; while (a && a->timeout_at > e->timeout_at) { @@ -275,10 +275,10 @@ EThread::execute() EventQueueExternal.dequeue_timed(cur_time, next_time, false); } else { // Means there are no negative events next_time = EventQueue.earliest_timeout(); - ink_hrtime sleep_time = next_time - cur_time; + ts_nanoseconds sleep_time = next_time - cur_time; - if (sleep_time > THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND) { - next_time = cur_time + THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND; + 
if (sleep_time > THREAD_MAX_HEARTBEAT) { + next_time = cur_time + THREAD_MAX_HEARTBEAT; } // dequeue all the external events and put them in a local // queue. If there are no external events available, do a diff --git a/iocore/eventsystem/UnixEvent.cc b/iocore/eventsystem/UnixEvent.cc index 8596f47120f..519b9e5e426 100644 --- a/iocore/eventsystem/UnixEvent.cc +++ b/iocore/eventsystem/UnixEvent.cc @@ -38,8 +38,8 @@ Event::schedule_imm(int acallback_event) ink_assert(ethread == this_ethread()); if (in_the_priority_queue) ethread->EventQueue.remove(this); - timeout_at = 0; - period = 0; + timeout_at = TS_HRTICK_ZERO; + period = ts_nanoseconds::zero(); immediate = true; mutex = continuation->mutex; if (!in_the_prot_queue) @@ -47,15 +47,15 @@ Event::schedule_imm(int acallback_event) } void -Event::schedule_at(ink_hrtime atimeout_at, int acallback_event) +Event::schedule_at(ts_hrtick atimeout_at, int acallback_event) { callback_event = acallback_event; ink_assert(ethread == this_ethread()); - ink_assert(atimeout_at > 0); + ink_assert(atimeout_at > TS_HRTICK_ZERO); if (in_the_priority_queue) ethread->EventQueue.remove(this); timeout_at = atimeout_at; - period = 0; + period = ts_nanoseconds::zero(); immediate = false; mutex = continuation->mutex; if (!in_the_prot_queue) @@ -63,14 +63,14 @@ Event::schedule_at(ink_hrtime atimeout_at, int acallback_event) } void -Event::schedule_in(ink_hrtime atimeout_in, int acallback_event) +Event::schedule_in(ts_nanoseconds atimeout_in, int acallback_event) { callback_event = acallback_event; ink_assert(ethread == this_ethread()); if (in_the_priority_queue) ethread->EventQueue.remove(this); timeout_at = Thread::get_hrtime() + atimeout_in; - period = 0; + period = ts_nanoseconds::zero(); immediate = false; mutex = continuation->mutex; if (!in_the_prot_queue) @@ -78,15 +78,15 @@ Event::schedule_in(ink_hrtime atimeout_in, int acallback_event) } void -Event::schedule_every(ink_hrtime aperiod, int acallback_event) 
+Event::schedule_every(ts_nanoseconds aperiod, int acallback_event) { callback_event = acallback_event; ink_assert(ethread == this_ethread()); - ink_assert(aperiod != 0); + ink_assert(aperiod != ts_nanoseconds::zero()); if (in_the_priority_queue) ethread->EventQueue.remove(this); - if (aperiod < 0) { - timeout_at = aperiod; + if (aperiod < ts_nanoseconds::zero()) { + timeout_at = TS_HRTICK_ZERO + aperiod; } else { timeout_at = Thread::get_hrtime() + aperiod; } diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc index fc985ddafd9..32681183ec8 100644 --- a/iocore/eventsystem/UnixEventProcessor.cc +++ b/iocore/eventsystem/UnixEventProcessor.cc @@ -176,7 +176,7 @@ EventProcessor::spawn_thread(Continuation *cont, const char *thr_name, size_t st ink_release_assert(n_dthreads < MAX_EVENT_THREADS); Event *e = eventAllocator.alloc(); - e->init(cont, 0, 0); + e->init(cont); all_dthreads[n_dthreads] = new EThread(DEDICATED, e); e->ethread = all_dthreads[n_dthreads]; e->mutex = e->continuation->mutex = all_dthreads[n_dthreads]->mutex; diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc index 918d69ea756..453be4d674d 100644 --- a/iocore/hostdb/HostDB.cc +++ b/iocore/hostdb/HostDB.cc @@ -239,7 +239,7 @@ HostDBProcessor::cache() struct HostDBBackgroundTask : public Continuation { int frequency; - ink_hrtime start_time; + ts_hrtick start_time; virtual int sync_event(int event, void *edata) = 0; int wait_event(int event, void *edata); @@ -255,7 +255,7 @@ HostDBBackgroundTask::HostDBBackgroundTask(int frequency) : Continuation(new_Pro int HostDBBackgroundTask::wait_event(int, void *) { - ink_hrtime next_sync = HRTIME_SECONDS(this->frequency) - (Thread::get_hrtime() - start_time); + ts_hrtick next_sync = HRTIME_SECONDS(this->frequency) - (Thread::get_hrtime() - start_time); SET_HANDLER(&HostDBBackgroundTask::sync_event); if (next_sync > HRTIME_MSECONDS(100)) diff --git a/iocore/hostdb/P_RefCountCacheSerializer.h 
b/iocore/hostdb/P_RefCountCacheSerializer.h index f0bf624986a..e49ba84fe66 100644 --- a/iocore/hostdb/P_RefCountCacheSerializer.h +++ b/iocore/hostdb/P_RefCountCacheSerializer.h @@ -67,8 +67,8 @@ template class RefCountCacheSerializer : public Continuation std::string filename; std::string tmp_filename; - ink_hrtime time_per_partition; - ink_hrtime start; + ts_hrtick time_per_partition; + ts_hrtick start; int total_items; int64_t total_size; @@ -187,8 +187,8 @@ RefCountCacheSerializer::write_partition(int /* event */, Event *e) SET_HANDLER(&RefCountCacheSerializer::pause_event); // Figure out how much time we spent - ink_hrtime elapsed = Thread::get_hrtime() - this->start; - ink_hrtime expected_elapsed = (this->partition * this->time_per_partition); + ts_hrtick elapsed = Thread::get_hrtime() - this->start; + ts_hrtick expected_elapsed = (this->partition * this->time_per_partition); // If we were quicker than our pace-- lets reschedule in the future if (elapsed < expected_elapsed) { diff --git a/iocore/net/I_NetVConnection.h b/iocore/net/I_NetVConnection.h index 1434b184471..b75ad86298c 100644 --- a/iocore/net/I_NetVConnection.h +++ b/iocore/net/I_NetVConnection.h @@ -399,7 +399,7 @@ class NetVConnection : public VConnection interfaces. */ - virtual void set_active_timeout(ink_hrtime timeout_in) = 0; + virtual void set_active_timeout(ts_nanoseconds timeout_in) = 0; /** Sets time after which SM should be notified if the requested @@ -413,7 +413,7 @@ class NetVConnection : public VConnection is currently active. See section on timeout semantics above. */ - virtual void set_inactivity_timeout(ink_hrtime timeout_in) = 0; + virtual void set_inactivity_timeout(ts_nanoseconds timeout_in) = 0; /** Clears the active timeout. 
No active timeouts will be sent until @@ -452,10 +452,10 @@ class NetVConnection : public VConnection virtual bool add_to_active_queue() = 0; /** @return the current active_timeout value in nanosecs */ - virtual ink_hrtime get_active_timeout() = 0; + virtual ts_nanoseconds get_active_timeout() = 0; /** @return current inactivity_timeout value in nanosecs */ - virtual ink_hrtime get_inactivity_timeout() = 0; + virtual ts_nanoseconds get_inactivity_timeout() = 0; /** Force an @a event if a write operation empties the write buffer. diff --git a/iocore/net/I_UDPPacket.h b/iocore/net/I_UDPPacket.h index 7b0be5fa92d..56bc616fe4b 100644 --- a/iocore/net/I_UDPPacket.h +++ b/iocore/net/I_UDPPacket.h @@ -72,24 +72,24 @@ class UDPPacket @param to address of where to send packet - @param when ink_hrtime relative to ink_get_hrtime_internal() + @param when ts_hrtick relative to ink_get_hrtime_internal() @param buf if !NULL, then len bytes copied from buf and made into packet. @param len # of bytes to copy from buf */ -extern UDPPacket *new_UDPPacket(struct sockaddr const *to, ink_hrtime when = 0, char *buf = NULL, int len = 0); +extern UDPPacket *new_UDPPacket(struct sockaddr const *to, ts_hrtick when = TS_HRTICK_ZERO, char *buf = NULL, int len = 0); /** Create a new packet to be sent over UDPConnection. This clones and makes a reference to an existing IOBufferBlock chain. @param to address of where to send packet - @param when ink_hrtime relative to ink_get_hrtime_internal() + @param when ts_hrtick relative to ink_get_hrtime_internal() @param block if !NULL, then the IOBufferBlock chain of data to use for packet @param len # of bytes to reference from block */ -TS_INLINE UDPPacket *new_UDPPacket(struct sockaddr const *to, ink_hrtime when = 0, IOBufferBlock *block = NULL, int len = 0); +TS_INLINE UDPPacket *new_UDPPacket(struct sockaddr const *to, ts_hrtick when = TS_HRTICK_ZERO, IOBufferBlock *block = NULL, int len = 0); /** Create a new packet to be sent over UDPConnection. 
Packet has no destination or data. diff --git a/iocore/net/OCSPStapling.cc b/iocore/net/OCSPStapling.cc index bf5d413e467..ac4f313bcbd 100644 --- a/iocore/net/OCSPStapling.cc +++ b/iocore/net/OCSPStapling.cc @@ -251,13 +251,13 @@ stapling_check_response(certinfo *cinf, OCSP_RESPONSE *rsp) static OCSP_RESPONSE * query_responder(BIO *b, char *host, char *path, OCSP_REQUEST *req, int req_timeout) { - ink_hrtime start, end; + ts_hrtick start, end; OCSP_RESPONSE *resp = NULL; OCSP_REQ_CTX *ctx; int rv; start = Thread::get_hrtime(); - end = ink_hrtime_add(start, ink_hrtime_from_sec(req_timeout)); + end = ts_hrtick_add(start, ts_hrtick_from_sec(req_timeout)); ctx = OCSP_sendreq_new(b, path, NULL, -1); OCSP_REQ_CTX_add1_header(ctx, "Host", host); @@ -265,7 +265,7 @@ query_responder(BIO *b, char *host, char *path, OCSP_REQUEST *req, int req_timeo do { rv = OCSP_sendreq_nbio(&resp, ctx); - ink_hrtime_sleep(HRTIME_MSECONDS(1)); + ts_hrtick_sleep(HRTIME_MSECONDS(1)); } while ((rv == -1) && BIO_should_retry(b) && (Thread::get_hrtime() < end)); OCSP_REQ_CTX_free(ctx); diff --git a/iocore/net/P_NetAccept.h b/iocore/net/P_NetAccept.h index 0a20a13a0a3..ed5eef66458 100644 --- a/iocore/net/P_NetAccept.h +++ b/iocore/net/P_NetAccept.h @@ -80,7 +80,7 @@ struct NetAcceptAction : public Action, public RefCountObj { // Handles accepting connections. 
// struct NetAccept : public Continuation { - ink_hrtime period; + ts_hrtick period; Server server; AcceptFunctionPtr accept_fn; int ifd; diff --git a/iocore/net/P_SSLNetVConnection.h b/iocore/net/P_SSLNetVConnection.h index eaf6283d843..07ee065846d 100644 --- a/iocore/net/P_SSLNetVConnection.h +++ b/iocore/net/P_SSLNetVConnection.h @@ -253,8 +253,8 @@ class SSLNetVConnection : public UnixNetVConnection virtual int populate(Connection &con, Continuation *c, void *arg); SSL *ssl; - ink_hrtime sslHandshakeBeginTime; - ink_hrtime sslLastWriteTime; + ts_hrtick sslHandshakeBeginTime; + ts_hrtick sslLastWriteTime; int64_t sslTotalBytesSent; /// Set by asynchronous hooks to request a specific operation. diff --git a/iocore/net/P_UDPNet.h b/iocore/net/P_UDPNet.h index 60630b5313d..2070b8c0733 100644 --- a/iocore/net/P_UDPNet.h +++ b/iocore/net/P_UDPNet.h @@ -71,17 +71,17 @@ class PacketQueue virtual ~PacketQueue() {} int nPackets; - ink_hrtime lastPullLongTermQ; + ts_hrtick lastPullLongTermQ; Queue longTermQ; Queue bucket[N_SLOTS]; - ink_hrtime delivery_time[N_SLOTS]; + ts_hrtick delivery_time[N_SLOTS]; int now_slot; void init(void) { now_slot = 0; - ink_hrtime now = ink_get_hrtime_internal(); + ts_hrtick now = ink_get_hrtime_internal(); int i = now_slot; int j = 0; while (j < N_SLOTS) { @@ -92,7 +92,7 @@ class PacketQueue } void - addPacket(UDPPacketInternal *e, ink_hrtime now = 0) + addPacket(UDPPacketInternal *e, ts_hrtick now = 0) { int before = 0; int slot; @@ -109,7 +109,7 @@ class PacketQueue if (e->delivery_time < now) e->delivery_time = now; - ink_hrtime s = e->delivery_time - delivery_time[now_slot]; + ts_hrtick s = e->delivery_time - delivery_time[now_slot]; if (s < 0) { before = 1; @@ -136,7 +136,7 @@ class PacketQueue } UDPPacketInternal * - firstPacket(ink_hrtime t) + firstPacket(ts_hrtick t) { if (t > delivery_time[now_slot]) { return bucket[now_slot].head; @@ -190,12 +190,12 @@ class PacketQueue } void - advanceNow(ink_hrtime t) + advanceNow(ts_hrtick t) { 
int s = now_slot; int prev; - if (ink_hrtime_to_msec(t - lastPullLongTermQ) >= SLOT_TIME_MSEC * ((N_SLOTS - 1) / 2)) { + if (ts_hrtick_to_msec(t - lastPullLongTermQ) >= SLOT_TIME_MSEC * ((N_SLOTS - 1) / 2)) { Queue tempQ; UDPPacketInternal *p; // pull in all the stuff from long-term slot @@ -225,7 +225,7 @@ class PacketQueue if (s != now_slot) Debug("udpnet-service", "Advancing by (%d slots): behind by %" PRId64 " ms", s - now_slot, - ink_hrtime_to_msec(t - delivery_time[now_slot])); + ts_hrtick_to_msec(t - delivery_time[now_slot])); now_slot = s; } @@ -241,7 +241,7 @@ class PacketQueue public: UDPPacketInternal * - dequeue_ready(ink_hrtime t) + dequeue_ready(ts_hrtick t) { (void)t; UDPPacketInternal *e = bucket[now_slot].dequeue(); @@ -254,12 +254,12 @@ class PacketQueue } void - check_ready(ink_hrtime now) + check_ready(ts_hrtick now) { (void)now; } - ink_hrtime + ts_hrtick earliest_timeout() { int s = now_slot; @@ -282,8 +282,8 @@ class PacketQueue class UDPQueue { PacketQueue pipeInfo; - ink_hrtime last_report; - ink_hrtime last_service; + ts_hrtick last_report; + ts_hrtick last_service; int packets; int added; @@ -319,8 +319,8 @@ struct UDPNetHandler : public Continuation { // UDPBind InkAtomicList udpNewConnections; Event *trigger_event; - ink_hrtime nextCheck; - ink_hrtime lastCheck; + ts_hrtick nextCheck; + ts_hrtick lastCheck; int startNetEvent(int event, Event *data); int mainNetEvent(int event, Event *data); diff --git a/iocore/net/P_UDPPacket.h b/iocore/net/P_UDPPacket.h index f93f71491b5..72f389d2374 100644 --- a/iocore/net/P_UDPPacket.h +++ b/iocore/net/P_UDPPacket.h @@ -48,7 +48,7 @@ class UDPPacketInternal : public UDPPacket uint64_t pktLength; int reqGenerationNum; - ink_hrtime delivery_time; // when to deliver packet + ts_hrtick delivery_time; // when to deliver packet Ptr chain; Continuation *cont; // callback on error @@ -62,7 +62,7 @@ inkcoreapi extern ClassAllocator udpPacketAllocator; TS_INLINE UDPPacketInternal::UDPPacketInternal() - : 
pktLength(0), reqGenerationNum(0), delivery_time(0), cont(NULL), conn(NULL), in_the_priority_queue(0), in_heap(0) + : pktLength(0), reqGenerationNum(0), cont(NULL), conn(NULL), in_the_priority_queue(0), in_heap(0) { memset(&from, '\0', sizeof(from)); memset(&to, '\0', sizeof(to)); @@ -165,7 +165,7 @@ UDPPacket::getConnection(void) } TS_INLINE UDPPacket * -new_UDPPacket(struct sockaddr const *to, ink_hrtime when, char *buf, int len) +new_UDPPacket(struct sockaddr const *to, ts_hrtick when, char *buf, int len) { UDPPacketInternal *p = udpPacketAllocator.alloc(); @@ -186,7 +186,7 @@ new_UDPPacket(struct sockaddr const *to, ink_hrtime when, char *buf, int len) } TS_INLINE UDPPacket * -new_UDPPacket(struct sockaddr const *to, ink_hrtime when, IOBufferBlock *buf, int len) +new_UDPPacket(struct sockaddr const *to, ts_hrtick when, IOBufferBlock *buf, int len) { (void)len; UDPPacketInternal *p = udpPacketAllocator.alloc(); @@ -207,7 +207,7 @@ new_UDPPacket(struct sockaddr const *to, ink_hrtime when, IOBufferBlock *buf, in } TS_INLINE UDPPacket * -new_UDPPacket(struct sockaddr const *to, ink_hrtime when, Ptr buf) +new_UDPPacket(struct sockaddr const *to, ts_hrtick when, Ptr buf) { UDPPacketInternal *p = udpPacketAllocator.alloc(); @@ -221,7 +221,7 @@ new_UDPPacket(struct sockaddr const *to, ink_hrtime when, Ptr buf } TS_INLINE UDPPacket * -new_UDPPacket(ink_hrtime when, Ptr buf) +new_UDPPacket(ts_hrtick when, Ptr buf) { return new_UDPPacket(NULL, when, buf); } @@ -233,7 +233,7 @@ new_incoming_UDPPacket(struct sockaddr *from, char *buf, int len) p->in_the_priority_queue = 0; p->in_heap = 0; - p->delivery_time = 0; + p->delivery_time = TS_HRTICK_ZERO; ats_ip_copy(&p->from, from); IOBufferBlock *body = new_IOBufferBlock(); diff --git a/iocore/net/P_UnixNet.h b/iocore/net/P_UnixNet.h index 9eb71f0fd97..d6a3a8db593 100644 --- a/iocore/net/P_UnixNet.h +++ b/iocore/net/P_UnixNet.h @@ -120,13 +120,13 @@ class NetHandler; typedef int (NetHandler::*NetContHandler)(int, void *); 
typedef unsigned int uint32; -extern ink_hrtime last_throttle_warning; -extern ink_hrtime last_shedding_warning; -extern ink_hrtime emergency_throttle_time; +extern ts_hrtick last_throttle_warning; +extern ts_hrtick last_shedding_warning; +extern ts_hrtick emergency_throttle_time; extern int net_connections_throttle; extern int fds_throttle; extern int fds_limit; -extern ink_hrtime last_transient_accept_error; +extern ts_hrtick last_transient_accept_error; extern int http_accept_port_number; //#define INACTIVITY_TIMEOUT @@ -138,7 +138,7 @@ extern int http_accept_port_number; #define THROTTLE_FD_HEADROOM (128 + 64) // CACHE_DB_FDS + 64 -#define TRANSIENT_ACCEPT_ERROR_MESSAGE_EVERY HRTIME_HOURS(24) +static const ts_nanoseconds TRANSIENT_ACCEPT_ERROR_MESSAGE_EVERY(ts_hours(24)); // also the 'throttle connect headroom' #define THROTTLE_AT_ONCE 5 @@ -147,7 +147,7 @@ extern int http_accept_port_number; #define NET_THROTTLE_ACCEPT_HEADROOM 1.1 // 10% #define NET_THROTTLE_CONNECT_HEADROOM 1.0 // 0% -#define NET_THROTTLE_MESSAGE_EVERY HRTIME_MINUTES(10) +static const ts_nanoseconds NET_THROTTLE_MESSAGE_EVERY(ts_minutes(10)); #define PRINT_IP(x) ((uint8_t *)&(x))[0], ((uint8_t *)&(x))[1], ((uint8_t *)&(x))[2], ((uint8_t *)&(x))[3] @@ -214,7 +214,7 @@ class NetHandler : public Continuation NetHandler(); private: - void _close_vc(UnixNetVConnection *vc, ink_hrtime now, int &handle_event, int &closed, int &total_idle_time, + void _close_vc(UnixNetVConnection *vc, ts_hrtick now, int &handle_event, int &closed, int &total_idle_time, int &total_idle_count); }; @@ -257,7 +257,7 @@ net_connections_to_throttle(ThrottleType t) TS_INLINE void check_shedding_warning() { - ink_hrtime t = Thread::get_hrtime(); + ts_hrtick t = Thread::get_hrtime(); if (t - last_shedding_warning > NET_THROTTLE_MESSAGE_EVERY) { last_shedding_warning = t; RecSignalWarning(REC_SIGNAL_SYSTEM_ERROR, "number of connections reaching shedding limit"); @@ -265,13 +265,13 @@ check_shedding_warning() } TS_INLINE int 
-emergency_throttle(ink_hrtime now) +emergency_throttle(ts_hrtick now) { return emergency_throttle_time > now; } TS_INLINE int -check_net_throttle(ThrottleType t, ink_hrtime now) +check_net_throttle(ThrottleType t, ts_hrtick now) { int connections = net_connections_to_throttle(t); @@ -287,7 +287,7 @@ check_net_throttle(ThrottleType t, ink_hrtime now) TS_INLINE void check_throttle_warning() { - ink_hrtime t = Thread::get_hrtime(); + ts_hrtick t = Thread::get_hrtime(); if (t - last_throttle_warning > NET_THROTTLE_MESSAGE_EVERY) { last_throttle_warning = t; RecSignalWarning(REC_SIGNAL_SYSTEM_ERROR, "too many connections, throttling"); @@ -312,7 +312,7 @@ check_emergency_throttle(Connection &con) int emergency = fds_limit - EMERGENCY_THROTTLE; if (fd > emergency) { int over = fd - emergency; - emergency_throttle_time = Thread::get_hrtime() + (over * over) * HRTIME_SECOND; + emergency_throttle_time = Thread::get_hrtime() + ts_seconds(over * over); RecSignalWarning(REC_SIGNAL_SYSTEM_ERROR, "too many open file descriptors, emergency throttling"); int hyper_emergency = fds_limit - HYPER_EMERGENCY_THROTTLE; if (fd > hyper_emergency) @@ -383,8 +383,8 @@ accept_error_seriousness(int res) TS_INLINE void check_transient_accept_error(int res) { - ink_hrtime t = Thread::get_hrtime(); - if (!last_transient_accept_error || t - last_transient_accept_error > TRANSIENT_ACCEPT_ERROR_MESSAGE_EVERY) { + ts_hrtick t = Thread::get_hrtime(); + if (TS_HRTICK_ZERO == last_transient_accept_error || t - last_transient_accept_error > TRANSIENT_ACCEPT_ERROR_MESSAGE_EVERY) { last_transient_accept_error = t; Warning("accept thread received transient error: errno = %d", -res); #if defined(linux) @@ -409,8 +409,8 @@ read_disable(NetHandler *nh, UnixNetVConnection *vc) } #else if (!vc->write.enabled) { - vc->next_inactivity_timeout_at = 0; - Debug("socket", "read_disable updating inactivity_at %" PRId64 ", NetVC=%p", vc->next_inactivity_timeout_at, vc); + vc->next_inactivity_timeout_at = 
TS_HRTICK_ZERO; + Debug("socket", "read_disable updating inactivity_at %" PRId64 ", NetVC=%p", raw_ticks(vc->next_inactivity_timeout_at), vc); } #endif vc->read.enabled = 0; @@ -430,8 +430,8 @@ write_disable(NetHandler *nh, UnixNetVConnection *vc) } #else if (!vc->read.enabled) { - vc->next_inactivity_timeout_at = 0; - Debug("socket", "write_disable updating inactivity_at %" PRId64 ", NetVC=%p", vc->next_inactivity_timeout_at, vc); + vc->next_inactivity_timeout_at = TS_HRTICK_ZERO; + Debug("socket", "write_disable updating inactivity_at %" PRId64 ", NetVC=%p", raw_ticks(vc->next_inactivity_timeout_at), vc); } #endif vc->write.enabled = 0; diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h index 6317076020c..d82aee598eb 100644 --- a/iocore/net/P_UnixNetVConnection.h +++ b/iocore/net/P_UnixNetVConnection.h @@ -145,8 +145,8 @@ class UnixNetVConnection : public NetVConnection // called when handing an event from this NetVConnection,// // or the NetVConnection creation callback. 
// //////////////////////////////////////////////////////////// - virtual void set_active_timeout(ink_hrtime timeout_in); - virtual void set_inactivity_timeout(ink_hrtime timeout_in); + virtual void set_active_timeout(ts_nanoseconds timeout_in); + virtual void set_inactivity_timeout(ts_nanoseconds timeout_in); virtual void cancel_active_timeout(); virtual void cancel_inactivity_timeout(); virtual void set_action(Continuation *c); @@ -240,14 +240,14 @@ class UnixNetVConnection : public NetVConnection LINK(UnixNetVConnection, keep_alive_queue_link); LINK(UnixNetVConnection, active_queue_link); - ink_hrtime inactivity_timeout_in; - ink_hrtime active_timeout_in; + ts_nanoseconds inactivity_timeout_in; + ts_nanoseconds active_timeout_in; #ifdef INACTIVITY_TIMEOUT Event *inactivity_timeout; Event *activity_timeout; #else - ink_hrtime next_inactivity_timeout_at; - ink_hrtime next_activity_timeout_at; + ts_hrtick next_inactivity_timeout_at; + ts_hrtick next_activity_timeout_at; #endif EventIO ep; @@ -268,7 +268,7 @@ class UnixNetVConnection : public NetVConnection Connection con; int recursion; - ink_hrtime submit_time; + ts_hrtick submit_time; OOB_callback *oob_ptr; bool from_accept_thread; @@ -289,8 +289,8 @@ class UnixNetVConnection : public NetVConnection virtual int populate(Connection &con, Continuation *c, void *arg); virtual void free(EThread *t); - virtual ink_hrtime get_inactivity_timeout(); - virtual ink_hrtime get_active_timeout(); + virtual ts_nanoseconds get_inactivity_timeout(); + virtual ts_nanoseconds get_active_timeout(); virtual void set_local_addr(); virtual void set_remote_addr(); @@ -336,22 +336,22 @@ UnixNetVConnection::set_local_addr() safe_getsockname(con.fd, &local_addr.sa, &local_sa_size); } -TS_INLINE ink_hrtime +TS_INLINE ts_nanoseconds UnixNetVConnection::get_active_timeout() { return active_timeout_in; } -TS_INLINE ink_hrtime +TS_INLINE ts_nanoseconds UnixNetVConnection::get_inactivity_timeout() { return inactivity_timeout_in; } TS_INLINE 
void -UnixNetVConnection::set_inactivity_timeout(ink_hrtime timeout_in) +UnixNetVConnection::set_inactivity_timeout(ts_hrtick timeout_in) { - Debug("socket", "Set inactive timeout=%" PRId64 ", for NetVC=%p", timeout_in, this); + Debug("socket", "Set inactive timeout=%" PRId64 ", for NetVC=%p", timeout_in.count(), this); inactivity_timeout_in = timeout_in; #ifdef INACTIVITY_TIMEOUT @@ -375,18 +375,18 @@ UnixNetVConnection::set_inactivity_timeout(ink_hrtime timeout_in) } else inactivity_timeout = 0; #else - if (timeout_in) { + if (timeout_in.count()) { next_inactivity_timeout_at = Thread::get_hrtime() + timeout_in; } else { - next_inactivity_timeout_at = 0; + next_inactivity_timeout_at = TS_HRTICK_ZERO; } #endif } TS_INLINE void -UnixNetVConnection::set_active_timeout(ink_hrtime timeout_in) +UnixNetVConnection::set_active_timeout(ts_nanoseconds timeout_in) { - Debug("socket", "Set active timeout=%" PRId64 ", NetVC=%p", timeout_in, this); + Debug("socket", "Set active timeout=%" PRId64 ", NetVC=%p", timeout_in.count(), this); active_timeout_in = timeout_in; #ifdef INACTIVITY_TIMEOUT if (active_timeout) @@ -417,7 +417,7 @@ TS_INLINE void UnixNetVConnection::cancel_inactivity_timeout() { Debug("socket", "Cancel inactive timeout for NetVC=%p", this); - inactivity_timeout_in = 0; + inactivity_timeout_in = ts_nanoseconds::zero(); #ifdef INACTIVITY_TIMEOUT if (inactivity_timeout) { Debug("socket", "Cancel inactive timeout for NetVC=%p", this); @@ -425,7 +425,7 @@ UnixNetVConnection::cancel_inactivity_timeout() inactivity_timeout = NULL; } #else - next_inactivity_timeout_at = 0; + next_inactivity_timeout_at = TS_HRTICK_ZERO; #endif } @@ -433,7 +433,7 @@ TS_INLINE void UnixNetVConnection::cancel_active_timeout() { Debug("socket", "Cancel active timeout for NetVC=%p", this); - active_timeout_in = 0; + active_timeout_in = ts_nanoseconds::zero(); #ifdef INACTIVITY_TIMEOUT if (active_timeout) { Debug("socket", "Cancel active timeout for NetVC=%p", this); @@ -441,7 +441,7 @@ 
UnixNetVConnection::cancel_active_timeout() active_timeout = NULL; } #else - next_activity_timeout_at = 0; + next_activity_timeout_at = TS_HRTICK_ZERO; #endif } diff --git a/iocore/net/SSLConfig.cc b/iocore/net/SSLConfig.cc index 13b6921ed41..37d8bffb7b2 100644 --- a/iocore/net/SSLConfig.cc +++ b/iocore/net/SSLConfig.cc @@ -378,7 +378,7 @@ SSLCertificateConfig::reconfigure() if (is_action_tag_set("test.multicert.delay")) { const int secs = 60; Debug("ssl", "delaying certificate reload by %dsecs", secs); - ink_hrtime_sleep(HRTIME_SECONDS(secs)); + ts_hrtick_sleep(HRTIME_SECONDS(secs)); } SSLParseCertificateConfiguration(params, lookup); diff --git a/iocore/net/SSLNetVConnection.cc b/iocore/net/SSLNetVConnection.cc index 5fa81b645d4..071263f3dd9 100644 --- a/iocore/net/SSLNetVConnection.cc +++ b/iocore/net/SSLNetVConnection.cc @@ -647,10 +647,10 @@ SSLNetVConnection::load_buffer_and_write(int64_t towrite, MIOBufferAccessor &buf ssl_error_t err = SSL_ERROR_NONE; // Dynamic TLS record sizing - ink_hrtime now = 0; + ts_hrtick now = TS_HRTICK_ZERO; if (SSLConfigParams::ssl_maxrecord == -1) { now = Thread::get_hrtime_updated(); - int msec_since_last_write = ink_hrtime_diff_msec(now, sslLastWriteTime); + int msec_since_last_write = ts_hrtick_diff_msec(now, sslLastWriteTime); if (msec_since_last_write > SSL_DEF_TLS_RECORD_MSEC_THRESHOLD) { // reset sslTotalBytesSent upon inactivity for SSL_DEF_TLS_RECORD_MSEC_THRESHOLD @@ -1096,7 +1096,7 @@ SSLNetVConnection::sslServerHandShakeEvent(int &err) // do we want to include cert info in trace? 
if (sslHandshakeBeginTime) { - const ink_hrtime ssl_handshake_time = Thread::get_hrtime() - sslHandshakeBeginTime; + const ts_hrtick ssl_handshake_time = Thread::get_hrtime() - sslHandshakeBeginTime; Debug("ssl", "ssl handshake time:%" PRId64, ssl_handshake_time); sslHandshakeBeginTime = 0; SSL_INCREMENT_DYN_STAT_EX(ssl_total_handshake_time_stat, ssl_handshake_time); diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc index 79a350381ff..6c14e455dcb 100644 --- a/iocore/net/UnixNet.cc +++ b/iocore/net/UnixNet.cc @@ -23,13 +23,13 @@ #include "P_Net.h" -ink_hrtime last_throttle_warning; -ink_hrtime last_shedding_warning; -ink_hrtime emergency_throttle_time; +ts_hrtick last_throttle_warning; +ts_hrtick last_shedding_warning; +ts_hrtick emergency_throttle_time; int net_connections_throttle; int fds_throttle; int fds_limit = 8000; -ink_hrtime last_transient_accept_error; +ts_hrtick last_transient_accept_error; extern "C" void fd_reify(struct ev_loop *); @@ -42,11 +42,13 @@ int update_cop_config(const char *name, RecDataT data_type, RecData data, void * class InactivityCop : public Continuation { public: - explicit InactivityCop(Ptr &m) : Continuation(m.get()), default_inactivity_timeout(0) + explicit InactivityCop(Ptr &m) : Continuation(m.get()) { + int config_value; SET_HANDLER(&InactivityCop::check_inactivity); - REC_ReadConfigInteger(default_inactivity_timeout, "proxy.config.net.default_inactivity_timeout"); - Debug("inactivity_cop", "default inactivity timeout is set to: %d", default_inactivity_timeout); + REC_ReadConfigInteger(config_value, "proxy.config.net.default_inactivity_timeout"); + default_inactivity_timeout = ts_seconds(config_value); + Debug("inactivity_cop", "default inactivity timeout is set to: %" PRId64, std::chrono::duration_cast(default_inactivity_timeout).count()); RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", update_cop_config, (void *)this); } @@ -55,7 +57,7 @@ class InactivityCop : public Continuation 
check_inactivity(int event, Event *e) { (void)event; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); NetHandler &nh = *get_NetHandler(this_ethread()); Debug("inactivity_cop_check", "Checking inactivity on Thread-ID #%d", this_ethread()->id); @@ -80,25 +82,31 @@ class InactivityCop : public Continuation } // set a default inactivity timeout if one is not set - if (vc->next_inactivity_timeout_at == 0 && default_inactivity_timeout > 0) { - Debug("inactivity_cop", "vc: %p inactivity timeout not set, setting a default of %d", vc, default_inactivity_timeout); - vc->set_inactivity_timeout(HRTIME_SECONDS(default_inactivity_timeout)); + if (vc->next_inactivity_timeout_at == TS_HRTICK_ZERO && default_inactivity_timeout.count() > 0) { + Debug("inactivity_cop", "vc: %p inactivity timeout not set, setting a default of %" PRId64, vc, + std::chrono::duration_cast(default_inactivity_timeout).count()); + vc->set_inactivity_timeout(default_inactivity_timeout); NET_INCREMENT_DYN_STAT(default_inactivity_timeout_stat); } else { Debug("inactivity_cop_verbose", "vc: %p now: %" PRId64 " timeout at: %" PRId64 " timeout in: %" PRId64, vc, - ink_hrtime_to_sec(now), ink_hrtime_to_sec(vc->next_inactivity_timeout_at), - ink_hrtime_to_sec(vc->inactivity_timeout_in)); + std::chrono::duration_cast(now.time_since_epoch()).count(), + std::chrono::duration_cast(vc->next_inactivity_timeout_at.time_since_epoch()).count(), + std::chrono::duration_cast(vc->inactivity_timeout_in).count() + ); } - if (vc->next_inactivity_timeout_at && vc->next_inactivity_timeout_at < now) { + if (vc->next_inactivity_timeout_at != TS_HRTICK_ZERO && vc->next_inactivity_timeout_at < now) { if (nh.keep_alive_queue.in(vc)) { // only stat if the connection is in keep-alive, there can be other inactivity timeouts - ink_hrtime diff = (now - (vc->next_inactivity_timeout_at - vc->inactivity_timeout_in)) / HRTIME_SECOND; - NET_SUM_DYN_STAT(keep_alive_queue_timeout_total_stat, diff); + auto diff = 
std::chrono::duration_cast(now - (vc->next_inactivity_timeout_at - vc->inactivity_timeout_in)); + NET_SUM_DYN_STAT(keep_alive_queue_timeout_total_stat, diff.count()); NET_INCREMENT_DYN_STAT(keep_alive_queue_timeout_count_stat); } Debug("inactivity_cop_verbose", "vc: %p now: %" PRId64 " timeout at: %" PRId64 " timeout in: %" PRId64, vc, - ink_hrtime_to_sec(now), vc->next_inactivity_timeout_at, vc->inactivity_timeout_in); + std::chrono::duration_cast(now.time_since_epoch()).count(), + std::chrono::duration_cast(vc->next_inactivity_timeout_at.time_since_epoch()).count(), + std::chrono::duration_cast(vc->inactivity_timeout_in).count() + ); vc->handleEvent(EVENT_IMMEDIATE, e); } } @@ -111,13 +119,13 @@ class InactivityCop : public Continuation } void - set_default_timeout(const int x) + set_default_timeout(ts_nanoseconds x) { default_inactivity_timeout = x; } private: - int default_inactivity_timeout; // only used when one is not set for some bad reason + ts_nanoseconds default_inactivity_timeout; // only used when one is not set for some bad reason }; int @@ -129,7 +137,7 @@ update_cop_config(const char *name, RecDataT data_type ATS_UNUSED, RecData data, if (cop != NULL) { if (strcmp(name, "proxy.config.net.default_inactivity_timeout") == 0) { Debug("inactivity_cop_dynamic", "proxy.config.net.default_inactivity_timeout updated to %" PRId64, data.rec_int); - cop->set_default_timeout(data.rec_int); + cop->set_default_timeout(ts_seconds(data.rec_int)); } } @@ -274,7 +282,7 @@ initialize_thread_for_net(EThread *thread) int cop_freq = 1; REC_ReadConfigInteger(cop_freq, "proxy.config.net.inactivity_check_frequency"); - thread->schedule_every(inactivityCop, HRTIME_SECONDS(cop_freq)); + thread->schedule_every(inactivityCop, ts_seconds(cop_freq)); #endif thread->signal_hook = net_signal_hook_function; @@ -376,7 +384,7 @@ NetHandler::startNetEvent(int event, Event *e) (void)event; SET_HANDLER((NetContHandler)&NetHandler::mainNetEvent); - 
e->schedule_every(-HRTIME_MSECONDS(net_event_period)); + e->schedule_every(ts_milliseconds(-net_event_period)); trigger_event = e; return EVENT_CONT; } @@ -590,7 +598,7 @@ NetHandler::manage_active_queue(bool ignore_queue_size = false) return true; } - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); // loop over the non-active connections and try to close them UnixNetVConnection *vc = active_queue.head; @@ -601,8 +609,8 @@ NetHandler::manage_active_queue(bool ignore_queue_size = false) int total_idle_count = 0; for (; vc != NULL; vc = vc_next) { vc_next = vc->active_queue_link.next; - if ((vc->inactivity_timeout_in && vc->next_inactivity_timeout_at <= now) || - (vc->active_timeout_in && vc->next_activity_timeout_at <= now)) { + if ((vc->inactivity_timeout_in.count() && vc->next_inactivity_timeout_at <= now) || + (vc->active_timeout_in.count() && vc->next_activity_timeout_at <= now)) { _close_vc(vc, now, handle_event, closed, total_idle_time, total_idle_count); } if (ignore_queue_size == false && max_connections_active_per_thread_in > active_queue_size) { @@ -634,7 +642,7 @@ void NetHandler::manage_keep_alive_queue() { uint32_t total_connections_in = active_queue_size + keep_alive_queue_size; - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); Debug("net_queue", "max_connections_per_thread_in: %d total_connections_in: %d active_queue_size: %d keep_alive_queue_size: %d", max_connections_per_thread_in, total_connections_in, active_queue_size, keep_alive_queue_size); @@ -667,7 +675,7 @@ NetHandler::manage_keep_alive_queue() } void -NetHandler::_close_vc(UnixNetVConnection *vc, ink_hrtime now, int &handle_event, int &closed, int &total_idle_time, +NetHandler::_close_vc(UnixNetVConnection *vc, ts_hrtick now, int &handle_event, int &closed, int &total_idle_time, int &total_idle_count) { if (vc->thread != this_ethread()) { @@ -677,7 +685,7 @@ NetHandler::_close_vc(UnixNetVConnection *vc, ink_hrtime now, int 
&handle_event, if (!lock.is_locked()) { return; } - ink_hrtime diff = (now - (vc->next_inactivity_timeout_at - vc->inactivity_timeout_in)) / HRTIME_SECOND; + auto diff = std::chrono::duration_cast(now - (vc->next_inactivity_timeout_at - vc->inactivity_timeout_in)).count(); if (diff > 0) { total_idle_time += diff; ++total_idle_count; @@ -685,8 +693,11 @@ NetHandler::_close_vc(UnixNetVConnection *vc, ink_hrtime now, int &handle_event, NET_INCREMENT_DYN_STAT(keep_alive_queue_timeout_count_stat); } Debug("net_queue", "closing connection NetVC=%p idle: %u now: %" PRId64 " at: %" PRId64 " in: %" PRId64 " diff: %" PRId64, vc, - keep_alive_queue_size, ink_hrtime_to_sec(now), ink_hrtime_to_sec(vc->next_inactivity_timeout_at), - ink_hrtime_to_sec(vc->inactivity_timeout_in), diff); + keep_alive_queue_size, + std::chrono::duration_cast(now.time_since_epoch()).count(), + std::chrono::duration_cast(vc->next_inactivity_timeout_at.time_since_epoch()).count(), + std::chrono::duration_cast(vc->inactivity_timeout_in).count(), + diff); if (vc->closed) { close_UnixNetVConnection(vc, this_ethread()); ++closed; diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc index 068b594ad88..9fedef0c4bd 100644 --- a/iocore/net/UnixNetAccept.cc +++ b/iocore/net/UnixNetAccept.cc @@ -250,7 +250,7 @@ NetAccept::do_blocking_accept(EThread *t) // do-while for accepting all the connections // added by YTS Team, yamsat do { - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); // Throttle accepts diff --git a/iocore/net/UnixNetPages.cc b/iocore/net/UnixNetPages.cc index 782ebac3dd4..6f114ba4f5f 100644 --- a/iocore/net/UnixNetPages.cc +++ b/iocore/net/UnixNetPages.cc @@ -60,7 +60,7 @@ struct ShowNet : public ShowCont { return EVENT_DONE; } - ink_hrtime now = Thread::get_hrtime(); + ts_hrtick now = Thread::get_hrtime(); forl_LL(UnixNetVConnection, vc, nh->open_list) { // uint16_t port = ats_ip_port_host_order(&addr.sa); diff --git a/iocore/net/UnixNetProcessor.cc 
b/iocore/net/UnixNetProcessor.cc index 8dfd3e975d1..ee746d0fd87 100644 --- a/iocore/net/UnixNetProcessor.cc +++ b/iocore/net/UnixNetProcessor.cc @@ -283,7 +283,7 @@ struct CheckConnect : public Continuation { IOBufferReader *reader; int connect_status; int recursion; - ink_hrtime timeout; + ts_hrtick timeout; int handle_connect(int event, Event *e) diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc index e36e5786289..6b4a702970b 100644 --- a/iocore/net/UnixNetVConnection.cc +++ b/iocore/net/UnixNetVConnection.cc @@ -70,7 +70,7 @@ write_reschedule(NetHandler *nh, UnixNetVConnection *vc) void net_activity(UnixNetVConnection *vc, EThread *thread) { - Debug("socket", "net_activity updating inactivity %" PRId64 ", NetVC=%p", vc->inactivity_timeout_in, vc); + Debug("socket", "net_activity updating inactivity %" PRId64 ", NetVC=%p", vc->inactivity_timeout_in.count(), vc); (void)thread; #ifdef INACTIVITY_TIMEOUT if (vc->inactivity_timeout && vc->inactivity_timeout_in && vc->inactivity_timeout->ethread == thread) @@ -84,10 +84,10 @@ net_activity(UnixNetVConnection *vc, EThread *thread) vc->inactivity_timeout = 0; } #else - if (vc->inactivity_timeout_in) + if (vc->inactivity_timeout_in.count()) vc->next_inactivity_timeout_at = Thread::get_hrtime() + vc->inactivity_timeout_in; else - vc->next_inactivity_timeout_at = 0; + vc->next_inactivity_timeout_at = TS_HRTICK_ZERO; #endif } @@ -117,12 +117,12 @@ close_UnixNetVConnection(UnixNetVConnection *vc, EThread *t) vc->active_timeout = NULL; } #else - vc->next_inactivity_timeout_at = 0; - vc->next_activity_timeout_at = 0; + vc->next_inactivity_timeout_at = TS_HRTICK_ZERO; + vc->next_activity_timeout_at = TS_HRTICK_ZERO; #endif - vc->inactivity_timeout_in = 0; + vc->inactivity_timeout_in = ts_nanoseconds::zero(); - vc->active_timeout_in = 0; + vc->active_timeout_in = ts_nanoseconds::zero(); if (nh) { nh->open_list.remove(vc); nh->cop_list.remove(vc); @@ -774,7 +774,7 @@ 
UnixNetVConnection::send_OOB(Continuation *cont, char *buf, int len) } if (written > 0 && written < len) { u->oob_ptr = new OOB_callback(mutex, this, cont, buf + written, len - written); - u->oob_ptr->trigger = mutex->thread_holding->schedule_in_local(u->oob_ptr, HRTIME_MSECONDS(10)); + u->oob_ptr->trigger = mutex->thread_holding->schedule_in_local(u->oob_ptr, ts_milliseconds(10)); return u->oob_ptr->trigger; } else { // should be a rare case : taking a new continuation should not be @@ -782,7 +782,7 @@ UnixNetVConnection::send_OOB(Continuation *cont, char *buf, int len) written = -errno; ink_assert(written == -EAGAIN || written == -ENOTCONN); u->oob_ptr = new OOB_callback(mutex, this, cont, buf, len); - u->oob_ptr->trigger = mutex->thread_holding->schedule_in_local(u->oob_ptr, HRTIME_MSECONDS(10)); + u->oob_ptr->trigger = mutex->thread_holding->schedule_in_local(u->oob_ptr, ts_milliseconds(10)); return u->oob_ptr->trigger; } } @@ -884,20 +884,14 @@ UnixNetVConnection::reenable_re(VIO *vio) UnixNetVConnection::UnixNetVConnection() : closed(0), - inactivity_timeout_in(0), - active_timeout_in(0), #ifdef INACTIVITY_TIMEOUT inactivity_timeout(NULL), active_timeout(NULL), -#else - next_inactivity_timeout_at(0), - next_activity_timeout_at(0), #endif nh(NULL), id(0), flags(0), recursion(0), - submit_time(0), oob_ptr(0), from_accept_thread(false), origin_trace(false), @@ -925,7 +919,7 @@ UnixNetVConnection::set_enabled(VIO *vio) inactivity_timeout = thread->schedule_in(this, inactivity_timeout_in); } #else - if (!next_inactivity_timeout_at && inactivity_timeout_in) + if (next_inactivity_timeout_at == TS_HRTICK_ZERO && inactivity_timeout_in.count()) next_inactivity_timeout_at = Thread::get_hrtime() + inactivity_timeout_in; #endif } @@ -1062,7 +1056,7 @@ UnixNetVConnection::startEvent(int /* event ATS_UNUSED */, Event *e) { MUTEX_TRY_LOCK(lock, get_NetHandler(e->ethread)->mutex, e->ethread); if (!lock.is_locked()) { - e->schedule_in(HRTIME_MSECONDS(net_retry_delay)); + 
e->schedule_in(ts_milliseconds(net_retry_delay)); return EVENT_CONT; } if (!action_.cancelled) @@ -1080,10 +1074,10 @@ UnixNetVConnection::acceptEvent(int event, Event *e) MUTEX_TRY_LOCK(lock, get_NetHandler(thread)->mutex, e->ethread); if (!lock.is_locked()) { if (event == EVENT_NONE) { - thread->schedule_in(this, HRTIME_MSECONDS(net_retry_delay)); + thread->schedule_in(this, ts_milliseconds(net_retry_delay)); return EVENT_DONE; } else { - e->schedule_in(HRTIME_MSECONDS(net_retry_delay)); + e->schedule_in(ts_milliseconds(net_retry_delay)); return EVENT_CONT; } } @@ -1112,11 +1106,11 @@ UnixNetVConnection::acceptEvent(int event, Event *e) nh->read_ready_list.enqueue(this); #endif - if (inactivity_timeout_in) { + if (inactivity_timeout_in.count()) { UnixNetVConnection::set_inactivity_timeout(inactivity_timeout_in); } - if (active_timeout_in) { + if (active_timeout_in.count()) { UnixNetVConnection::set_active_timeout(active_timeout_in); } @@ -1144,7 +1138,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) (write.vio.mutex && wlock.get_mutex() != write.vio.mutex.get())) { #ifdef INACTIVITY_TIMEOUT if (e == active_timeout) - e->schedule_in(HRTIME_MSECONDS(net_retry_delay)); + e->schedule_in(ts_milliseconds(net_retry_delay)); #endif return EVENT_CONT; } @@ -1157,7 +1151,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) Event **signal_timeout; Continuation *reader_cont = NULL; Continuation *writer_cont = NULL; - ink_hrtime *signal_timeout_at = NULL; + ts_hrtick *signal_timeout_at = NULL; Event *t = NULL; signal_timeout = &t; @@ -1175,7 +1169,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) /* BZ 49408 */ // ink_assert(inactivity_timeout_in); // ink_assert(next_inactivity_timeout_at < Thread::get_hrtime()); - if (!inactivity_timeout_in || next_inactivity_timeout_at > Thread::get_hrtime()) + if (!inactivity_timeout_in.count() || next_inactivity_timeout_at > Thread::get_hrtime()) return EVENT_CONT; signal_event = VC_EVENT_INACTIVITY_TIMEOUT; 
signal_timeout_at = &next_inactivity_timeout_at; @@ -1186,7 +1180,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) #endif *signal_timeout = 0; - *signal_timeout_at = 0; + *signal_timeout_at = TS_HRTICK_ZERO; writer_cont = write.vio._cont; if (closed) { @@ -1200,7 +1194,7 @@ UnixNetVConnection::mainEvent(int event, Event *e) return EVENT_DONE; } - if (!*signal_timeout && !*signal_timeout_at && !closed && write.vio.op == VIO::WRITE && !(f.shutdown & NET_VC_SHUTDOWN_WRITE) && + if (!*signal_timeout && TS_HRTICK_ZERO != *signal_timeout_at && !closed && write.vio.op == VIO::WRITE && !(f.shutdown & NET_VC_SHUTDOWN_WRITE) && reader_cont != write.vio._cont && writer_cont == write.vio._cont) if (write_signal_and_update(signal_event, this) == EVENT_DONE) return EVENT_DONE; @@ -1311,8 +1305,8 @@ UnixNetVConnection::connectUp(EThread *t, int fd) nh = get_NetHandler(t); nh->open_list.enqueue(this); - ink_assert(!inactivity_timeout_in); - ink_assert(!active_timeout_in); + ink_assert(!inactivity_timeout_in.count()); + ink_assert(!active_timeout_in.count()); this->set_local_addr(); action_.continuation->handleEvent(NET_EVENT_OPEN, this); return CONNECT_SUCCESS; diff --git a/iocore/net/UnixUDPNet.cc b/iocore/net/UnixUDPNet.cc index 809000ad2ac..16f94bbb74a 100644 --- a/iocore/net/UnixUDPNet.cc +++ b/iocore/net/UnixUDPNet.cc @@ -207,9 +207,9 @@ class UDPReadContinuation : public Continuation socklen_t *fromaddrlen; int fd; // fd we are reading from int ifd; // poll fd index - ink_hrtime period; // polling period - ink_hrtime elapsed_time; - ink_hrtime timeout_interval; + ts_hrtick period; // polling period + ts_hrtick elapsed_time; + ts_hrtick timeout_interval; }; ClassAllocator udpReadContAllocator("udpReadContAllocator"); @@ -642,11 +642,11 @@ void UDPQueue::service(UDPNetHandler *nh) { (void)nh; - ink_hrtime now = Thread::get_hrtime_updated(); + ts_hrtick now = Thread::get_hrtime_updated(); uint64_t timeSpent = 0; uint64_t pktSendStartTime; UDPPacketInternal *p; - 
ink_hrtime pktSendTime; + ts_hrtick pktSendTime; p = (UDPPacketInternal *)ink_atomiclist_popall(&atomicQueue); if (p) { @@ -683,7 +683,7 @@ UDPQueue::service(UDPNetHandler *nh) pipeInfo.advanceNow(now); SendPackets(); - timeSpent = ink_hrtime_to_msec(now - last_report); + timeSpent = ts_hrtick_to_msec(now - last_report); if (timeSpent > 10000) { last_report = now; added = 0; @@ -696,9 +696,9 @@ void UDPQueue::SendPackets() { UDPPacketInternal *p; - static ink_hrtime lastCleanupTime = Thread::get_hrtime_updated(); - ink_hrtime now = Thread::get_hrtime_updated(); - ink_hrtime send_threshold_time = now + SLOT_TIME; + static ts_hrtick lastCleanupTime = Thread::get_hrtime_updated(); + ts_hrtick now = Thread::get_hrtime_updated(); + ts_hrtick send_threshold_time = now + SLOT_TIME; int32_t bytesThisSlot = INT_MAX, bytesUsed = 0; int32_t bytesThisPipe, sentOne; int64_t pktLen; @@ -740,7 +740,7 @@ UDPQueue::SendPackets() goto sendPackets; } - if ((g_udp_periodicFreeCancelledPkts) && (now - lastCleanupTime > ink_hrtime_from_sec(g_udp_periodicFreeCancelledPkts))) { + if ((g_udp_periodicFreeCancelledPkts) && (now - lastCleanupTime > ts_hrtick_from_sec(g_udp_periodicFreeCancelledPkts))) { pipeInfo.FreeCancelledPackets(g_udp_periodicCleanupSlots); lastCleanupTime = now; } @@ -857,7 +857,7 @@ UDPNetHandler::mainNetEvent(int event, Event *e) } // end for // remove dead UDP connections - ink_hrtime now = Thread::get_hrtime_updated(); + ts_hrtick now = Thread::get_hrtime_updated(); if (now >= nextCheck) { for (uc = udp_polling.head; uc; uc = next) { ink_assert(uc->mutex && uc->continuation); diff --git a/lib/ts/Diags.cc b/lib/ts/Diags.cc index 7bf37bdf16c..5f710463db4 100644 --- a/lib/ts/Diags.cc +++ b/lib/ts/Diags.cc @@ -278,8 +278,9 @@ Diags::print_va(const char *debug_tag, DiagsLevel diags_level, const SourceLocat // prepend timestamp into the timestamped version of the buffer // ////////////////////////////////////////////////////////////////// - tp = ink_gettimeofday(); - 
time_t cur_clock = (time_t)tp.tv_sec; + // tp = ink_gettimeofday(); + time_t cur_clock = ts_get_current_time_t(); + // time_t cur_clock = (time_t)tp.tv_sec; buffer = ink_ctime_r(&cur_clock, timestamp_buf); snprintf(&(timestamp_buf[19]), (sizeof(timestamp_buf) - 20), ".%03d", (int)(tp.tv_usec / 1000)); diff --git a/lib/ts/EventNotify.cc b/lib/ts/EventNotify.cc index 1ba666a041b..d0c82ea139f 100644 --- a/lib/ts/EventNotify.cc +++ b/lib/ts/EventNotify.cc @@ -134,7 +134,7 @@ int EventNotify::timedwait(int timeout) // milliseconds #else ink_timestruc abstime; - abstime = ink_hrtime_to_timespec(ink_get_hrtime_internal() + HRTIME_SECONDS(timeout)); + abstime = ts_hrtick_to_timespec(ink_get_hrtime_internal() + HRTIME_SECONDS(timeout)); return ink_cond_timedwait(&m_cond, &m_mutex, &abstime); #endif } diff --git a/lib/ts/ink_hrtime.cc b/lib/ts/ink_hrtime.cc index b56730f193f..e7dfc3a5005 100644 --- a/lib/ts/ink_hrtime.cc +++ b/lib/ts/ink_hrtime.cc @@ -23,7 +23,7 @@ /************************************************************************** - ink_hrtime.cc + ts_hrtick.cc This file contains code supporting the Inktomi high-resolution timer. 
**************************************************************************/ @@ -166,57 +166,3 @@ squid_timestamp_to_buf(char *buf, unsigned int buf_size, long timestamp_sec, lon return res; } -#ifdef USE_TIME_STAMP_COUNTER_HRTIME -uint32_t -init_hrtime_TCS() -{ - int freqlen = sizeof(hrtime_freq); - if (sysctlbyname("machdep.tsc_freq", &hrtime_freq, (size_t *)&freqlen, NULL, 0) < 0) { - perror("sysctl: machdep.tsc_freq"); - exit(1); - } - hrtime_freq_float = (double)1000000000 / (double)hrtime_freq; - return hrtime_freq; -} - -double hrtime_freq_float = 0.5; // 500 Mhz -uint32_t hrtime_freq = init_hrtime_TCS(); -#endif - -#ifdef NEED_HRTIME_BASIS -timespec timespec_basis; -ink_hrtime hrtime_offset; -ink_hrtime hrtime_basis = init_hrtime_basis(); - -ink_hrtime -init_hrtime_basis() -{ - ink_hrtime t1, t2, b, now; - timespec ts; -#ifdef USE_TIME_STAMP_COUNTER_HRTIME - init_hrtime_TCS(); -#endif - do { - t1 = ink_get_hrtime_internal(); -#if HAVE_CLOCK_GETTIME - ink_assert(!clock_gettime(CLOCK_REALTIME, ×pec_basis)); -#else - { - struct timeval tnow; - ink_assert(!gettimeofday(&tnow, NULL)); - timespec_basis.tv_sec = tnow.tv_sec; - timespec_basis.tv_nsec = tnow.tv_usec * 1000; - } -#endif - t2 = ink_get_hrtime_internal(); - // accuracy must be at least 100 microseconds - } while (t2 - t1 > HRTIME_USECONDS(100)); - b = (t2 + t1) / 2; - now = ink_hrtime_from_timespec(×pec_basis); - ts = ink_hrtime_to_timespec(now); - ink_assert(ts.tv_sec == timespec_basis.tv_sec && ts.tv_nsec == timespec_basis.tv_nsec); - hrtime_offset = now - b; - hrtime_basis = b; - return b; -} -#endif diff --git a/lib/ts/ink_hrtime.h b/lib/ts/ink_hrtime.h index 968ddbb0861..3d2cefa7b22 100644 --- a/lib/ts/ink_hrtime.h +++ b/lib/ts/ink_hrtime.h @@ -28,15 +28,53 @@ This file contains code supporting the Inktomi high-resolution timer. 
**************************************************************************/ -#if !defined(_ink_hrtime_h_) -#define _ink_hrtime_h_ +#if !defined(_ts_hrtick_h_) +#define _ts_hrtick_h_ #include "ts/ink_config.h" #include "ts/ink_assert.h" #include #include +#include #include -typedef int64_t ink_hrtime; + +/** Instaneous time in high resolution. + + @internal @c system_clock or @c high_resolution_clock ? AFAICT the latter is just an alias for + @c system_clock or @c monotonic_clock and I have had bad experiences with the flakiness of + @c monotonic_clock ( or @c CLOCK_MONOTONIC for @c clock_gettime ). The key advantage of going direct + to @c system_clock is not worrying about the epoch when converting to absolute time for system calls. + Testing on Fedora 23 indicates that uses @c system_clock for @c high_resolution_clock and I suspect + that will be the case for any OS recent enough to be supported for ATS 7.0. + + @internal Also this is probably the same as @c std::chrono::system_clock::time_point but better + to be explicit about the time metric. + **/ +typedef std::chrono::time_point ts_hrtick; +/// Durations of specific units. +/// @internal Note durations have no association with any clock. +typedef std::chrono::nanoseconds ts_nanoseconds; ///< Duration in nanoseconds. +typedef std::chrono::microseconds ts_microseconds; ///< During in micro seconds. +typedef std::chrono::milliseconds ts_milliseconds; ///< Duration in milliseconds. +typedef std::chrono::seconds ts_seconds; ///< Duration in seconds. +typedef std::chrono::minutes ts_minutes; ///< Duration in minutes. +typedef std::chrono::hours ts_hours; ///< Duration in hours. + +/// Value to use for the equivalent of '0'. +/// @internal Default constructor iniitializes to zero. +static const ts_hrtick TS_HRTICK_ZERO; + +/// Extraction of the raw integer in a clock time. +static inline ts_hrtick::rep raw_ticks(ts_hrtick t) { return t.time_since_epoch().count(); } +/// Extraction of the raw count in a duration. 
+/// This is really a convenience since it's idential to calling @c count on the duration, +/// but this lets @c raw ticks work on a time point or a duration. +template < typename I, typename R > + static inline I raw_ticks(std::chrono::duration d) { return d.count(); } + +//#include +//typedef ts::NumericType ts_hrtick; +//typedef int64_t ts_hrtick; int squid_timestamp_to_buf(char *buf, unsigned int buf_size, long timestamp_sec, long timestamp_usec); char *int64_to_str(char *buf, unsigned int buf_size, int64_t val, unsigned int *total_chars, unsigned int req_width = 0, @@ -44,235 +82,76 @@ char *int64_to_str(char *buf, unsigned int buf_size, int64_t val, unsigned int * ////////////////////////////////////////////////////////////////////////////// // -// Factors to multiply units by to obtain coresponding ink_hrtime values. -// -////////////////////////////////////////////////////////////////////////////// - -#define HRTIME_FOREVER (10 * HRTIME_DECADE) -#define HRTIME_DECADE (10 * HRTIME_YEAR) -#define HRTIME_YEAR (365 * HRTIME_DAY + HRTIME_DAY / 4) -#define HRTIME_WEEK (7 * HRTIME_DAY) -#define HRTIME_DAY (24 * HRTIME_HOUR) -#define HRTIME_HOUR (60 * HRTIME_MINUTE) -#define HRTIME_MINUTE (60 * HRTIME_SECOND) -#define HRTIME_SECOND (1000 * HRTIME_MSECOND) -#define HRTIME_MSECOND (1000 * HRTIME_USECOND) -#define HRTIME_USECOND (1000 * HRTIME_NSECOND) -#define HRTIME_NSECOND (static_cast(1)) - -#define HRTIME_APPROX_SECONDS(_x) ((_x) >> 30) // off by 7.3% -#define HRTIME_APPROX_FACTOR (((float)(1 << 30)) / (((float)HRTIME_SECOND))) - -////////////////////////////////////////////////////////////////////////////// -// -// Map from units to ink_hrtime values -// -////////////////////////////////////////////////////////////////////////////// - -// simple macros - -#define HRTIME_YEARS(_x) ((_x)*HRTIME_YEAR) -#define HRTIME_WEEKS(_x) ((_x)*HRTIME_WEEK) -#define HRTIME_DAYS(_x) ((_x)*HRTIME_DAY) -#define HRTIME_HOURS(_x) ((_x)*HRTIME_HOUR) -#define HRTIME_MINUTES(_x) 
((_x)*HRTIME_MINUTE) -#define HRTIME_SECONDS(_x) ((_x)*HRTIME_SECOND) -#define HRTIME_MSECONDS(_x) ((_x)*HRTIME_MSECOND) -#define HRTIME_USECONDS(_x) ((_x)*HRTIME_USECOND) -#define HRTIME_NSECONDS(_x) ((_x)*HRTIME_NSECOND) - -// gratuituous wrappers - -static inline ink_hrtime -ink_hrtime_from_years(unsigned int years) -{ - return (HRTIME_YEARS(years)); -} - -static inline ink_hrtime -ink_hrtime_from_weeks(unsigned int weeks) -{ - return (HRTIME_WEEKS(weeks)); -} - -static inline ink_hrtime -ink_hrtime_from_days(unsigned int days) -{ - return (HRTIME_DAYS(days)); -} - -static inline ink_hrtime -ink_hrtime_from_mins(unsigned int mins) -{ - return (HRTIME_MINUTES(mins)); -} - -static inline ink_hrtime -ink_hrtime_from_sec(unsigned int sec) -{ - return (HRTIME_SECONDS(sec)); -} - -static inline ink_hrtime -ink_hrtime_from_msec(unsigned int msec) -{ - return (HRTIME_MSECONDS(msec)); -} - -static inline ink_hrtime -ink_hrtime_from_usec(unsigned int usec) -{ - return (HRTIME_USECONDS(usec)); -} - -static inline ink_hrtime -ink_hrtime_from_nsec(unsigned int nsec) -{ - return (HRTIME_NSECONDS(nsec)); -} - -static inline ink_hrtime -ink_hrtime_from_timespec(const struct timespec *ts) -{ - return ink_hrtime_from_sec(ts->tv_sec) + ink_hrtime_from_nsec(ts->tv_nsec); -} - -static inline ink_hrtime -ink_hrtime_from_timeval(const struct timeval *tv) -{ - return ink_hrtime_from_sec(tv->tv_sec) + ink_hrtime_from_usec(tv->tv_usec); -} - -////////////////////////////////////////////////////////////////////////////// -// -// Map from ink_hrtime values to other units +// Map from units to ts_hrtick values // ////////////////////////////////////////////////////////////////////////////// -static inline ink_hrtime -ink_hrtime_to_years(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_YEAR)); -} - -static inline ink_hrtime -ink_hrtime_to_weeks(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_WEEK)); -} - -static inline ink_hrtime -ink_hrtime_to_days(ink_hrtime t) -{ - return 
((ink_hrtime)(t / HRTIME_DAY)); -} - -static inline ink_hrtime -ink_hrtime_to_mins(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_MINUTE)); -} - -static inline ink_hrtime -ink_hrtime_to_sec(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_SECOND)); -} - -static inline ink_hrtime -ink_hrtime_to_msec(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_MSECOND)); -} - -static inline ink_hrtime -ink_hrtime_to_usec(ink_hrtime t) -{ - return ((ink_hrtime)(t / HRTIME_USECOND)); -} - -static inline ink_hrtime -ink_hrtime_to_nsec(ink_hrtime t) +inline struct timespec +ts_hrtick_to_timespec(ts_hrtick t) { - return ((ink_hrtime)(t / HRTIME_NSECOND)); + struct timespec zret; + auto ct_s = std::chrono::time_point_cast(t); + + zret.tv_sec = ct_s.time_since_epoch().count(); + zret.tv_nsec = (t - std::chrono::time_point_cast(ct_s)).count(); + return zret; } +# if 0 static inline struct timespec -ink_hrtime_to_timespec(ink_hrtime t) +ts_hrtick_to_timespec(ts_hrtick t) { struct timespec ts; - ts.tv_sec = ink_hrtime_to_sec(t); + ts.tv_sec = ts_hrtick_to_sec(t); ts.tv_nsec = t % HRTIME_SECOND; return (ts); } static inline struct timeval -ink_hrtime_to_timeval(ink_hrtime t) +ts_hrtick_to_timeval(ts_hrtick t) { int64_t usecs; struct timeval tv; - usecs = ink_hrtime_to_usec(t); + usecs = ts_hrtick_to_usec(t); tv.tv_sec = usecs / 1000000; tv.tv_usec = usecs % 1000000; return (tv); } +# endif + +# if 0 /* using Jan 1 1970 as the base year, instead of Jan 1 1601, which translates to (365 + 0.25)369*24*60*60 seconds */ -#define NT_TIMEBASE_DIFFERENCE_100NSECS 116444736000000000i64 +//#define NT_TIMEBASE_DIFFERENCE_100NSECS 116444736000000000i64 -static inline ink_hrtime +static inline ts_hrtick ink_get_hrtime_internal() { #if defined(freebsd) || HAVE_CLOCK_GETTIME timespec ts; clock_gettime(CLOCK_REALTIME, &ts); - return ink_hrtime_from_timespec(&ts); + return ts_hrtick_from_timespec(&ts); #else timeval tv; gettimeofday(&tv, NULL); - return ink_hrtime_from_timeval(&tv); + return 
ts_hrtick_from_timeval(&tv); #endif } static inline struct timeval ink_gettimeofday() { - return ink_hrtime_to_timeval(ink_get_hrtime_internal()); -} - -static inline int -ink_time() -{ - return (int)ink_hrtime_to_sec(ink_get_hrtime_internal()); -} - -static inline int -ink_hrtime_diff_msec(ink_hrtime t1, ink_hrtime t2) -{ - return (int)ink_hrtime_to_msec(t1 - t2); -} - -static inline ink_hrtime -ink_hrtime_diff(ink_hrtime t1, ink_hrtime t2) -{ - return (t1 - t2); -} - -static inline ink_hrtime -ink_hrtime_add(ink_hrtime t1, ink_hrtime t2) -{ - return (t1 + t2); + return ts_hrtick_to_timeval(ink_get_hrtime_internal()); } +# endif -static inline void -ink_hrtime_sleep(ink_hrtime delay) +static inline std::time_t ts_get_current_time_t() { - struct timespec ts = ink_hrtime_to_timespec(delay); - nanosleep(&ts, NULL); + return std::chrono::system_clock::to_time_t(std::chrono::system_clock::now()); } -#endif /* _ink_hrtime_h_ */ +#endif /* _ts_hrtick_h_ */ diff --git a/plugins/experimental/memcache/tsmemcache.cc b/plugins/experimental/memcache/tsmemcache.cc index 9268b0255fe..d2f57e7aa83 100644 --- a/plugins/experimental/memcache/tsmemcache.cc +++ b/plugins/experimental/memcache/tsmemcache.cc @@ -40,7 +40,7 @@ static time_t base_day_time; // These should be persistent. 
volatile int32_t MC::verbosity = 0; -volatile ink_hrtime MC::last_flush = 0; +volatile ts_hrtick MC::last_flush = 0; volatile int64_t MC::next_cas = 1; static void @@ -441,9 +441,9 @@ MC::cache_read_event(int event, void *data) goto Lfail; } { - ink_hrtime t = Thread::get_hrtime(); - if (((ink_hrtime)rcache_header->settime) <= last_flush || - t >= ((ink_hrtime)rcache_header->settime) + HRTIME_SECONDS(rcache_header->exptime)) { + ts_hrtick t = Thread::get_hrtime(); + if (((ts_hrtick)rcache_header->settime) <= last_flush || + t >= ((ts_hrtick)rcache_header->settime) + HRTIME_SECONDS(rcache_header->exptime)) { goto Lfail; } } @@ -774,9 +774,9 @@ MC::ascii_set_event(int event, void *data) if (header.nkey != wcache_header->nkey || hlen < (int)(sizeof(MCCacheHeader) + wcache_header->nkey)) { goto Lfail; } - ink_hrtime t = Thread::get_hrtime(); - if (((ink_hrtime)wcache_header->settime) <= last_flush || - t >= ((ink_hrtime)wcache_header->settime) + HRTIME_SECONDS(wcache_header->exptime)) { + ts_hrtick t = Thread::get_hrtime(); + if (((ts_hrtick)wcache_header->settime) <= last_flush || + t >= ((ts_hrtick)wcache_header->settime) + HRTIME_SECONDS(wcache_header->exptime)) { goto Lstale; } if (f.set_add) { @@ -792,7 +792,7 @@ MC::ascii_set_event(int event, void *data) header.settime = Thread::get_hrtime(); if (exptime) { if (exptime > REALTIME_MAXDELTA) { - if (HRTIME_SECONDS(exptime) <= ((ink_hrtime)header.settime)) { + if (HRTIME_SECONDS(exptime) <= ((ts_hrtick)header.settime)) { header.exptime = 0; } else { header.exptime = (int32_t)(exptime - (header.settime / HRTIME_SECOND)); @@ -934,9 +934,9 @@ MC::ascii_incr_decr_event(int event, void *data) if (header.nkey != wcache_header->nkey || hlen < (int)(sizeof(MCCacheHeader) + wcache_header->nkey)) { goto Lfail; } - ink_hrtime t = Thread::get_hrtime(); - if (((ink_hrtime)wcache_header->settime) <= last_flush || - t >= ((ink_hrtime)wcache_header->settime) + HRTIME_SECONDS(wcache_header->exptime)) { + ts_hrtick t = 
Thread::get_hrtime(); + if (((ts_hrtick)wcache_header->settime) <= last_flush || + t >= ((ts_hrtick)wcache_header->settime) + HRTIME_SECONDS(wcache_header->exptime)) { goto Lfail; } } else { @@ -946,7 +946,7 @@ MC::ascii_incr_decr_event(int event, void *data) header.settime = Thread::get_hrtime(); if (exptime) { if (exptime > REALTIME_MAXDELTA) { - if (HRTIME_SECONDS(exptime) <= ((ink_hrtime)header.settime)) { + if (HRTIME_SECONDS(exptime) <= ((ts_hrtick)header.settime)) { header.exptime = 0; } else { header.exptime = (int32_t)(exptime - (header.settime / HRTIME_SECOND)); @@ -1382,7 +1382,7 @@ MC::read_ascii_from_client_event(int event, void *data) GET_NUM(time_offset); } f.noreply = is_noreply(&s, e); - ink_hrtime new_last_flush = Thread::get_hrtime() + HRTIME_SECONDS(time_offset); + ts_hrtick new_last_flush = Thread::get_hrtime() + HRTIME_SECONDS(time_offset); #if __WORDSIZE == 64 last_flush = new_last_flush; // this will be atomic for native word size #else diff --git a/plugins/experimental/memcache/tsmemcache.h b/plugins/experimental/memcache/tsmemcache.h index c86c6f9e73e..9e8ff0a548e 100644 --- a/plugins/experimental/memcache/tsmemcache.h +++ b/plugins/experimental/memcache/tsmemcache.h @@ -153,7 +153,7 @@ struct MC : Continuation { uint64_t delta; static volatile int32_t verbosity; - static volatile ink_hrtime last_flush; + static volatile ts_hrtick last_flush; static volatile int64_t next_cas; int write_to_client(int64_t ntowrite = -1); diff --git a/proxy/ICP.cc b/proxy/ICP.cc index 4080a177c1c..88445a6dae2 100644 --- a/proxy/ICP.cc +++ b/proxy/ICP.cc @@ -881,7 +881,7 @@ ICPPeerReadCont::PeerReadStateMachine(PeerReadData *s, Event *e) ink_assert(s->_ICPReqCont); Ptr ICPReqContMutex(s->_ICPReqCont->mutex); EThread *ethread = this_ethread(); - ink_hrtime request_start_time; + ts_hrtick request_start_time; if (!MUTEX_TAKE_TRY_LOCK(ICPReqContMutex, ethread)) { ICP_INCREMENT_DYN_STAT(icp_response_request_nolock_stat); diff --git a/proxy/ICP.h b/proxy/ICP.h index 
a7fe97862b9..ede9db65145 100644 --- a/proxy/ICP.h +++ b/proxy/ICP.h @@ -622,8 +622,8 @@ class Peer : public RefCountObj // Peer Statistics //------------------- struct PeerStats { - ink_hrtime last_send; - ink_hrtime last_receive; + ts_hrtick last_send; + ts_hrtick last_receive; int sent[ICP_OP_LAST + 1]; int recv[ICP_OP_LAST + 1]; int total_sent; @@ -1129,7 +1129,7 @@ class ICPPeerReadCont : public Continuation ~PeerReadData(); void reset(int full_reset = 0); - ink_hrtime _start_time; + ts_hrtick _start_time; ICPPeerReadCont *_mycont; Ptr _peer; PeerReadState_t _next_state; @@ -1171,7 +1171,7 @@ class ICPPeerReadCont : public Continuation // Class data ICPProcessor *_ICPpr; PeerReadData *_state; - ink_hrtime _start_time; + ts_hrtick _start_time; int _recursion_depth; }; @@ -1192,7 +1192,7 @@ class ICPRequestCont : public Continuation { _start_time = Thread::get_hrtime(); } - inline ink_hrtime + inline ts_hrtick GetRequestStartTime() { return _start_time; @@ -1267,7 +1267,7 @@ class ICPRequestCont : public Continuation class Action _act; // Internal working data - ink_hrtime _start_time; + ts_hrtick _start_time; ICPProcessor *_ICPpr; Event *_timeout; diff --git a/proxy/ICPConfig.cc b/proxy/ICPConfig.cc index 6052489ae72..8684dbaea2f 100644 --- a/proxy/ICPConfig.cc +++ b/proxy/ICPConfig.cc @@ -1305,7 +1305,7 @@ ICPPeriodicCont::DoReconfigAction(int event, Event *e) // Basic accessor object used by the new logging subsystem // for squid access log data for ICP queries. 
//---------------------------------------------------------------- -ink_hrtime +ts_hrtick ICPlog::GetElapsedTime() { return (Thread::get_hrtime() - _s->_start_time); diff --git a/proxy/ICPlog.h b/proxy/ICPlog.h index cb744491c9f..8a515e189de 100644 --- a/proxy/ICPlog.h +++ b/proxy/ICPlog.h @@ -43,7 +43,7 @@ class ICPlog public: inline ICPlog(ICPPeerReadCont::PeerReadData *s) { _s = s; } ~ICPlog() {} - ink_hrtime GetElapsedTime(); + ts_hrtick GetElapsedTime(); sockaddr const *GetClientIP(); in_port_t GetClientPort(); SquidLogCode GetAction(); diff --git a/proxy/InkAPI.cc b/proxy/InkAPI.cc index aac44fc1aca..6bdcc1d792f 100644 --- a/proxy/InkAPI.cc +++ b/proxy/InkAPI.cc @@ -1736,7 +1736,7 @@ TSdrandom() return this_ethread()->generator.drandom(); } -ink_hrtime +ts_hrtick TShrtime() { return Thread::get_hrtime(); @@ -4341,7 +4341,7 @@ TSContDataGet(TSCont contp) } TSAction -TSContSchedule(TSCont contp, ink_hrtime timeout, TSThreadPool tp) +TSContSchedule(TSCont contp, ts_hrtick timeout, TSThreadPool tp) { sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS); @@ -4396,7 +4396,7 @@ TSContSchedule(TSCont contp, ink_hrtime timeout, TSThreadPool tp) } TSAction -TSContScheduleEvery(TSCont contp, ink_hrtime every, TSThreadPool tp) +TSContScheduleEvery(TSCont contp, ts_hrtick every, TSThreadPool tp) { sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS); @@ -4432,7 +4432,7 @@ TSContScheduleEvery(TSCont contp, ink_hrtime every, TSThreadPool tp) } TSAction -TSHttpSchedule(TSCont contp, TSHttpTxn txnp, ink_hrtime timeout) +TSHttpSchedule(TSCont contp, TSHttpTxn txnp, ts_hrtick timeout) { sdk_assert(sdk_sanity_check_iocore_structure(contp) == TS_SUCCESS); @@ -6099,7 +6099,7 @@ TSHttpTxnPushedRespBodyBytesGet(TSHttpTxn txnp) // Get a particular milestone hrtime'r. Note that this can return 0, which means it has not // been set yet. 
TSReturnCode -TSHttpTxnMilestoneGet(TSHttpTxn txnp, TSMilestonesType milestone, ink_hrtime *time) +TSHttpTxnMilestoneGet(TSHttpTxn txnp, TSMilestonesType milestone, ts_hrtick *time) { sdk_assert(sdk_sanity_check_txn(txnp) == TS_SUCCESS); sdk_assert(sdk_sanity_check_null_ptr(time) == TS_SUCCESS); diff --git a/proxy/InkIOCoreAPI.cc b/proxy/InkIOCoreAPI.cc index 5ecfa246d9d..54f59ad4d94 100644 --- a/proxy/InkIOCoreAPI.cc +++ b/proxy/InkIOCoreAPI.cc @@ -351,7 +351,7 @@ TSVIOMutexGet(TSVIO viop) /* High Resolution Time */ -ink_hrtime +ts_hrtick INKBasedTimeGet() { return Thread::get_hrtime(); diff --git a/proxy/Milestones.h b/proxy/Milestones.h index 0472aff9512..d96252b7275 100644 --- a/proxy/Milestones.h +++ b/proxy/Milestones.h @@ -38,21 +38,21 @@ class TransactionMilestones { public: TransactionMilestones() { ink_zero(milestones); } - ink_hrtime &operator[](TSMilestonesType ms) { return milestones[ms]; } - ink_hrtime operator[](TSMilestonesType ms) const { return milestones[ms]; } + ts_hrtick &operator[](TSMilestonesType ms) { return milestones[ms]; } + ts_hrtick operator[](TSMilestonesType ms) const { return milestones[ms]; } /** * Takes two milestones and returns the difference. * @param start The start time * @param end The end time * @return The difference time in milliseconds */ - int64_t + ts_milliseconds::rep difference_msec(TSMilestonesType ms_start, TSMilestonesType ms_end) const { - if (milestones[ms_end] == 0) { + if (milestones[ms_end] == TS_HRTICK_ZERO) { return -1; } - return ink_hrtime_to_msec(milestones[ms_end] - milestones[ms_start]); + return std::chrono::duration_cast(milestones[ms_end] - milestones[ms_start]).count(); } /** @@ -64,17 +64,17 @@ class TransactionMilestones double difference_sec(TSMilestonesType ms_start, TSMilestonesType ms_end) const { - return (double)difference_msec(ms_start, ms_end) / 1000.0; + return milestones[ms_end] == TS_HRTICK_ZERO ? 
-1 : std::chrono::duration>(milestones[ms_end] - milestones[ms_start]).count(); } - ink_hrtime + ts_nanoseconds elapsed(TSMilestonesType ms_start, TSMilestonesType ms_end) const { return milestones[ms_end] - milestones[ms_start]; } private: - ink_hrtime milestones[TS_MILESTONE_LAST_ENTRY]; + ts_hrtick milestones[TS_MILESTONE_LAST_ENTRY]; }; #endif /* _Milestones_h_ */ diff --git a/proxy/PluginVC.cc b/proxy/PluginVC.cc index 5658efa348e..801498783cb 100644 --- a/proxy/PluginVC.cc +++ b/proxy/PluginVC.cc @@ -783,14 +783,14 @@ PluginVC::update_inactive_time() } } -// void PluginVC::setup_event_cb(ink_hrtime in) +// void PluginVC::setup_event_cb(ts_hrtick in) // // Setup up the event processor to call us back. // We've got two different event pointers to handle // locking issues // void -PluginVC::setup_event_cb(ink_hrtime in, Event **e_ptr) +PluginVC::setup_event_cb(ts_hrtick in, Event **e_ptr) { ink_assert(magic == PLUGIN_VC_MAGIC_ALIVE); @@ -814,7 +814,7 @@ PluginVC::setup_event_cb(ink_hrtime in, Event **e_ptr) } void -PluginVC::set_active_timeout(ink_hrtime timeout_in) +PluginVC::set_active_timeout(ts_hrtick timeout_in) { active_timeout = timeout_in; @@ -832,7 +832,7 @@ PluginVC::set_active_timeout(ink_hrtime timeout_in) } void -PluginVC::set_inactivity_timeout(ink_hrtime timeout_in) +PluginVC::set_inactivity_timeout(ts_hrtick timeout_in) { inactive_timeout = timeout_in; if (inactive_timeout != 0) { @@ -861,13 +861,13 @@ PluginVC::cancel_inactivity_timeout() set_inactivity_timeout(0); } -ink_hrtime +ts_hrtick PluginVC::get_active_timeout() { return active_timeout; } -ink_hrtime +ts_hrtick PluginVC::get_inactivity_timeout() { return inactive_timeout; diff --git a/proxy/PluginVC.h b/proxy/PluginVC.h index daa9c0337a3..0447d2edca5 100644 --- a/proxy/PluginVC.h +++ b/proxy/PluginVC.h @@ -89,15 +89,15 @@ class PluginVC : public NetVConnection, public PluginIdentity virtual void reenable_re(VIO *vio); // Timeouts - virtual void set_active_timeout(ink_hrtime timeout_in); 
- virtual void set_inactivity_timeout(ink_hrtime timeout_in); + virtual void set_active_timeout(ts_hrtick timeout_in); + virtual void set_inactivity_timeout(ts_hrtick timeout_in); virtual void cancel_active_timeout(); virtual void cancel_inactivity_timeout(); virtual void add_to_keep_alive_queue(); virtual void remove_from_keep_alive_queue(); virtual bool add_to_active_queue(); - virtual ink_hrtime get_active_timeout(); - virtual ink_hrtime get_inactivity_timeout(); + virtual ts_hrtick get_active_timeout(); + virtual ts_hrtick get_inactivity_timeout(); // Pure virutal functions we need to compile virtual SOCKET get_socket(); @@ -153,7 +153,7 @@ class PluginVC : public NetVConnection, public PluginIdentity void process_close(); void process_timeout(Event **e, int event_to_send); - void setup_event_cb(ink_hrtime in, Event **e_ptr); + void setup_event_cb(ts_hrtick in, Event **e_ptr); void update_inactive_time(); int64_t transfer_bytes(MIOBuffer *transfer_to, IOBufferReader *transfer_from, int64_t act_on); @@ -177,11 +177,11 @@ class PluginVC : public NetVConnection, public PluginIdentity bool deletable; int reentrancy_count; - ink_hrtime active_timeout; + ts_hrtick active_timeout; Event *active_event; - ink_hrtime inactive_timeout; - ink_hrtime inactive_timeout_at; + ts_hrtick inactive_timeout; + ts_hrtick inactive_timeout_at; Event *inactive_event; char const *plugin_tag; diff --git a/proxy/ProxyClientSession.h b/proxy/ProxyClientSession.h index e235f71bdb9..bb4fd93e1fe 100644 --- a/proxy/ProxyClientSession.h +++ b/proxy/ProxyClientSession.h @@ -161,15 +161,15 @@ class ProxyClientSession : public VConnection return api_hookid; } - ink_hrtime ssn_start_time; - ink_hrtime ssn_last_txn_time; + ts_hrtick ssn_start_time; + ts_hrtick ssn_last_txn_time; virtual void - set_active_timeout(ink_hrtime timeout_in) + set_active_timeout(ts_hrtick timeout_in) { } virtual void - set_inactivity_timeout(ink_hrtime timeout_in) + set_inactivity_timeout(ts_hrtick timeout_in) { } virtual 
void diff --git a/proxy/ProxyClientTransaction.h b/proxy/ProxyClientTransaction.h index 1f2a717e66f..3e0f9317a02 100644 --- a/proxy/ProxyClientTransaction.h +++ b/proxy/ProxyClientTransaction.h @@ -43,8 +43,8 @@ class ProxyClientTransaction : public VConnection return (parent) ? parent->get_netvc() : NULL; } - virtual void set_active_timeout(ink_hrtime timeout_in) = 0; - virtual void set_inactivity_timeout(ink_hrtime timeout_in) = 0; + virtual void set_active_timeout(ts_hrtick timeout_in) = 0; + virtual void set_inactivity_timeout(ts_hrtick timeout_in) = 0; virtual void cancel_inactivity_timeout() = 0; virtual void attach_server_session(HttpServerSession *ssession, bool transaction_done = true); diff --git a/proxy/RegressionSM.cc b/proxy/RegressionSM.cc index ecfe0402f2d..e4cf2cd9b22 100644 --- a/proxy/RegressionSM.cc +++ b/proxy/RegressionSM.cc @@ -75,7 +75,7 @@ RegressionSM::xrun(RegressionSM *aparent) } void -RegressionSM::run_in(int *apstatus, ink_hrtime t) +RegressionSM::run_in(int *apstatus, ts_hrtick t) { pstatus = apstatus; SET_HANDLER(&RegressionSM::regression_sm_start); diff --git a/proxy/RegressionSM.h b/proxy/RegressionSM.h index 75513fa3bbf..2da9c8422bb 100644 --- a/proxy/RegressionSM.h +++ b/proxy/RegressionSM.h @@ -48,7 +48,7 @@ struct RegressionSM : public Continuation { // public API void done(int status = REGRESSION_TEST_NOT_RUN); void run(int *pstatus); - void run_in(int *pstatus, ink_hrtime t); + void run_in(int *pstatus, ts_hrtick t); // internal int status; diff --git a/proxy/TestClock.cc b/proxy/TestClock.cc index fc3de2e5694..82cc961bb10 100644 --- a/proxy/TestClock.cc +++ b/proxy/TestClock.cc @@ -27,13 +27,13 @@ void test() { - ink_hrtime t = ink_get_hrtime(); + ts_hrtick t = ink_get_hrtime(); int i = 1000000; timespec ts; while (i--) { clock_gettime(CLOCK_REALTIME, &ts); } - ink_hrtime t2 = ink_get_hrtime(); + ts_hrtick t2 = ink_get_hrtime(); printf("time for clock_gettime %" PRId64 " nsecs\n", (t2 - t) / 1000); t = ink_get_hrtime(); diff 
--git a/proxy/TestClusterHash.cc b/proxy/TestClusterHash.cc index 12cb9acc589..99bfd82d693 100644 --- a/proxy/TestClusterHash.cc +++ b/proxy/TestClusterHash.cc @@ -40,7 +40,7 @@ test() int i; Machine *m; int j; - ink_hrtime t, t2; + ts_hrtick t, t2; int total; int high, low, share; int version = 7; diff --git a/proxy/TestDNS.cc b/proxy/TestDNS.cc index 7dd9e398524..b0a626368b4 100644 --- a/proxy/TestDNS.cc +++ b/proxy/TestDNS.cc @@ -170,13 +170,13 @@ TestDnsStateMachine::processEvent(int event, void *data) } int state_machines_created, state_machines_finished, measurement_interval; -ink_hrtime start_time, last_measurement_time; +ts_hrtick start_time, last_measurement_time; // Following function is called to measure the throughput void complete() { float throughput, cumul_throughput; - ink_hrtime now; + ts_hrtick now; state_machines_finished++; if (!(state_machines_finished % measurement_interval)) { now = Thread::get_hrtime(); @@ -221,7 +221,7 @@ void test() { char host[100]; - ink_hrtime now; + ts_hrtick now; int i; TestDnsStateMachine *test_dns_state_machine; printf("removing file '%s'\n", out_file_name); diff --git a/proxy/TimeTrace.h b/proxy/TimeTrace.h index 40bc06480f4..a9e740a689f 100644 --- a/proxy/TimeTrace.h +++ b/proxy/TimeTrace.h @@ -68,7 +68,7 @@ extern int cluster_send_events; #ifdef ENABLE_TIME_TRACE #define LOG_EVENT_TIME(_start_time, _time_dist, _time_cnt) \ do { \ - ink_hrtime now = ink_get_hrtime(); \ + ts_hrtick now = ink_get_hrtime(); \ unsigned int bucket = (now - _start_time) / HRTIME_MSECONDS(10); \ if (bucket > TIME_DIST_BUCKETS) \ bucket = TIME_DIST_BUCKETS; \ diff --git a/proxy/congest/Congestion.cc b/proxy/congest/Congestion.cc index c25aa458f37..4a49dadb435 100644 --- a/proxy/congest/Congestion.cc +++ b/proxy/congest/Congestion.cc @@ -638,10 +638,10 @@ CongestionEntry::sprint(char *buf, int buflen, int format) char str_time[100] = " "; char addrbuf[INET6_ADDRSTRLEN]; int len = 0; - ink_hrtime timestamp = 0; + ts_hrtick timestamp = 0; 
char state; if (pRecord->max_connection >= 0 && m_num_connections >= pRecord->max_connection) { - timestamp = ink_hrtime_to_sec(Thread::get_hrtime()); + timestamp = ts_hrtick_to_sec(Thread::get_hrtime()); state = 'M'; } else { timestamp = m_last_congested; @@ -691,12 +691,12 @@ CongestionEntry::sprint(char *buf, int buflen, int format) // the lock, discard the event //------------------------------------------------------------- void -CongestionEntry::failed_at(ink_hrtime t) +CongestionEntry::failed_at(ts_hrtick t) { if (pRecord->max_connection_failures == -1) { return; } - // long time = ink_hrtime_to_sec(t); + // long time = ts_hrtick_to_sec(t); long time = t; Debug("congestion_control", "failed_at: %ld", time); MUTEX_TRY_LOCK(lock, m_hist_lock, this_ethread()); diff --git a/proxy/congest/Congestion.h b/proxy/congest/Congestion.h index 3507d105d3d..50ec6be92b9 100644 --- a/proxy/congest/Congestion.h +++ b/proxy/congest/Congestion.h @@ -200,12 +200,12 @@ struct CongestionEntry : public RequestData { // State -- connection failures FailHistory m_history; Ptr m_hist_lock; - ink_hrtime m_last_congested; + ts_hrtick m_last_congested; volatile int m_congested; // 0 | 1 int m_stat_congested_conn_failures; volatile int m_M_congested; - ink_hrtime m_last_M_congested; + ts_hrtick m_last_M_congested; // State -- concorrent connections int m_num_connections; @@ -250,17 +250,17 @@ struct CongestionEntry : public RequestData { /* congestion control functions */ // Is the server congested? 
bool F_congested(); - bool M_congested(ink_hrtime t); + bool M_congested(ts_hrtick t); bool congested(); // Update state info void go_alive(); - void failed_at(ink_hrtime t); + void failed_at(ts_hrtick t); void connection_opened(); void connection_closed(); // Connection controls - bool proxy_retry(ink_hrtime t); + bool proxy_retry(ts_hrtick t); int client_retry_after(); int connect_retries(); int connect_timeout(); @@ -279,14 +279,14 @@ struct CongestionEntry : public RequestData { bool compCongested(); // CongestionEntry and CongestionControl rules interaction helper functions - bool usefulInfo(ink_hrtime t); + bool usefulInfo(ts_hrtick t); bool validate(); void applyNewRule(CongestionControlRecord *rule); void init(CongestionControlRecord *rule); }; inline bool -CongestionEntry::usefulInfo(ink_hrtime t) +CongestionEntry::usefulInfo(ts_hrtick t) { return (m_ref_count > 1 || m_congested != 0 || m_num_connections > 0 || (m_history.last_event + pRecord->fail_window > t && m_history.events > 0)); @@ -297,7 +297,7 @@ CongestionEntry::client_retry_after() { int prat = 0; if (F_congested()) { - prat = pRecord->proxy_retry_interval + m_history.last_event - ink_hrtime_to_sec(Thread::get_hrtime()); + prat = pRecord->proxy_retry_interval + m_history.last_event - ts_hrtick_to_sec(Thread::get_hrtime()); if (prat < 0) prat = 0; } @@ -305,9 +305,9 @@ CongestionEntry::client_retry_after() } inline bool -CongestionEntry::proxy_retry(ink_hrtime t) +CongestionEntry::proxy_retry(ts_hrtick t) { - return ((ink_hrtime_to_sec(t) - m_history.last_event) >= pRecord->proxy_retry_interval); + return ((ts_hrtick_to_sec(t) - m_history.last_event) >= pRecord->proxy_retry_interval); } inline bool @@ -317,7 +317,7 @@ CongestionEntry::F_congested() } inline bool -CongestionEntry::M_congested(ink_hrtime t) +CongestionEntry::M_congested(ts_hrtick t) { if (pRecord->max_connection >= 0 && m_num_connections >= pRecord->max_connection) { if (ink_atomic_swap(&m_M_congested, 1) == 0) { diff --git 
a/proxy/congest/CongestionDB.cc b/proxy/congest/CongestionDB.cc index 32be9c936c7..c41f8006358 100644 --- a/proxy/congest/CongestionDB.cc +++ b/proxy/congest/CongestionDB.cc @@ -126,7 +126,7 @@ static long congestEntryGCTime = 0; void preCongestEntryGC(void) { - congestEntryGCTime = (long)ink_hrtime_to_sec(Thread::get_hrtime()); + congestEntryGCTime = (long)ts_hrtick_to_sec(Thread::get_hrtime()); } // if the entry contains useful info, return false -- keep it @@ -318,8 +318,8 @@ CongestionDBCont::GC(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */) { MUTEX_TRY_LOCK(lock, bucket_mutex, this_ethread()); if (lock.is_locked()) { - ink_hrtime now = Thread::get_hrtime(); - now = ink_hrtime_to_sec(now); + ts_hrtick now = Thread::get_hrtime(); + now = ts_hrtick_to_sec(now); theCongestionDB->RunTodoList(CDBC_pid); Iter it; CongestionEntry *pEntry = theCongestionDB->first_entry(CDBC_pid, &it); diff --git a/proxy/http/Http1ClientSession.h b/proxy/http/Http1ClientSession.h index 8e963eb9e78..bccbe097d34 100644 --- a/proxy/http/Http1ClientSession.h +++ b/proxy/http/Http1ClientSession.h @@ -143,13 +143,13 @@ class Http1ClientSession : public ProxyClientSession } void - set_active_timeout(ink_hrtime timeout_in) + set_active_timeout(ts_hrtick timeout_in) { if (client_vc) client_vc->set_active_timeout(timeout_in); } void - set_inactivity_timeout(ink_hrtime timeout_in) + set_inactivity_timeout(ts_hrtick timeout_in) { if (client_vc) client_vc->set_inactivity_timeout(timeout_in); diff --git a/proxy/http/Http1ClientTransaction.h b/proxy/http/Http1ClientTransaction.h index 8fa73190b3e..a043cdebd65 100644 --- a/proxy/http/Http1ClientTransaction.h +++ b/proxy/http/Http1ClientTransaction.h @@ -144,13 +144,13 @@ class Http1ClientTransaction : public ProxyClientTransaction // Pass on the timeouts to the netvc virtual void - set_active_timeout(ink_hrtime timeout_in) + set_active_timeout(ts_hrtick timeout_in) { if (parent) parent->set_active_timeout(timeout_in); } virtual void - 
set_inactivity_timeout(ink_hrtime timeout_in) + set_inactivity_timeout(ts_hrtick timeout_in) { if (parent) parent->set_inactivity_timeout(timeout_in); diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc index a25ba07a9af..b82142197fe 100644 --- a/proxy/http/HttpSM.cc +++ b/proxy/http/HttpSM.cc @@ -85,13 +85,13 @@ namespace { /// Update the milestone state given the milestones and timer. inline void -milestone_update_api_time(TransactionMilestones &milestones, ink_hrtime &api_timer) +milestone_update_api_time(TransactionMilestones &milestones, ts_hrtick &api_timer) { // Bit of funkiness - we set @a api_timer to be the negative value when we're tracking // non-active API time. In that case we need to make a note of it and flip the value back // to positive. if (api_timer) { - ink_hrtime delta; + ts_hrtick delta; bool active = api_timer >= 0; if (!active) { api_timer = -api_timer; @@ -4734,7 +4734,7 @@ HttpSM::do_http_server_open(bool raw) CONGEST_INCREMENT_DYN_STAT(congested_on_F_stat); handleEvent(CONGESTION_EVENT_CONGESTED_ON_F, NULL); return; - } else if (t_state.pCongestionEntry->M_congested(ink_hrtime_to_sec(milestones[TS_MILESTONE_SERVER_CONNECT]))) { + } else if (t_state.pCongestionEntry->M_congested(ts_hrtick_to_sec(milestones[TS_MILESTONE_SERVER_CONNECT]))) { t_state.pCongestionEntry->stat_inc_M(); t_state.congestion_congested_or_failed = 1; CONGEST_INCREMENT_DYN_STAT(congested_on_M_stat); @@ -5202,11 +5202,11 @@ HttpSM::mark_server_down_on_client_abort() if (milestones[TS_MILESTONE_SERVER_FIRST_CONNECT] != 0 && milestones[TS_MILESTONE_SERVER_FIRST_READ] == 0) { // Check to see if client waited for the threshold // to declare the origin server as down - ink_hrtime wait = Thread::get_hrtime() - milestones[TS_MILESTONE_SERVER_FIRST_CONNECT]; + ts_hrtick wait = Thread::get_hrtime() - milestones[TS_MILESTONE_SERVER_FIRST_CONNECT]; if (wait < 0) { wait = 0; } - if (ink_hrtime_to_sec(wait) > t_state.txn_conf->client_abort_threshold) { + if 
(ts_hrtick_to_sec(wait) > t_state.txn_conf->client_abort_threshold) { t_state.current.server->set_connect_fail(ETIMEDOUT); do_hostdb_update_if_necessary(); } @@ -6895,7 +6895,7 @@ HttpSM::update_stats() } } - ink_hrtime total_time = milestones.elapsed(TS_MILESTONE_SM_START, TS_MILESTONE_SM_FINISH); + ts_hrtick total_time = milestones.elapsed(TS_MILESTONE_SM_START, TS_MILESTONE_SM_FINISH); // ua_close will not be assigned properly in some exceptional situation. // TODO: Assign ua_close with suitable value when HttpTunnel terminates abnormally. @@ -6904,18 +6904,18 @@ HttpSM::update_stats() } // request_process_time = The time after the header is parsed to the completion of the transaction - ink_hrtime request_process_time = milestones[TS_MILESTONE_UA_CLOSE] - milestones[TS_MILESTONE_UA_READ_HEADER_DONE]; + ts_hrtick request_process_time = milestones[TS_MILESTONE_UA_CLOSE] - milestones[TS_MILESTONE_UA_READ_HEADER_DONE]; HttpTransact::client_result_stat(&t_state, total_time, request_process_time); - ink_hrtime ua_write_time; + ts_hrtick ua_write_time; if (milestones[TS_MILESTONE_UA_BEGIN_WRITE] != 0 && milestones[TS_MILESTONE_UA_CLOSE] != 0) { ua_write_time = milestones.elapsed(TS_MILESTONE_UA_BEGIN_WRITE, TS_MILESTONE_UA_CLOSE); } else { ua_write_time = -1; } - ink_hrtime os_read_time; + ts_hrtick os_read_time; if (milestones[TS_MILESTONE_SERVER_READ_HEADER_DONE] != 0 && milestones[TS_MILESTONE_SERVER_CLOSE] != 0) { os_read_time = milestones.elapsed(TS_MILESTONE_SERVER_READ_HEADER_DONE, TS_MILESTONE_SERVER_CLOSE); } else { @@ -6933,7 +6933,7 @@ HttpSM::update_stats() */ // print slow requests if the threshold is set (> 0) and if we are over the time threshold - if (t_state.txn_conf->slow_log_threshold != 0 && ink_hrtime_from_msec(t_state.txn_conf->slow_log_threshold) < total_time) { + if (t_state.txn_conf->slow_log_threshold != 0 && ts_hrtick_from_msec(t_state.txn_conf->slow_log_threshold) < total_time) { URL *url = t_state.hdr_info.client_request.url_get(); char 
url_string[256] = ""; int offset = 0; diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h index c590779a48c..405d4308209 100644 --- a/proxy/http/HttpSM.h +++ b/proxy/http/HttpSM.h @@ -502,7 +502,7 @@ class HttpSM : public Continuation bool server_connection_is_ssl; TransactionMilestones milestones; - ink_hrtime api_timer; + ts_hrtick api_timer; // The next two enable plugins to tag the state machine for // the purposes of logging so the instances can be correlated // with the source plugin. diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc index 23b6824f1f1..761d026f120 100644 --- a/proxy/http/HttpTransact.cc +++ b/proxy/http/HttpTransact.cc @@ -8490,7 +8490,7 @@ HttpTransact::histogram_request_document_size(State *s, int64_t doc_size) } void -HttpTransact::user_agent_connection_speed(State *s, ink_hrtime transfer_time, int64_t nbytes) +HttpTransact::user_agent_connection_speed(State *s, ts_hrtick transfer_time, int64_t nbytes) { float bytes_per_hrtime = (transfer_time == 0) ? 
(nbytes) : ((float)nbytes / (float)(int64_t)transfer_time); int bytes_per_sec = (int)(bytes_per_hrtime * HRTIME_SECOND); @@ -8518,7 +8518,7 @@ HttpTransact::user_agent_connection_speed(State *s, ink_hrtime transfer_time, in * added request_process_time stat for loadshedding foo */ void -HttpTransact::client_result_stat(State *s, ink_hrtime total_time, ink_hrtime request_process_time) +HttpTransact::client_result_stat(State *s, ts_hrtick total_time, ts_hrtick request_process_time) { ClientTransactionResult_t client_transaction_result = CLIENT_TRANSACTION_RESULT_UNDEFINED; @@ -8766,8 +8766,8 @@ HttpTransact::client_result_stat(State *s, ink_hrtime total_time, ink_hrtime req HTTP_INCREMENT_DYN_STAT(http_completed_requests_stat); // Set the stat now that we know what happend - ink_hrtime total_msec = ink_hrtime_to_msec(total_time); - ink_hrtime process_msec = ink_hrtime_to_msec(request_process_time); + ts_hrtick total_msec = ts_hrtick_to_msec(total_time); + ts_hrtick process_msec = ts_hrtick_to_msec(request_process_time); switch (client_transaction_result) { case CLIENT_TRANSACTION_RESULT_HIT_FRESH: HTTP_SUM_DYN_STAT(http_ua_msecs_counts_hit_fresh_stat, total_msec); @@ -8809,7 +8809,7 @@ HttpTransact::client_result_stat(State *s, ink_hrtime total_time, ink_hrtime req } void -HttpTransact::origin_server_connection_speed(State *s, ink_hrtime transfer_time, int64_t nbytes) +HttpTransact::origin_server_connection_speed(State *s, ts_hrtick transfer_time, int64_t nbytes) { float bytes_per_hrtime = (transfer_time == 0) ? 
(nbytes) : ((float)nbytes / (float)(int64_t)transfer_time); int bytes_per_sec = (int)(bytes_per_hrtime * HRTIME_SECOND); @@ -8834,8 +8834,8 @@ HttpTransact::origin_server_connection_speed(State *s, ink_hrtime transfer_time, } void -HttpTransact::update_size_and_time_stats(State *s, ink_hrtime total_time, ink_hrtime user_agent_write_time, - ink_hrtime origin_server_read_time, int user_agent_request_header_size, +HttpTransact::update_size_and_time_stats(State *s, ts_hrtick total_time, ts_hrtick user_agent_write_time, + ts_hrtick origin_server_read_time, int user_agent_request_header_size, int64_t user_agent_request_body_size, int user_agent_response_header_size, int64_t user_agent_response_body_size, int origin_server_request_header_size, int64_t origin_server_request_body_size, int origin_server_response_header_size, diff --git a/proxy/http/HttpTransact.h b/proxy/http/HttpTransact.h index 7dca0458408..64ab4893f96 100644 --- a/proxy/http/HttpTransact.h +++ b/proxy/http/HttpTransact.h @@ -1321,8 +1321,8 @@ class HttpTransact static const char *get_error_string(int erno); // the stat functions - static void update_size_and_time_stats(State *s, ink_hrtime total_time, ink_hrtime user_agent_write_time, - ink_hrtime origin_server_read_time, int user_agent_request_header_size, + static void update_size_and_time_stats(State *s, ts_hrtick total_time, ts_hrtick user_agent_write_time, + ts_hrtick origin_server_read_time, int user_agent_request_header_size, int64_t user_agent_request_body_size, int user_agent_response_header_size, int64_t user_agent_response_body_size, int origin_server_request_header_size, int64_t origin_server_request_body_size, int origin_server_response_header_size, @@ -1330,9 +1330,9 @@ class HttpTransact int64_t pushed_response_body_size, const TransactionMilestones &milestones); static void histogram_request_document_size(State *s, int64_t size); static void histogram_response_document_size(State *s, int64_t size); - static void 
user_agent_connection_speed(State *s, ink_hrtime transfer_time, int64_t nbytes); - static void origin_server_connection_speed(State *s, ink_hrtime transfer_time, int64_t nbytes); - static void client_result_stat(State *s, ink_hrtime total_time, ink_hrtime request_process_time); + static void user_agent_connection_speed(State *s, ts_hrtick transfer_time, int64_t nbytes); + static void origin_server_connection_speed(State *s, ts_hrtick transfer_time, int64_t nbytes); + static void client_result_stat(State *s, ts_hrtick total_time, ts_hrtick request_process_time); static void delete_warning_value(HTTPHdr *to_warn, HTTPWarningCode warning_code); static bool is_connection_collapse_checks_success(State *s); // YTS Team, yamsat }; diff --git a/proxy/http2/Http2Stream.cc b/proxy/http2/Http2Stream.cc index 321dda0007a..e0ddf571a6b 100644 --- a/proxy/http2/Http2Stream.cc +++ b/proxy/http2/Http2Stream.cc @@ -607,7 +607,7 @@ Http2Stream::destroy() this->do_io_write(NULL, 0, NULL); HTTP2_DECREMENT_THREAD_DYN_STAT(HTTP2_STAT_CURRENT_CLIENT_STREAM_COUNT, _thread); - ink_hrtime end_time = Thread::get_hrtime(); + ts_hrtick end_time = Thread::get_hrtime(); HTTP2_SUM_THREAD_DYN_STAT(HTTP2_STAT_TOTAL_TRANSACTIONS_TIME, _thread, end_time - _start_time); _req_header.destroy(); response_header.destroy(); @@ -697,7 +697,7 @@ Http2Stream::response_get_data_reader() const } void -Http2Stream::set_active_timeout(ink_hrtime timeout_in) +Http2Stream::set_active_timeout(ts_hrtick timeout_in) { active_timeout = timeout_in; clear_active_timer(); @@ -707,7 +707,7 @@ Http2Stream::set_active_timeout(ink_hrtime timeout_in) } void -Http2Stream::set_inactivity_timeout(ink_hrtime timeout_in) +Http2Stream::set_inactivity_timeout(ts_hrtick timeout_in) { inactive_timeout = timeout_in; if (inactive_timeout > 0) { diff --git a/proxy/http2/Http2Stream.h b/proxy/http2/Http2Stream.h index 5d18b7eb893..0e392fdae0c 100644 --- a/proxy/http2/Http2Stream.h +++ b/proxy/http2/Http2Stream.h @@ -223,8 +223,8 @@ class 
Http2Stream : public ProxyClientTransaction return false; } - virtual void set_active_timeout(ink_hrtime timeout_in); - virtual void set_inactivity_timeout(ink_hrtime timeout_in); + virtual void set_active_timeout(ts_hrtick timeout_in); + virtual void set_inactivity_timeout(ts_hrtick timeout_in); virtual void cancel_inactivity_timeout(); void clear_inactive_timer(); void clear_active_timer(); @@ -237,7 +237,7 @@ class Http2Stream : public ProxyClientTransaction bool response_is_data_available() const; Event *send_tracked_event(Event *event, int send_event, VIO *vio); HTTPParser http_parser; - ink_hrtime _start_time; + ts_hrtick _start_time; EThread *_thread; Http2StreamId _id; Http2StreamState _state; @@ -260,11 +260,11 @@ class Http2Stream : public ProxyClientTransaction Event *cross_thread_event; // Support stream-specific timeouts - ink_hrtime active_timeout; + ts_hrtick active_timeout; Event *active_event; - ink_hrtime inactive_timeout; - ink_hrtime inactive_timeout_at; + ts_hrtick inactive_timeout; + ts_hrtick inactive_timeout_at; Event *inactive_event; Event *read_event; diff --git a/proxy/logging/Log.cc b/proxy/logging/Log.cc index 8f0ace10d7b..a943cf9eadc 100644 --- a/proxy/logging/Log.cc +++ b/proxy/logging/Log.cc @@ -1239,7 +1239,7 @@ Log::flush_thread_main(void * /* args ATS_UNUSED */) { LogBuffer *logbuffer; LogFlushData *fdata; - ink_hrtime now, last_time = 0; + ts_hrtick now, last_time = 0; int len, total_bytes; SLL link, invert_link; ProxyMutex *mutex = this_thread()->mutex.get(); diff --git a/proxy/logging/LogAccessHttp.cc b/proxy/logging/LogAccessHttp.cc index 977baaf6657..28bb0f3177b 100644 --- a/proxy/logging/LogAccessHttp.cc +++ b/proxy/logging/LogAccessHttp.cc @@ -1207,8 +1207,8 @@ int LogAccessHttp::marshal_server_resp_time_ms(char *buf) { if (buf) { - ink_hrtime elapsed = m_http_sm->milestones[TS_MILESTONE_SERVER_CLOSE] - m_http_sm->milestones[TS_MILESTONE_SERVER_CONNECT]; - int64_t val = (int64_t)ink_hrtime_to_msec(elapsed); + ts_hrtick 
elapsed = m_http_sm->milestones[TS_MILESTONE_SERVER_CLOSE] - m_http_sm->milestones[TS_MILESTONE_SERVER_CONNECT]; + int64_t val = (int64_t)ts_hrtick_to_msec(elapsed); marshal_int(buf, val); } return INK_MIN_ALIGN; @@ -1218,8 +1218,8 @@ int LogAccessHttp::marshal_server_resp_time_s(char *buf) { if (buf) { - ink_hrtime elapsed = m_http_sm->milestones[TS_MILESTONE_SERVER_CLOSE] - m_http_sm->milestones[TS_MILESTONE_SERVER_CONNECT]; - int64_t val = (int64_t)ink_hrtime_to_sec(elapsed); + ts_hrtick elapsed = m_http_sm->milestones[TS_MILESTONE_SERVER_CLOSE] - m_http_sm->milestones[TS_MILESTONE_SERVER_CONNECT]; + int64_t val = (int64_t)ts_hrtick_to_sec(elapsed); marshal_int(buf, val); } return INK_MIN_ALIGN; @@ -1403,8 +1403,8 @@ int LogAccessHttp::marshal_transfer_time_ms(char *buf) { if (buf) { - ink_hrtime elapsed = m_http_sm->milestones[TS_MILESTONE_SM_FINISH] - m_http_sm->milestones[TS_MILESTONE_SM_START]; - int64_t val = (int64_t)ink_hrtime_to_msec(elapsed); + ts_hrtick elapsed = m_http_sm->milestones[TS_MILESTONE_SM_FINISH] - m_http_sm->milestones[TS_MILESTONE_SM_START]; + int64_t val = (int64_t)ts_hrtick_to_msec(elapsed); marshal_int(buf, val); } return INK_MIN_ALIGN; @@ -1414,8 +1414,8 @@ int LogAccessHttp::marshal_transfer_time_s(char *buf) { if (buf) { - ink_hrtime elapsed = m_http_sm->milestones[TS_MILESTONE_SM_FINISH] - m_http_sm->milestones[TS_MILESTONE_SM_START]; - int64_t val = (int64_t)ink_hrtime_to_sec(elapsed); + ts_hrtick elapsed = m_http_sm->milestones[TS_MILESTONE_SM_FINISH] - m_http_sm->milestones[TS_MILESTONE_SM_START]; + int64_t val = (int64_t)ts_hrtick_to_sec(elapsed); marshal_int(buf, val); } return INK_MIN_ALIGN; @@ -1665,7 +1665,7 @@ int LogAccessHttp::marshal_milestone(TSMilestonesType ms, char *buf) { if (buf) { - int64_t val = ink_hrtime_to_msec(m_http_sm->milestones[ms]); + int64_t val = ts_hrtick_to_msec(m_http_sm->milestones[ms]); marshal_int(buf, val); } return INK_MIN_ALIGN; @@ -1675,8 +1675,8 @@ int 
LogAccessHttp::marshal_milestone_diff(TSMilestonesType ms1, TSMilestonesType ms2, char *buf) { if (buf) { - ink_hrtime elapsed = m_http_sm->milestones.elapsed(ms2, ms1); - int64_t val = (int64_t)ink_hrtime_to_msec(elapsed); + ts_hrtick elapsed = m_http_sm->milestones.elapsed(ms2, ms1); + int64_t val = (int64_t)ts_hrtick_to_msec(elapsed); marshal_int(buf, val); } return INK_MIN_ALIGN; diff --git a/proxy/logging/LogAccessICP.cc b/proxy/logging/LogAccessICP.cc index 80f11ca6eec..91baa402549 100644 --- a/proxy/logging/LogAccessICP.cc +++ b/proxy/logging/LogAccessICP.cc @@ -272,7 +272,7 @@ int LogAccessICP::marshal_transfer_time_ms(char *buf) { if (buf) { - ink_hrtime elapsed = m_icp_log->GetElapsedTime(); + ts_hrtick elapsed = m_icp_log->GetElapsedTime(); elapsed /= HRTIME_MSECOND; int64_t val = (int64_t)elapsed; marshal_int(buf, val); @@ -284,7 +284,7 @@ int LogAccessICP::marshal_transfer_time_s(char *buf) { if (buf) { - ink_hrtime elapsed = m_icp_log->GetElapsedTime(); + ts_hrtick elapsed = m_icp_log->GetElapsedTime(); elapsed /= HRTIME_SECOND; int64_t val = (int64_t)elapsed; marshal_int(buf, val); diff --git a/tools/jtest/jtest.cc b/tools/jtest/jtest.cc index ef9bdd08cc5..ab49945ef6c 100644 --- a/tools/jtest/jtest.cc +++ b/tools/jtest/jtest.cc @@ -216,7 +216,7 @@ static uint64_t total_server_response_body_bytes = 0; static uint64_t total_server_response_header_bytes = 0; static uint64_t total_proxy_response_body_bytes = 0; static uint64_t total_proxy_response_header_bytes = 0; -static ink_hrtime now = 0, start_time = 0; +static ts_hrtick now = 0, start_time = 0; static int extra_headers = 0; static int alternates = 0; static int abort_retry_speed = 0; @@ -305,9 +305,9 @@ struct FD { int fd; poll_cb read_cb; poll_cb write_cb; - ink_hrtime start; - ink_hrtime active; - ink_hrtime ready; + ts_hrtick start; + ts_hrtick active; + ts_hrtick ready; double doc; int doc_length; @@ -605,11 +605,11 @@ fast(int sock, int speed, int d) } // Return the number of milliseconds 
elapsed since the start of the request. -static ink_hrtime +static ts_hrtick elapsed_from_start(int sock) { - ink_hrtime now = ink_get_hrtime_internal(); - return ink_hrtime_diff_msec(now, fd[sock].start); + ts_hrtick now = ink_get_hrtime_internal(); + return ts_hrtick_diff_msec(now, fd[sock].start); } static int From 8592ab7857d97f4b5e0b17ccfd38d6e7d4d9bbc5 Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Mon, 5 Sep 2016 21:53:05 -0500 Subject: [PATCH 2/3] TS-4532: Trying out using ink_zero overload to set durations to zero. --- iocore/net/I_NetVConnection.h | 4 ++-- iocore/net/P_UnixNetVConnection.h | 16 ++++++++-------- iocore/net/UnixNet.cc | 6 +++--- iocore/net/UnixNetVConnection.cc | 4 ++-- lib/ts/ink_hrtime.h | 15 +++++++++++++++ 5 files changed, 30 insertions(+), 15 deletions(-) diff --git a/iocore/net/I_NetVConnection.h b/iocore/net/I_NetVConnection.h index b75ad86298c..4e45f08d618 100644 --- a/iocore/net/I_NetVConnection.h +++ b/iocore/net/I_NetVConnection.h @@ -399,7 +399,7 @@ class NetVConnection : public VConnection interfaces. */ - virtual void set_active_timeout(ts_nanoseconds timeout_in) = 0; + virtual void set_active_timeout(ts_seconds timeout_in) = 0; /** Sets time after which SM should be notified if the requested @@ -413,7 +413,7 @@ class NetVConnection : public VConnection is currently active. See section on timeout semantics above. */ - virtual void set_inactivity_timeout(ts_nanoseconds timeout_in) = 0; + virtual void set_inactivity_timeout(ts_seconds timeout_in) = 0; /** Clears the active timeout. No active timeouts will be sent until diff --git a/iocore/net/P_UnixNetVConnection.h b/iocore/net/P_UnixNetVConnection.h index d82aee598eb..7bc64131d26 100644 --- a/iocore/net/P_UnixNetVConnection.h +++ b/iocore/net/P_UnixNetVConnection.h @@ -145,8 +145,8 @@ class UnixNetVConnection : public NetVConnection // called when handing an event from this NetVConnection,// // or the NetVConnection creation callback. 
// //////////////////////////////////////////////////////////// - virtual void set_active_timeout(ts_nanoseconds timeout_in); - virtual void set_inactivity_timeout(ts_nanoseconds timeout_in); + virtual void set_active_timeout(ts_seconds timeout_in); + virtual void set_inactivity_timeout(ts_seconds timeout_in); virtual void cancel_active_timeout(); virtual void cancel_inactivity_timeout(); virtual void set_action(Continuation *c); @@ -240,8 +240,8 @@ class UnixNetVConnection : public NetVConnection LINK(UnixNetVConnection, keep_alive_queue_link); LINK(UnixNetVConnection, active_queue_link); - ts_nanoseconds inactivity_timeout_in; - ts_nanoseconds active_timeout_in; + ts_seconds inactivity_timeout_in; + ts_seconds active_timeout_in; #ifdef INACTIVITY_TIMEOUT Event *inactivity_timeout; Event *activity_timeout; @@ -349,7 +349,7 @@ UnixNetVConnection::get_inactivity_timeout() } TS_INLINE void -UnixNetVConnection::set_inactivity_timeout(ts_hrtick timeout_in) +UnixNetVConnection::set_inactivity_timeout(ts_seconds timeout_in) { Debug("socket", "Set inactive timeout=%" PRId64 ", for NetVC=%p", timeout_in.count(), this); inactivity_timeout_in = timeout_in; @@ -384,7 +384,7 @@ UnixNetVConnection::set_inactivity_timeout(ts_hrtick timeout_in) } TS_INLINE void -UnixNetVConnection::set_active_timeout(ts_nanoseconds timeout_in) +UnixNetVConnection::set_active_timeout(ts_seconds timeout_in) { Debug("socket", "Set active timeout=%" PRId64 ", NetVC=%p", timeout_in.count(), this); active_timeout_in = timeout_in; @@ -417,7 +417,7 @@ TS_INLINE void UnixNetVConnection::cancel_inactivity_timeout() { Debug("socket", "Cancel inactive timeout for NetVC=%p", this); - inactivity_timeout_in = ts_nanoseconds::zero(); + inactivity_timeout_in = inactivity_timeout_in.zero(); #ifdef INACTIVITY_TIMEOUT if (inactivity_timeout) { Debug("socket", "Cancel inactive timeout for NetVC=%p", this); @@ -433,7 +433,7 @@ TS_INLINE void UnixNetVConnection::cancel_active_timeout() { Debug("socket", "Cancel active 
timeout for NetVC=%p", this); - active_timeout_in = ts_nanoseconds::zero(); + active_timeout_in = active_timeout_in.zero(); #ifdef INACTIVITY_TIMEOUT if (active_timeout) { Debug("socket", "Cancel active timeout for NetVC=%p", this); diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc index 6c14e455dcb..9fbce027f3d 100644 --- a/iocore/net/UnixNet.cc +++ b/iocore/net/UnixNet.cc @@ -119,13 +119,13 @@ class InactivityCop : public Continuation } void - set_default_timeout(ts_nanoseconds x) + set_default_timeout(ts_seconds x) { - default_inactivity_timeout = x; + default_inactivity_timeout = ts_seconds(x); } private: - ts_nanoseconds default_inactivity_timeout; // only used when one is not set for some bad reason + ts_seconds default_inactivity_timeout; // only used when one is not set for some bad reason }; int diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc index 6b4a702970b..6dc86edf23e 100644 --- a/iocore/net/UnixNetVConnection.cc +++ b/iocore/net/UnixNetVConnection.cc @@ -120,9 +120,9 @@ close_UnixNetVConnection(UnixNetVConnection *vc, EThread *t) vc->next_inactivity_timeout_at = TS_HRTICK_ZERO; vc->next_activity_timeout_at = TS_HRTICK_ZERO; #endif - vc->inactivity_timeout_in = ts_nanoseconds::zero(); + ink_zero(vc->inactivity_timeout_in); - vc->active_timeout_in = ts_nanoseconds::zero(); + ink_zero(vc->active_timeout_in); if (nh) { nh->open_list.remove(vc); nh->cop_list.remove(vc); diff --git a/lib/ts/ink_hrtime.h b/lib/ts/ink_hrtime.h index 3d2cefa7b22..fc17e73933b 100644 --- a/lib/ts/ink_hrtime.h +++ b/lib/ts/ink_hrtime.h @@ -60,6 +60,21 @@ typedef std::chrono::seconds ts_seconds; ///< Duration in seconds. typedef std::chrono::minutes ts_minutes; ///< Duration in minutes. typedef std::chrono::hours ts_hours; ///< Duration in hours. +/** Set a duration to zero. + + @note This is deliberately modeled on the pre-existing @c ink_zero and has the same semantic. 
+ The utility is more obvious for arguments that are long pointer paths to the actual duration. + @code + ts_milliseconds timeout; + ink_zero(timeout); + @endcode +*/ +template < typename R, typename D> +inline void ink_zero(std::chrono::duration<R, D>& d) +{ + d = d.zero(); +} + /// Value to use for the equivalent of '0'. /// @internal Default constructor iniitializes to zero. static const ts_hrtick TS_HRTICK_ZERO; From 3a7d43ee98343794e191baef169239e380ef71cd Mon Sep 17 00:00:00 2001 From: "Alan M. Carroll" Date: Tue, 6 Sep 2016 19:30:41 -0500 Subject: [PATCH 3/3] TS-4532: More tweaks. --- iocore/aio/P_AIO.h | 2 +- iocore/net/SSLConfig.cc | 6 +++--- iocore/net/SSLNetProcessor.cc | 2 +- iocore/net/Socks.cc | 8 ++++---- proxy/IPAllow.h | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/iocore/aio/P_AIO.h b/iocore/aio/P_AIO.h index 27b69021c67..53bc3d82d26 100644 --- a/iocore/aio/P_AIO.h +++ b/iocore/aio/P_AIO.h @@ -98,7 +98,7 @@ struct AIO_Reqs; struct AIOCallbackInternal : public AIOCallback { AIOCallback *first; AIO_Reqs *aio_req; - ts_hrtick sleep_time; + ts_nanoseconds sleep_time; int io_complete(int event, void *data); AIOCallbackInternal() { diff --git a/iocore/net/SSLConfig.cc b/iocore/net/SSLConfig.cc index 37d8bffb7b2..c9e12c7587d 100644 --- a/iocore/net/SSLConfig.cc +++ b/iocore/net/SSLConfig.cc @@ -376,9 +376,9 @@ SSLCertificateConfig::reconfigure() // Test SSL certificate loading startup. With large numbers of certificates, reloading can take time, so delay // twice the healthcheck period to simulate a loading a large certificate set. 
if (is_action_tag_set("test.multicert.delay")) { - const int secs = 60; - Debug("ssl", "delaying certificate reload by %dsecs", secs); - ts_hrtick_sleep(HRTIME_SECONDS(secs)); + static const int delay = 60; + Debug("ssl", "delaying certificate reload by %d secs", delay); + sleep(delay); } SSLParseCertificateConfiguration(params, lookup); diff --git a/iocore/net/SSLNetProcessor.cc b/iocore/net/SSLNetProcessor.cc index 9d2ef470b75..242d9d3e085 100644 --- a/iocore/net/SSLNetProcessor.cc +++ b/iocore/net/SSLNetProcessor.cc @@ -88,7 +88,7 @@ SSLNetProcessor::start(int number_of_ssl_threads, size_t stacksize) #ifdef HAVE_OPENSSL_OCSP_STAPLING if (SSLConfigParams::ssl_ocsp_enabled) { EventType ET_OCSP = eventProcessor.spawn_event_threads(1, "ET_OCSP", stacksize); - eventProcessor.schedule_every(new OCSPContinuation(), HRTIME_SECONDS(SSLConfigParams::ssl_ocsp_update_period), ET_OCSP); + eventProcessor.schedule_every(new OCSPContinuation(), ts_seconds(SSLConfigParams::ssl_ocsp_update_period), ET_OCSP); } #endif /* HAVE_OPENSSL_OCSP_STAPLING */ diff --git a/iocore/net/Socks.cc b/iocore/net/Socks.cc index a62a6e1cf79..b2a3fbec90c 100644 --- a/iocore/net/Socks.cc +++ b/iocore/net/Socks.cc @@ -76,7 +76,7 @@ SocksEntry::init(Ptr &m, SocksNetVC *vc, unsigned char socks_support nattempts = 0; findServer(); - timeout = this_ethread()->schedule_in(this, HRTIME_SECONDS(netProcessor.socks_conf_stuff->server_connect_timeout)); + timeout = this_ethread()->schedule_in(this, ts_seconds(netProcessor.socks_conf_stuff->server_connect_timeout)); write_done = false; } @@ -215,7 +215,7 @@ SocksEntry::startEvent(int event, void *data) netVConnection = 0; } - timeout = this_ethread()->schedule_in(this, HRTIME_SECONDS(netProcessor.socks_conf_stuff->server_connect_timeout)); + timeout = this_ethread()->schedule_in(this, ts_seconds(netProcessor.socks_conf_stuff->server_connect_timeout)); write_done = false; @@ -285,7 +285,7 @@ SocksEntry::mainEvent(int event, void *data) if (!timeout) { /* timeout 
would be already set when we come here from StartEvent() */ - timeout = this_ethread()->schedule_in(this, HRTIME_SECONDS(netProcessor.socks_conf_stuff->socks_timeout)); + timeout = this_ethread()->schedule_in(this, ts_seconds(netProcessor.socks_conf_stuff->socks_timeout)); } netVConnection->do_io_write(this, n_bytes, reader, 0); @@ -319,7 +319,7 @@ SocksEntry::mainEvent(int event, void *data) break; } - timeout = this_ethread()->schedule_in(this, HRTIME_SECONDS(netProcessor.socks_conf_stuff->socks_timeout)); + timeout = this_ethread()->schedule_in(this, ts_seconds(netProcessor.socks_conf_stuff->socks_timeout)); netVConnection->do_io_read(this, n_bytes, buf); diff --git a/proxy/IPAllow.h b/proxy/IPAllow.h index 5fee699b2e2..b3e17b8ed61 100644 --- a/proxy/IPAllow.h +++ b/proxy/IPAllow.h @@ -48,7 +48,7 @@ struct IpAllowUpdate; // a reconfig event happens that the old table gets thrown // away // -static uint64_t const IP_ALLOW_TIMEOUT = HRTIME_HOUR; +static ts_seconds const IP_ALLOW_TIMEOUT = ts_hours(1); /** An access control record. It has the methods permitted and the source line.