From 1097b545b809181b14ceb567167777caa9f0c3a9 Mon Sep 17 00:00:00 2001 From: William Woodall Date: Fri, 4 Dec 2015 12:21:03 -0800 Subject: [PATCH] implemented/updated some time utilities --- rcl/include/rcl/time.h | 42 +++++++++++- rcl/src/rcl/time.c | 147 ++++++++++++++++++++++++++++++++++++++++- rcl/src/rcl/wait.c | 2 +- 3 files changed, 186 insertions(+), 5 deletions(-) diff --git a/rcl/include/rcl/time.h b/rcl/include/rcl/time.h index e25c923..36d0d71 100644 --- a/rcl/include/rcl/time.h +++ b/rcl/include/rcl/time.h @@ -20,6 +20,7 @@ extern "C" { #endif +#include "rcl/types.h" #include "rmw/rmw.h" #define RCL_S_TO_NS(seconds) seconds * 1000000000 @@ -32,11 +33,48 @@ typedef rmw_time_t rcl_time_t; +/// Create a rcl_time_t struct with a given signed number of nanoseconds. rcl_time_t -rcl_time_t_from_int64_t_nanoseconds(int64_t nanoseconds); +rcl_time_from_int64_t_nanoseconds(int64_t nanoseconds); +/// Create a rcl_time_t struct with a given unsigned number of nanoseconds. rcl_time_t -rcl_time_t_from_uint64_t_nanoseconds(uint64_t nanoseconds); +rcl_time_from_uint64_t_nanoseconds(uint64_t nanoseconds); + +/// Retrieve the current time as a rcl_time_t struct. +/* The now argument must point to an allocated rcl_time_t struct, as the + * result is copied into this variable. + * + * This function is thread-safe. + * This function is lock-free, with an exception on Windows. + * On Windows this is lock-free if the C11's stdatomic.h function + * atomic_is_lock_free() returns true for atomic_int_least64_t. + * + * \param[out] now a rcl_time_t struct in which the current time is stored + * \return RCL_RET_OK if the current time was successfully obtained, or + * RCL_RET_INVALID_ARGUMENT if any arguments are invalid, or + * RCL_RET_ERROR if an unspecified error occurs. + */ +rcl_ret_t +rcl_time_now(rcl_time_t * now); + +/// Normalize a time struct. 
+/* If there are more than 10^9 nanoseconds in the nsec field, the extra seconds + * are removed from the nsec field and added to the sec field. + * Overflow of the sec field due to this normalization is ignored. + * + * This function is thread-safe. + * This function is lock-free, with an exception on Windows. + * On Windows this is lock-free if the C11's stdatomic.h function + * atomic_is_lock_free() returns true for atomic_int_least64_t. + * + * \param[out] now a rcl_time_t struct to be normalized + * \return RCL_RET_OK if the struct was normalized successfully, or + * RCL_RET_INVALID_ARGUMENT if any arguments are invalid, or + * RCL_RET_ERROR if an unspecified error occurs. + */ +rcl_ret_t +rcl_time_normalize(rcl_time_t * now); #if __cplusplus } diff --git a/rcl/src/rcl/time.c b/rcl/src/rcl/time.c index 429d7c8..eab5fdf 100644 --- a/rcl/src/rcl/time.c +++ b/rcl/src/rcl/time.c @@ -17,10 +17,30 @@ extern "C" { #endif +// Appropriate check according to: +// http://man7.org/linux/man-pages/man2/clock_gettime.2.html +#define HAS_CLOCK_GETTIME (_POSIX_C_SOURCE >= 199309L) + #include "rcl/time.h" +#include <math.h> +#include <stdatomic.h> +#include <time.h> + +#if defined(__MACH__) +#include <mach/clock.h> +#include <mach/mach.h> +#endif +#if defined(WIN32) +#include <winsock2.h> +#include <windows.h> +#endif + +#include "./common.h" +#include "rcl/error_handling.h" + rcl_time_t -rcl_time_t_from_int64_t_nanoseconds(int64_t nanoseconds) +rcl_time_from_int64_t_nanoseconds(int64_t nanoseconds) { rcl_time_t result; result.sec = RCL_NS_TO_S(nanoseconds); @@ -29,7 +49,7 @@ rcl_time_t_from_int64_t_nanoseconds(int64_t nanoseconds) } rcl_time_t -rcl_time_t_from_uint64_t_nanoseconds(uint64_t nanoseconds) +rcl_time_from_uint64_t_nanoseconds(uint64_t nanoseconds) { rcl_time_t result; result.sec = RCL_NS_TO_S(nanoseconds); @@ -37,6 +57,129 @@ rcl_time_t_from_uint64_t_nanoseconds(uint64_t nanoseconds) return result; } +static void +__timespec_get_now(struct timespec * timespec_now) +{ +#if defined(__MACH__) + // On OS X use clock_get_time. 
+ clock_serv_t cclock; + mach_timespec_t mts; + host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); + clock_get_time(cclock, &mts); + mach_port_deallocate(mach_task_self(), cclock); + timespec_now->tv_sec = mts.tv_sec; + timespec_now->tv_nsec = mts.tv_nsec; +#else // if defined(__MACH__) + // Otherwise use clock_gettime. + clock_gettime(CLOCK_REALTIME, timespec_now); +#endif // if defined(__MACH__) +} + +rcl_ret_t +rcl_time_now(rcl_time_t * now) +{ + RCL_CHECK_ARGUMENT_FOR_NULL(now, RCL_RET_INVALID_ARGUMENT); +#ifndef WIN32 + // Unix implementations +#if HAS_CLOCK_GETTIME || defined(__MACH__) + // If clock_gettime is available or on OS X, use a timespec. + struct timespec timespec_now; + __timespec_get_now(&timespec_now); + now->sec = timespec_now.tv_sec; + now->nsec = timespec_now.tv_nsec; +#else // if HAS_CLOCK_GETTIME || defined(__MACH__) + // Otherwise we have to fallback to gettimeofday. + struct timeval timeofday; + gettimeofday(&timeofday, NULL); + now->sec = timeofday.tv_sec; + now->nsec = timeofday.tv_usec * 1000; +#endif // if HAS_CLOCK_GETTIME || defined(__MACH__) +#else + // Windows implementation adapted from roscpp_core (3-clause BSD), see: + // https://github.com/ros/roscpp_core/blob/0.5.6/rostime/src/time.cpp#L96 + // Win32 implementation + // unless I've missed something obvious, the only way to get high-precision + // time on Windows is via the QueryPerformanceCounter() call. However, + // this is somewhat problematic in Windows XP on some processors, especially + // AMD, because the Windows implementation can freak out when the CPU clocks + // down to save power. Time can jump or even go backwards. Microsoft has + // fixed this bug for most systems now, but it can still show up if you have + // not installed the latest CPU drivers (an oxymoron). 
They fixed all these + // problems in Windows Vista, and this API is by far the most accurate that + // I know of in Windows, so I'll use it here despite all these caveats + LARGE_INTEGER cpu_freq; + static LARGE_INTEGER cpu_freq_global; + LARGE_INTEGER init_cpu_time; + static LARGE_INTEGER init_cpu_time_global; + static atomic_uint_least64_t start_ns = ATOMIC_VAR_INIT(0); + rcl_time_t start = {0, 0}; + // If start_ns (static/global) is 0, then set it up on the first call. + uint64_t start_ns_loaded = atomic_load(&start_ns); + if (start_ns_loaded == 0) { + QueryPerformanceFrequency(&cpu_freq); + if (cpu_freq.QuadPart == 0) { + RCL_SET_ERROR_MSG("no high performance timer found"); + return RCL_RET_ERROR; + } + QueryPerformanceCounter(&init_cpu_time); + // compute an offset from the Epoch using the lower-performance timer API + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + LARGE_INTEGER start_li; + start_li.LowPart = ft.dwLowDateTime; + start_li.HighPart = ft.dwHighDateTime; + // why did they choose 1601 as the time zero, instead of 1970? + // there were no outstanding hard rock bands in 1601. +#ifdef _MSC_VER + start_li.QuadPart -= 116444736000000000Ui64; +#else + start_li.QuadPart -= 116444736000000000ULL; +#endif + start.sec = (uint64_t)(start_li.QuadPart / 10000000); // 100-ns units. odd. + start.nsec = (start_li.LowPart % 10000000) * 100; + uint64_t expected_zero = 0; if (atomic_compare_exchange_strong(&start_ns, &expected_zero, (start.sec * 1000000000) + start.nsec)) { + // If it matched 0 this call was first to setup, set the cpu_freq and init_cpu_time globals. + init_cpu_time_global = init_cpu_time; + cpu_freq_global = cpu_freq; + } else { + // Another concurrent first call managed to set this up first; reset start so it gets set. 
+ start = (rcl_time_t){0, 0}; + } + } + if (start.sec == 0 && start.nsec == 0) { + start_ns_loaded = atomic_load(&start_ns); start.sec = RCL_NS_TO_S(start_ns_loaded); + start.nsec = start_ns_loaded % 1000000000; + } + LARGE_INTEGER cur_time; + QueryPerformanceCounter(&cur_time); + LARGE_INTEGER delta_cpu_time; + delta_cpu_time.QuadPart = cur_time.QuadPart - init_cpu_time_global.QuadPart; + double d_delta_cpu_time = delta_cpu_time.QuadPart / (double) cpu_freq_global.QuadPart; + uint64_t delta_sec = (uint64_t) floor(d_delta_cpu_time); + uint64_t delta_nsec = (uint64_t) floor((d_delta_cpu_time - delta_sec) * 1e9); + + *now = start; + now->sec += delta_sec; + now->nsec += delta_nsec; + + // Normalize the time struct. + rcl_ret_t ret = rcl_time_normalize(now); + if (ret != RCL_RET_OK) { + return ret; // rcl error state should already be set. + } +#endif + return RCL_RET_OK; +} + +rcl_ret_t +rcl_time_normalize(rcl_time_t * now) +{ + RCL_CHECK_ARGUMENT_FOR_NULL(now, RCL_RET_INVALID_ARGUMENT); + now->sec += now->nsec / 1000000000; + now->nsec = now->nsec % 1000000000; + return RCL_RET_OK; +} + #if __cplusplus } #endif diff --git a/rcl/src/rcl/wait.c b/rcl/src/rcl/wait.c index cc9c10f..c892d73 100644 --- a/rcl/src/rcl/wait.c +++ b/rcl/src/rcl/wait.c @@ -281,7 +281,7 @@ rcl_wait(rcl_wait_set_t * wait_set, int64_t timeout) // Create dummy sets for currently unsupported wait-ables. static rmw_services_t dummy_services = {0, NULL}; static rmw_clients_t dummy_clients = {0, NULL}; - rmw_time_t rmw_timeout = rcl_time_t_from_int64_t_nanoseconds(timeout); + rmw_time_t rmw_timeout = rcl_time_from_int64_t_nanoseconds(timeout); // Wait. rmw_ret_t ret = rmw_wait( &wait_set->impl->rmw_subscriptions,