diff --git a/README.md b/README.md
index b434486..299f2c8 100755
--- a/README.md
+++ b/README.md
@@ -1 +1 @@
-This contains the priority_executor package that adds an Executor with deadline and chain-aware priority support. The `usage_example.cpp` file shows the needed steps to use the executor.
\ No newline at end of file
+This contains the priority_executor package that adds an Executor with deadline and chain-aware priority support. The current usage example is out-of-date
diff --git a/src/priority_executor/CMakeLists.txt b/src/priority_executor/CMakeLists.txt
index e6ec3da..f65a112 100755
--- a/src/priority_executor/CMakeLists.txt
+++ b/src/priority_executor/CMakeLists.txt
@@ -1,55 +1,71 @@
-cmake_minimum_required(VERSION 3.8)
-project(priority_executor VERSION 0.1.0)
+cmake_minimum_required(VERSION 3.5)
+project(priority_executor)
 
-# Set C++ standards
-set(CMAKE_CXX_STANDARD 17)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
+# Default to C99
+if(NOT CMAKE_C_STANDARD)
+  set(CMAKE_C_STANDARD 99)
+endif()
+
+# Default to C++14
+if(NOT CMAKE_CXX_STANDARD)
+  set(CMAKE_CXX_STANDARD 14)
+endif()
 
-# Compiler options
 if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
   add_compile_options(-Wall -Wextra -Wpedantic)
 endif()
 
-# Find dependencies
+# find dependencies
 find_package(ament_cmake REQUIRED)
 find_package(rclcpp REQUIRED)
 find_package(rcl REQUIRED)
-find_package(rmw REQUIRED)
 find_package(std_msgs REQUIRED)
 find_package(std_srvs REQUIRED)
 find_package(simple_timer REQUIRED)
-find_package(nlohmann_json REQUIRED)
 
-# Library targets
-add_library(priority_executor
-  src/priority_executor.cpp
-  src/priority_memory_strategy.cpp
-  src/performance_monitor.cpp
-  src/default_executor.cpp
-  src/usage_example.cpp
-  src/multithread_priority_executor.cpp
-)
+add_library(priority_executor src/priority_executor.cpp src/priority_memory_strategy.cpp)
 target_include_directories(priority_executor PUBLIC
   $
   $
 )
+# target_link_libraries(priority_executor
+#   simple_timer
+# )
 ament_target_dependencies(priority_executor
   rmw
   rclcpp
   rcl
   simple_timer
-  nlohmann_json
 )
 
-# Example executable
-add_executable(usage_example
-  src/usage_example.cpp
-)
-target_include_directories(usage_example PUBLIC
+add_library(multithread_priority_executor src/multithread_priority_executor.cpp src/priority_memory_strategy.cpp)
+target_include_directories(multithread_priority_executor PUBLIC
   $
   $
 )
+ament_target_dependencies(multithread_priority_executor
+  rmw
+  rclcpp
+  rcl
+  simple_timer
+)
+
+add_library(default_executor src/default_executor.cpp)
+target_include_directories(default_executor PUBLIC
+  $
+  $
+)
+ament_target_dependencies(default_executor
+  rmw
+  rclcpp
+  rcl
+  simple_timer
+)
+
+add_executable(usage_example src/usage_example.cpp)
+target_include_directories(usage_example PUBLIC
+  $
+  $)
 target_link_libraries(usage_example
   priority_executor
 )
@@ -57,37 +73,36 @@ ament_target_dependencies(usage_example
   rclcpp
   std_msgs
   std_srvs
-  simple_timer
 )
 
-# Testing
-if(BUILD_TESTING)
-  find_package(ament_lint_auto REQUIRED)
-  ament_lint_auto_find_test_dependencies()
-endif()
-
-# Installation
 install(
   DIRECTORY include/
   DESTINATION include
 )
-install(
-  TARGETS priority_executor
-  EXPORT export_${PROJECT_NAME}
+install(TARGETS usage_example
+  DESTINATION lib/${PROJECT_NAME}
+)
+
+install(TARGETS priority_executor multithread_priority_executor default_executor
   ARCHIVE DESTINATION lib
   LIBRARY DESTINATION lib
   RUNTIME DESTINATION bin
-  INCLUDES DESTINATION include
 )
+
+if(BUILD_TESTING)
+ 
find_package(ament_lint_auto REQUIRED) + # the following line skips the linter which checks for copyrights + # uncomment the line when a copyright and license is not present in all source files + #set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # uncomment the line when this package is not in a git repo + #set(ament_cmake_cpplint_FOUND TRUE) + ament_lint_auto_find_test_dependencies() +endif() -install( - TARGETS usage_example - RUNTIME DESTINATION lib/${PROJECT_NAME} -) - -# Export and package configuration ament_export_include_directories(include) -ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) -ament_export_dependencies(rclcpp rcl rmw simple_timer nlohmann_json) +ament_export_libraries(priority_executor) +ament_export_libraries(multithread_priority_executor) +ament_export_libraries(default_executor) ament_package() diff --git a/src/priority_executor/include/priority_executor/default_executor.hpp b/src/priority_executor/include/priority_executor/default_executor.hpp index 090687f..3359e92 100755 --- a/src/priority_executor/include/priority_executor/default_executor.hpp +++ b/src/priority_executor/include/priority_executor/default_executor.hpp @@ -12,100 +12,96 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef PRIORITY_EXECUTOR__DEFAULT_EXECUTOR_HPP_ -#define PRIORITY_EXECUTOR__DEFAULT_EXECUTOR_HPP_ +#ifndef RTIS_DEFAULT_EXECUTOR +#define RTIS_DEFAULT_EXECUTOR #include -#include -#include #include #include -#include +#include +#include +#include #include "rclcpp/executor.hpp" +#include "rclcpp/rclcpp.hpp" #include "rclcpp/macros.hpp" +#include "rclcpp/memory_strategies.hpp" +#include "rclcpp/node.hpp" +#include "rclcpp/utilities.hpp" +#include "rclcpp/rate.hpp" #include "rclcpp/visibility_control.hpp" #include "rclcpp/detail/mutex_two_priorities.hpp" +#include "priority_executor/priority_memory_strategy.hpp" #include "simple_timer/rt-sched.hpp" -namespace priority_executor { - -class RTISTimed { +class RTISTimed +{ public: - node_time_logger logger_; + node_time_logger logger_; }; -class ROSDefaultMultithreadedExecutor : public rclcpp::Executor, public RTISTimed { +class ROSDefaultMultithreadedExecutor : public rclcpp::Executor, public RTISTimed +{ public: - RCLCPP_SMART_PTR_DEFINITIONS(ROSDefaultMultithreadedExecutor) + RCLCPP_PUBLIC + explicit ROSDefaultMultithreadedExecutor( + const rclcpp::ExecutorOptions &options = rclcpp::ExecutorOptions(), int number_of_threads = 2, std::chrono::nanoseconds next_exec_timeout = std::chrono::nanoseconds(-1)); - RCLCPP_PUBLIC - explicit ROSDefaultMultithreadedExecutor( - rclcpp::ExecutorOptions const &options = rclcpp::ExecutorOptions(), - int number_of_threads = 2, - std::chrono::nanoseconds next_exec_timeout = std::chrono::nanoseconds(-1)); - - RCLCPP_PUBLIC size_t get_number_of_threads(); - RCLCPP_PUBLIC void spin() override; - - bool get_next_executable(rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); + RCLCPP_PUBLIC size_t get_number_of_threads(); + RCLCPP_PUBLIC void spin() override; + bool get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); protected: - RCLCPP_PUBLIC void run(size_t thread_number); + RCLCPP_PUBLIC void run(size_t thread_number); private: - RCLCPP_DISABLE_COPY(ROSDefaultMultithreadedExecutor) - - size_t number_of_threads_; - std::set scheduled_timers_; - 
std::chrono::nanoseconds next_exec_timeout_{std::chrono::nanoseconds(-1)}; - - static std::unordered_map> - wait_mutex_set_; - static std::mutex shared_wait_mutex_; + size_t number_of_threads_; + std::set scheduled_timers_; + static std::unordered_map> + wait_mutex_set_; + static std::mutex shared_wait_mutex_; + std::chrono::nanoseconds next_exec_timeout_ = std::chrono::nanoseconds(-1); }; /// Single-threaded executor implementation. /** * This is the default executor created by rclcpp::spin. */ -class ROSDefaultExecutor : public rclcpp::Executor, public RTISTimed { +class ROSDefaultExecutor : public rclcpp::Executor, public RTISTimed +{ public: - RCLCPP_SMART_PTR_DEFINITIONS(ROSDefaultExecutor) + RCLCPP_SMART_PTR_DEFINITIONS(ROSDefaultExecutor) - /// Default constructor. See the default constructor for Executor. - RCLCPP_PUBLIC - explicit ROSDefaultExecutor( - rclcpp::ExecutorOptions const &options = rclcpp::ExecutorOptions()); + /// Default constructor. See the default constructor for Executor. + RCLCPP_PUBLIC + explicit ROSDefaultExecutor( + const rclcpp::ExecutorOptions &options = rclcpp::ExecutorOptions()); - /// Default destructor. - RCLCPP_PUBLIC - virtual ~ROSDefaultExecutor(); + /// Default destructor. + RCLCPP_PUBLIC + virtual ~ROSDefaultExecutor(); - /// Single-threaded implementation of spin. - /** - * This function will block until work comes in, execute it, and then repeat - * the process until canceled. - * It may be interrupt by a call to rclcpp::Executor::cancel() or by ctrl-c - * if the associated context is configured to shutdown on SIGINT. - * \throws std::runtime_error when spin() called while already spinning - */ - RCLCPP_PUBLIC - void spin() override; + /// Single-threaded implementation of spin. + /** + * This function will block until work comes in, execute it, and then repeat + * the process until canceled. + * It may be interrupt by a call to rclcpp::Executor::cancel() or by ctrl-c + * if the associated context is configured to shutdown on SIGINT. 
+ * \throws std::runtime_error when spin() called while already spinning + */ + RCLCPP_PUBLIC + void + spin() override; + bool get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); - bool get_next_executable(rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); - - RCLCPP_PUBLIC - void wait_for_work(std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); + RCLCPP_PUBLIC + void + wait_for_work(std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); private: - RCLCPP_DISABLE_COPY(ROSDefaultExecutor) + RCLCPP_DISABLE_COPY(ROSDefaultExecutor) }; -} // namespace priority_executor - -#endif // PRIORITY_EXECUTOR__DEFAULT_EXECUTOR_HPP_ \ No newline at end of file +#endif // RCLCPP__EXECUTORS__SINGLE_THREADED_EXECUTOR_HPP_ \ No newline at end of file diff --git a/src/priority_executor/include/priority_executor/multithread_priority_executor.hpp b/src/priority_executor/include/priority_executor/multithread_priority_executor.hpp index a722b53..72b3b9e 100755 --- a/src/priority_executor/include/priority_executor/multithread_priority_executor.hpp +++ b/src/priority_executor/include/priority_executor/multithread_priority_executor.hpp @@ -1,56 +1,34 @@ -#ifndef PRIORITY_EXECUTOR__MULTITHREAD_PRIORITY_EXECUTOR_HPP_ -#define PRIORITY_EXECUTOR__MULTITHREAD_PRIORITY_EXECUTOR_HPP_ +#ifndef RTIS_MULTITHREAD_EXECUTOR +#define RTIS_MULTITHREAD_EXECUTOR +#include +#include "rclcpp/detail/mutex_two_priorities.hpp" #include -#include -#include -#include -#include -#include -#include "priority_executor/default_executor.hpp" -#include "priority_executor/priority_executor.hpp" - -#include "rclcpp/executor.hpp" -#include "rclcpp/macros.hpp" -#include "rclcpp/memory_strategies.hpp" -#include "rclcpp/node.hpp" -#include "rclcpp/rate.hpp" -#include "rclcpp/utilities.hpp" -#include "rclcpp/visibility_control.hpp" - -namespace priority_executor { - -class MultithreadTimedExecutor : public TimedExecutor { -public: +namespace timed_executor +{ + class MultithreadTimedExecutor : public TimedExecutor + { + public: RCLCPP_PUBLIC explicit MultithreadTimedExecutor( - rclcpp::ExecutorOptions const &options = rclcpp::ExecutorOptions(), - std::string const &name = "unnamed executor", - int number_of_threads = 2, - std::chrono::nanoseconds next_exec_timeout = std::chrono::nanoseconds(-1)); - - RCLCPP_PUBLIC - virtual ~MultithreadTimedExecutor() = default; + const rclcpp::ExecutorOptions &options = rclcpp::ExecutorOptions(), std::string name = "unnamed executor", int number_of_threads = 2, std::chrono::nanoseconds next_exec_timeout = std::chrono::nanoseconds(-1)); RCLCPP_PUBLIC size_t get_number_of_threads(); RCLCPP_PUBLIC void spin() override; -protected: + protected: RCLCPP_PUBLIC void run(size_t thread_number); -private: - RCLCPP_DISABLE_COPY(MultithreadTimedExecutor) - std::set scheduled_timers_; + private: size_t number_of_threads_; - std::chrono::nanoseconds next_exec_timeout_{std::chrono::nanoseconds(-1)}; - - static std::unordered_map> + std::set scheduled_timers_; + static std::unordered_map> wait_mutex_set_; static std::mutex shared_wait_mutex_; -}; + std::chrono::nanoseconds next_exec_timeout_ = std::chrono::nanoseconds(-1); + }; +} -} // namespace priority_executor - -#endif // PRIORITY_EXECUTOR__MULTITHREAD_PRIORITY_EXECUTOR_HPP_ \ No newline at end of file +#endif \ No newline at end of file diff --git a/src/priority_executor/include/priority_executor/performance_monitor.hpp 
b/src/priority_executor/include/priority_executor/performance_monitor.hpp deleted file mode 100644 index beece1f..0000000 --- a/src/priority_executor/include/priority_executor/performance_monitor.hpp +++ /dev/null @@ -1,237 +0,0 @@ -#ifndef PERFORMANCE_MONITOR_HPP_ -#define PERFORMANCE_MONITOR_HPP_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "rclcpp/rclcpp.hpp" -#include "simple_timer/rt-sched.hpp" - -namespace priority_executor { - -enum class PerformanceEventType { - CALLBACK_READY, - CALLBACK_START, - CALLBACK_END, - DEADLINE_MISSED, - DEADLINE_MET, - CHAIN_START, - CHAIN_END -}; - -struct PerformanceEvent { - uint64_t timestamp; // High-resolution timestamp (microseconds) - PerformanceEventType type; // Type of event - std::string node_name; // Name of the node - std::string callback_name; // Name of the callback - int chain_id{-1}; // Chain identifier - bool is_first_in_chain{false}; // First callback in chain - bool is_last_in_chain{false}; // Last callback in chain - uint64_t deadline{0}; // Deadline (if applicable) - uint64_t processing_time{0}; // Time spent executing (if applicable) - int executor_id{0}; // Executor identifier - std::string additional_data; // JSON string for extensible data - - // Helper method to extract callback type from additional_data - std::string get_callback_type() const { - if (additional_data.empty()) return "unknown"; - - try { - nlohmann::json json = nlohmann::json::parse(additional_data); - if (json.contains("callback_type")) { - return json["callback_type"].get(); - } - } catch (...) {} - - return "unknown"; - } - - // Helper method to extract thread info from additional_data - int get_thread_id() const { - if (additional_data.empty()) return -1; - - try { - nlohmann::json json = nlohmann::json::parse(additional_data); - if (json.contains("thread_info") && json["thread_info"].is_object() && - json["thread_info"].contains("thread_id")) { - return json["thread_info"]["thread_id"].get(); - } - // Direct thread_id if present - if (json.contains("thread_id")) { - return json["thread_id"].get(); - } - } catch (...) {} - - return -1; - } - - // Helper method to create a descriptive identifier for this callback - std::string get_descriptive_id() const { - std::stringstream ss; - ss << node_name; - - if (!callback_name.empty()) { - ss << "/" << callback_name; - } - - if (chain_id >= 0) { - ss << " (chain:" << chain_id; - if (is_first_in_chain) ss << " start"; - if (is_last_in_chain) ss << " end"; - ss << ")"; - } - - return ss.str(); - } - - // Convert additional_data to JSON object - nlohmann::json get_additional_data_json() const { - try { - if (!additional_data.empty()) { - return nlohmann::json::parse(additional_data); - } - } catch (...) 
{} - - return nlohmann::json::object(); - } -}; - -class PerformanceMonitor { -public: - static PerformanceMonitor& getInstance(); - - void recordEvent(const PerformanceEvent& event); - void setBufferSize(size_t size); - void setAutoDumpThreshold(size_t threshold); - void setDumpPath(const std::string& path); - bool dumpToFile(const std::string& filename); - void enable(bool enabled = true); - void clear(); - - // Helper method to set monitoring options in one call - void setMonitoringOptions(size_t buffer_size, size_t auto_dump_threshold, - const std::string& dump_path) { - setBufferSize(buffer_size); - setAutoDumpThreshold(auto_dump_threshold); - setDumpPath(dump_path); - } - - // Get all events for a specific callback - std::vector getEventsForCallback(const std::string& node_name, - const std::string& callback_name) const { - std::vector result; - std::lock_guard lock(event_mutex_); - - for (const auto& event : event_buffer_) { - if (event.node_name == node_name && event.callback_name == callback_name) { - result.push_back(event); - } - } - - return result; - } - - // Get all events for a specific chain - std::vector getEventsForChain(int chain_id) const { - std::vector result; - std::lock_guard lock(event_mutex_); - - for (const auto& event : event_buffer_) { - if (event.chain_id == chain_id) { - result.push_back(event); - } - } - - return result; - } - - // Get all events by type - std::vector getEventsByType(PerformanceEventType type) const { - std::vector result; - std::lock_guard lock(event_mutex_); - - for (const auto& event : event_buffer_) { - if (event.type == type) { - result.push_back(event); - } - } - - return result; - } - - // Get a formatted report of callbacks and their execution times - std::string getCallbackExecutionReport() const { - std::lock_guard lock(event_mutex_); - std::stringstream ss; - - ss << "Callback Execution Report:\n"; - ss << "=========================\n\n"; - - // Maps to store total execution time and count per callback - std::unordered_map total_time; - std::unordered_map call_count; - std::unordered_map max_time; - std::unordered_map min_time; - - // Process all CALLBACK_END events (they contain processing_time) - for (const auto& event : event_buffer_) { - if (event.type == PerformanceEventType::CALLBACK_END && event.processing_time > 0) { - std::string callback_id = event.get_descriptive_id(); - total_time[callback_id] += event.processing_time; - call_count[callback_id]++; - max_time[callback_id] = std::max(max_time[callback_id], event.processing_time); - if (min_time[callback_id] == 0 || event.processing_time < min_time[callback_id]) { - min_time[callback_id] = event.processing_time; - } - } - } - - // Print results sorted by callback id - std::vector callback_ids; - for (const auto& entry : call_count) { - callback_ids.push_back(entry.first); - } - - std::sort(callback_ids.begin(), callback_ids.end()); - - for (const auto& id : callback_ids) { - double avg_time = static_cast(total_time[id]) / call_count[id]; - ss << id << "\n"; - ss << " Calls: " << call_count[id] << "\n"; - ss << " Avg time: " << avg_time << " µs\n"; - ss << " Min time: " << min_time[id] << " µs\n"; - ss << " Max time: " << max_time[id] << " µs\n"; - ss << " Total time: " << total_time[id] << " µs\n\n"; - } - - return ss.str(); - } - -private: - PerformanceMonitor(); - ~PerformanceMonitor() = default; - - PerformanceMonitor(const PerformanceMonitor&) = delete; - PerformanceMonitor& operator=(const PerformanceMonitor&) = delete; - PerformanceMonitor(PerformanceMonitor&&) = 
delete; - PerformanceMonitor& operator=(PerformanceMonitor&&) = delete; - - mutable std::mutex event_mutex_; - std::vector event_buffer_; - size_t buffer_size_{10000}; - size_t auto_dump_threshold_{0}; - std::string dump_path_{"performance_logs"}; - std::atomic enabled_{true}; - - std::string eventsToJson() const; - void checkAndAutoDump(); -}; - -} // namespace priority_executor - -#endif // PERFORMANCE_MONITOR_HPP_ \ No newline at end of file diff --git a/src/priority_executor/include/priority_executor/priority_executor.hpp b/src/priority_executor/include/priority_executor/priority_executor.hpp index 62d7248..a942d08 100755 --- a/src/priority_executor/include/priority_executor/priority_executor.hpp +++ b/src/priority_executor/include/priority_executor/priority_executor.hpp @@ -12,39 +12,42 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef PRIORITY_EXECUTOR__PRIORITY_EXECUTOR_HPP_ -#define PRIORITY_EXECUTOR__PRIORITY_EXECUTOR_HPP_ +#ifndef RTIS_TIMED_EXECUTOR +#define RTIS_TIMED_EXECUTOR #include #include #include #include -#include +#include +#include #include "rclcpp/executor.hpp" #include "rclcpp/macros.hpp" +#include "rclcpp/memory_strategies.hpp" +#include "rclcpp/node.hpp" +#include "rclcpp/utilities.hpp" +#include "rclcpp/rate.hpp" #include "rclcpp/visibility_control.hpp" - #include "priority_executor/priority_memory_strategy.hpp" -#include "priority_executor/default_executor.hpp" -#include "priority_executor/performance_monitor.hpp" +#include +namespace timed_executor +{ -namespace priority_executor { - -/// Single-threaded executor implementation. -/** + /// Single-threaded executor implementation. + /** * This is the default executor created by rclcpp::spin. */ -class TimedExecutor : public rclcpp::Executor, public RTISTimed { -public: + class TimedExecutor : public rclcpp::Executor, public RTISTimed + { + public: RCLCPP_SMART_PTR_DEFINITIONS(TimedExecutor) /// Default constructor. See the default constructor for Executor. RCLCPP_PUBLIC explicit TimedExecutor( - rclcpp::ExecutorOptions const &options = rclcpp::ExecutorOptions(), - std::string name = "unnamed executor"); + const rclcpp::ExecutorOptions &options = rclcpp::ExecutorOptions(), std::string name = "unnamed executor"); /// Default destructor. RCLCPP_PUBLIC @@ -52,75 +55,35 @@ public: /// Single-threaded implementation of spin. /** - * This function will block until work comes in, execute it, and then repeat - * the process until canceled. - * It may be interrupt by a call to rclcpp::Executor::cancel() or by ctrl-c - * if the associated context is configured to shutdown on SIGINT. - * \throws std::runtime_error when spin() called while already spinning - */ + * This function will block until work comes in, execute it, and then repeat + * the process until canceled. + * It may be interrupt by a call to rclcpp::Executor::cancel() or by ctrl-c + * if the associated context is configured to shutdown on SIGINT. 
+ * \throws std::runtime_error when spin() called while already spinning + */ RCLCPP_PUBLIC - void spin() override; - + void + spin() override; std::string name; + void set_use_priorities(bool use_prio); - std::shared_ptr> prio_memory_strategy_{nullptr}; + std::shared_ptr> prio_memory_strategy_ = nullptr; - // Performance monitoring control - void enableMonitoring(bool enable = true) { - PerformanceMonitor::getInstance().enable(enable); - } + protected: + bool + get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); - void setMonitoringOptions( - size_t buffer_size, - size_t auto_dump_threshold, - const std::string& dump_path) { - auto& monitor = PerformanceMonitor::getInstance(); - monitor.setBufferSize(buffer_size); - monitor.setAutoDumpThreshold(auto_dump_threshold); - monitor.setDumpPath(dump_path); - } - - // Get a formatted report of all callback execution times - std::string getCallbackExecutionReport() const { - auto& monitor = PerformanceMonitor::getInstance(); - return monitor.getCallbackExecutionReport(); - } - - // Manually trigger a log dump with a specific filename - bool dumpMonitoringData(const std::string& filename) { - auto& monitor = PerformanceMonitor::getInstance(); - return monitor.dumpToFile(filename); - } - - // Get executor ID for identification in logs - int getExecutorId() const { - return executor_id_; - } - - // Get executor name for identification in logs - const std::string& getExecutorName() const { - return name; - } - -protected: - bool get_next_executable(rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout = std::chrono::nanoseconds(-1)); - -private: + private: RCLCPP_DISABLE_COPY(TimedExecutor) + void + wait_for_work(std::chrono::nanoseconds timeout); - void wait_for_work(std::chrono::nanoseconds timeout); - bool get_next_ready_executable(rclcpp::AnyExecutable& any_executable); - void recordExecutionEvent(const rclcpp::AnyExecutable& any_exec, - PerformanceEventType event_type, - uint64_t timestamp, - uint64_t processing_time = 0); + bool + get_next_ready_executable(rclcpp::AnyExecutable &any_executable); - bool use_priorities{true}; - static std::atomic next_executor_id_; - int executor_id_; -}; + bool use_priorities = true; + }; -} // namespace priority_executor +} // namespace timed_executor -#endif // PRIORITY_EXECUTOR__PRIORITY_EXECUTOR_HPP_ +#endif // RCLCPP__EXECUTORS__SINGLE_THREADED_EXECUTOR_HPP_ diff --git a/src/priority_executor/include/priority_executor/priority_memory_strategy.hpp b/src/priority_executor/include/priority_executor/priority_memory_strategy.hpp index 6033862..4c78abc 100755 --- a/src/priority_executor/include/priority_executor/priority_memory_strategy.hpp +++ b/src/priority_executor/include/priority_executor/priority_memory_strategy.hpp @@ -12,34 +12,37 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef PRIORITY_EXECUTOR__PRIORITY_MEMORY_STRATEGY_HPP_ -#define PRIORITY_EXECUTOR__PRIORITY_MEMORY_STRATEGY_HPP_ +#ifndef RTIS_PRIORITY_STRATEGY +#define RTIS_PRIORITY_STRATEGY -#include -#include #include -#include -#include -#include #include +#include +#include +#include #include "rcl/allocator.h" #include "rclcpp/allocator/allocator_common.hpp" #include "rclcpp/memory_strategy.hpp" +#include "rclcpp/node.hpp" +#include "rclcpp/visibility_control.hpp" + +#include "rcutils/logging_macros.h" + +#include "rmw/types.h" #include "simple_timer/rt-sched.hpp" /// Delegate for handling memory allocations while the Executor is executing. /** - * By default, the memory strategy dynamically allocates memory for structures - * that come in from the rmw implementation after the executor waits for work, - * based on the number of entities that come through. + * By default, the memory strategy dynamically allocates memory for structures that come in from + * the rmw implementation after the executor waits for work, based on the number of entities that + * come through. */ -namespace priority_executor { - -enum class ExecutableType { +enum ExecutableType +{ SUBSCRIPTION, SERVICE, CLIENT, @@ -47,139 +50,303 @@ enum class ExecutableType { WAITABLE }; -std::ostream& operator<<(std::ostream& os, const ExecutableType& obj); - -enum class ExecutableScheduleType { +enum ExecutableScheduleType +{ DEADLINE = 0, CHAIN_AWARE_PRIORITY, - CHAIN_INDEPENDENT_PRIORITY, // not used here - DEFAULT, // not used here + CHAIN_INDEPENDENT_PRIORITY, // not used here + DEFAULT, // not used here }; -std::ostream& operator<<(std::ostream& os, const ExecutableScheduleType& obj); - -class PriorityExecutable { +class PriorityExecutable +{ public: - static size_t num_executables; - - explicit PriorityExecutable( - std::shared_ptr h, - int p, - ExecutableType t, - ExecutableScheduleType sched_type = ExecutableScheduleType::CHAIN_INDEPENDENT_PRIORITY); - - PriorityExecutable(); - - void dont_run(); - void allow_run(); - void increment_counter(); - bool operator==(PriorityExecutable const &other) const; - friend std::ostream& operator<<(std::ostream& os, PriorityExecutable const &pe); - std::shared_ptr handle; ExecutableType type; bool can_be_run = false; std::shared_ptr waitable; ExecutableScheduleType sched_type; - int priority = 0; - long period = 1000; // milliseconds + int priority; + long period = 1000; // milliseconds bool is_first_in_chain = false; bool is_last_in_chain = false; - std::deque* deadlines = nullptr; // chain aware deadlines + // chain aware deadlines + std::deque *deadlines = nullptr; std::shared_ptr timer_handle; - int chain_id = 0; // just used for logging - int counter = 0; // chain aware priority + // just used for logging + int chain_id = 0; + + // chain aware priority + int counter = 0; + + PriorityExecutable(std::shared_ptr h, int p, ExecutableType t, ExecutableScheduleType sched_type = CHAIN_INDEPENDENT_PRIORITY); + + void dont_run(); + + void allow_run(); + + PriorityExecutable(); + + void increment_counter(); + + bool operator==(const PriorityExecutable &other) const; + + static size_t num_executables; int executable_id = 0; + std::string name = ""; }; -class PriorityExecutableComparator { +class PriorityExecutableComparator +{ public: - bool operator()(PriorityExecutable const *p1, PriorityExecutable const *p2) const; + bool operator()(const PriorityExecutable *p1, const PriorityExecutable *p2); }; template > -class PriorityMemoryStrategy : public rclcpp::memory_strategy::MemoryStrategy { +class 
PriorityMemoryStrategy : public rclcpp::memory_strategy::MemoryStrategy +{ public: RCLCPP_SMART_PTR_DEFINITIONS(PriorityMemoryStrategy) - using VoidAllocTraits = typename rclcpp::allocator::AllocRebind; + using VoidAllocTraits = typename rclcpp::allocator::AllocRebind; using VoidAlloc = typename VoidAllocTraits::allocator_type; - explicit PriorityMemoryStrategy(std::shared_ptr allocator); - PriorityMemoryStrategy(); + explicit PriorityMemoryStrategy(std::shared_ptr allocator) + { + allocator_ = std::make_shared(*allocator.get()); + logger_ = create_logger(); + } + PriorityMemoryStrategy() + { + allocator_ = std::make_shared(); + logger_ = create_logger(); + } node_time_logger logger_; - // Override methods - void add_guard_condition(const rcl_guard_condition_t* guard_condition) override; - void remove_guard_condition(const rcl_guard_condition_t* guard_condition) override; - void clear_handles() override; - void remove_null_handles(rcl_wait_set_t* wait_set) override; - bool collect_entities(const WeakNodeList& weak_nodes) override; - void add_waitable_handle(const rclcpp::Waitable::SharedPtr& waitable) override; - bool add_handles_to_wait_set(rcl_wait_set_t* wait_set) override; + void add_guard_condition(const rcl_guard_condition_t *guard_condition) override; - // Class-specific methods - void get_next_executable(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes); + void remove_guard_condition(const rcl_guard_condition_t *guard_condition) override; + + void clear_handles() override; + + void remove_null_handles(rcl_wait_set_t *wait_set) override; + + bool collect_entities(const WeakNodeList &weak_nodes) override; + + void add_waitable_handle(const rclcpp::Waitable::SharedPtr &waitable) override; + + bool add_handles_to_wait_set(rcl_wait_set_t *wait_set) override; + + void + get_next_executable( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes); + + /** + * thread-id is used for logging. 
if -1, then don't log the thread id + */ void post_execute(rclcpp::AnyExecutable any_exec, int thread_id = -1); - // Override virtual methods - void get_next_subscription(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes) override; - void get_next_service(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes) override; - void get_next_client(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes) override; - void get_next_timer(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes) override; - void get_next_waitable(rclcpp::AnyExecutable& any_exec, const WeakNodeList& weak_nodes) override; + void + get_next_subscription( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) override; - // Priority handling methods - std::shared_ptr get_priority_settings(std::shared_ptr executable); - rcl_allocator_t get_allocator() override; + void + get_next_service( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) override; - // Counter methods - size_t number_of_ready_subscriptions() const override; - size_t number_of_ready_services() const override; - size_t number_of_ready_events() const override; - size_t number_of_ready_clients() const override; - size_t number_of_guard_conditions() const override; - size_t number_of_ready_timers() const override; - size_t number_of_waitables() const override; + void + get_next_client(rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) override; - // Executable priority methods - void set_executable_priority(std::shared_ptr handle, int priority, ExecutableType t); - void set_executable_priority(std::shared_ptr handle, int priority, ExecutableType t, - ExecutableScheduleType sc, int chain_index); - void set_executable_deadline(std::shared_ptr handle, int period, ExecutableType t, - int chain_id = 0, std::string name = ""); - int get_priority(std::shared_ptr executable); - void set_first_in_chain(std::shared_ptr exec_handle); - void set_last_in_chain(std::shared_ptr exec_handle); - void assign_deadlines_queue(std::shared_ptr exec_handle, std::deque* deadlines); - std::shared_ptr> get_chain_deadlines(int chain_id); + void + get_next_timer( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) override; + + void + get_next_waitable(rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) override; + + std::shared_ptr get_priority_settings(std::shared_ptr executable) + { + auto search = priority_map.find(executable); + if (search != priority_map.end()) + { + return search->second; + } + else + { + return nullptr; + } + } + + rcl_allocator_t get_allocator() override + { + return rclcpp::allocator::get_rcl_allocator(*allocator_.get()); + } + + size_t number_of_ready_subscriptions() const override + { + size_t number_of_subscriptions = subscription_handles_.size(); + // std::cout << "ready_raw: " << number_of_subscriptions << std::endl; + for (auto waitable : waitable_handles_) + { + number_of_subscriptions += waitable->get_number_of_ready_subscriptions(); + } + return number_of_subscriptions; + } + + size_t number_of_ready_services() const override + { + size_t number_of_services = service_handles_.size(); + for (auto waitable : waitable_handles_) + { + number_of_services += waitable->get_number_of_ready_services(); + } + return number_of_services; + } + + size_t number_of_ready_events() const override + { + size_t number_of_events = 0; + for (auto waitable : waitable_handles_) + { + number_of_events += waitable->get_number_of_ready_events(); + } + return 
number_of_events; + } + + size_t number_of_ready_clients() const override + { + size_t number_of_clients = client_handles_.size(); + for (auto waitable : waitable_handles_) + { + number_of_clients += waitable->get_number_of_ready_clients(); + } + return number_of_clients; + } + + size_t number_of_guard_conditions() const override + { + size_t number_of_guard_conditions = guard_conditions_.size(); + for (auto waitable : waitable_handles_) + { + number_of_guard_conditions += waitable->get_number_of_ready_guard_conditions(); + } + return number_of_guard_conditions; + } + + size_t number_of_ready_timers() const override + { + size_t number_of_timers = timer_handles_.size(); + for (auto waitable : waitable_handles_) + { + number_of_timers += waitable->get_number_of_ready_timers(); + } + return number_of_timers; + } + + size_t number_of_waitables() const override + { + return waitable_handles_.size(); + } + + void set_executable_priority(std::shared_ptr handle, int priority, ExecutableType t) + { + // TODO: any sanity checks should go here + priority_map[handle] = std::make_shared(handle, priority, t); + } + void set_executable_priority(std::shared_ptr handle, int priority, ExecutableType t, ExecutableScheduleType sc, int chain_index) + { + // TODO: any sanity checks should go here + priority_map[handle] = std::make_shared(handle, priority, t, sc); + priority_map[handle]->chain_id = chain_index; + } + + void set_executable_deadline(std::shared_ptr handle, int period, ExecutableType t, int chain_id = 0, std::string name = "") + { + // TODO: any sanity checks should go here + priority_map[handle] = std::make_shared(handle, period, t, DEADLINE); + priority_map[handle]->chain_id = chain_id; + priority_map[handle]->name = name; + } + + int get_priority(std::shared_ptr executable) + { + auto search = priority_map.find(executable); + if (search != priority_map.end()) + { + return search->second->priority; + } + else + { + return 0; + } + } + + + void set_first_in_chain(std::shared_ptr exec_handle) + { + get_priority_settings(exec_handle)->is_first_in_chain = true; + } + + void set_last_in_chain(std::shared_ptr exec_handle) + { + get_priority_settings(exec_handle)->is_last_in_chain = true; + } + + void assign_deadlines_queue(std::shared_ptr exec_handle, std::deque *deadlines) + { + get_priority_settings(exec_handle)->deadlines = deadlines; + } private: - std::shared_ptr get_and_reset_priority(std::shared_ptr executable, - ExecutableType t); + std::shared_ptr get_and_reset_priority(std::shared_ptr executable, ExecutableType t) + { + // PriorityExecutable *p = get_priority_settings(executable); + std::shared_ptr p = get_priority_settings(executable); + if (p == nullptr) + { + // priority_map[executable] = PriorityExecutable(executable, 0, t); + priority_map[executable] = std::make_shared(executable, 0, t); + p = priority_map[executable]; + } + // p->can_be_run = true; + return p; + } template - using VectorRebind = std::vector::template rebind_alloc>; + using VectorRebind = + std::vector::template rebind_alloc>; + + VectorRebind guard_conditions_; - // Member variables - VectorRebind guard_conditions_; VectorRebind> subscription_handles_; VectorRebind> service_handles_; VectorRebind> client_handles_; VectorRebind> timer_handles_; VectorRebind> waitable_handles_; - std::shared_ptr allocator_; - std::map, std::shared_ptr> priority_map; - std::map>> chain_deadlines; - std::set all_executables_; + std::shared_ptr allocator_; + + // TODO: evaluate using node/subscription namespaced strings as keys + + // holds 
*all* handle->priority mappings + std::map, std::shared_ptr> priority_map; + + // hold *only valid* executable+priorities + // std::priority_queue, PriorityExecutableComparator> all_executables_; + std::set all_executables_; + + // priority queue doesn't allow iteration. fortunately, std::map is sorted by key, so we can replace the priority queue with a map + // the key will be the priority. the value doesn't matter. + // std::map all_executables_ = std::map(PriorityExecutableComparator()); void add_executable_to_queue(std::shared_ptr p); }; -} // namespace priority_executor - -#endif // PRIORITY_EXECUTOR__PRIORITY_MEMORY_STRATEGY_HPP_ \ No newline at end of file +#endif // RCLCPP__STRATEGIES__ALLOCATOR_MEMORY_STRATEGY_HPP_ diff --git a/src/priority_executor/package.xml b/src/priority_executor/package.xml index faaedb5..15635ac 100755 --- a/src/priority_executor/package.xml +++ b/src/priority_executor/package.xml @@ -2,18 +2,15 @@ priority_executor - 0.1.0 - - ROS 2 package implementing priority-based executors for real-time task scheduling - + 0.0.0 + TODO: Package description kurt - Apache License 2.0 + TODO: License declaration ament_cmake rclcpp rcl - rmw std_msgs std_srvs simple_timer diff --git a/src/priority_executor/src/default_executor.cpp b/src/priority_executor/src/default_executor.cpp index 5210e23..c5ed9fa 100755 --- a/src/priority_executor/src/default_executor.cpp +++ b/src/priority_executor/src/default_executor.cpp @@ -13,253 +13,255 @@ // limitations under the License. #include "priority_executor/default_executor.hpp" - -#include -#include -#include -#include -#include -#include -#include - #include "rcpputils/scope_exit.hpp" #include "rclcpp/any_executable.hpp" #include "simple_timer/rt-sched.hpp" -namespace priority_executor { - -std::unordered_map> - ROSDefaultMultithreadedExecutor::wait_mutex_set_; -std::mutex ROSDefaultMultithreadedExecutor::shared_wait_mutex_; - -ROSDefaultExecutor::ROSDefaultExecutor(rclcpp::ExecutorOptions const &options) - : rclcpp::Executor(options) { - logger_ = create_logger(); +ROSDefaultExecutor::ROSDefaultExecutor(const rclcpp::ExecutorOptions &options) + : rclcpp::Executor(options) +{ + logger_ = create_logger(); } ROSDefaultExecutor::~ROSDefaultExecutor() {} -void ROSDefaultExecutor::wait_for_work(std::chrono::nanoseconds timeout) { +void ROSDefaultExecutor::wait_for_work(std::chrono::nanoseconds timeout) +{ + { + std::unique_lock lock(memory_strategy_mutex_); + + // Collect the subscriptions and timers to be waited on + memory_strategy_->clear_handles(); + bool has_invalid_weak_nodes = memory_strategy_->collect_entities(weak_nodes_); + + // Clean up any invalid nodes, if they were detected + if (has_invalid_weak_nodes) { - std::unique_lock lock(memory_strategy_mutex_); - - // Collect the subscriptions and timers to be waited on - memory_strategy_->clear_handles(); - bool const has_invalid_weak_nodes = memory_strategy_->collect_entities(weak_nodes_); - - // Clean up any invalid nodes, if they were detected - if (has_invalid_weak_nodes) { - auto node_it = weak_nodes_.begin(); - auto gc_it = guard_conditions_.begin(); - while (node_it != weak_nodes_.end()) { - if (node_it->expired()) { - node_it = weak_nodes_.erase(node_it); - memory_strategy_->remove_guard_condition(*gc_it); - gc_it = guard_conditions_.erase(gc_it); - } else { - ++node_it; - ++gc_it; - } - } - } - // clear wait set - rcl_ret_t ret = rcl_wait_set_clear(&wait_set_); - if (ret != RCL_RET_OK) { - rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't clear wait set"); - } - - 
// The size of waitables are accounted for in size of the other entities - ret = rcl_wait_set_resize( - &wait_set_, memory_strategy_->number_of_ready_subscriptions(), - memory_strategy_->number_of_guard_conditions(), - memory_strategy_->number_of_ready_timers(), - memory_strategy_->number_of_ready_clients(), - memory_strategy_->number_of_ready_services(), - memory_strategy_->number_of_ready_events()); - if (RCL_RET_OK != ret) { - rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't resize the wait set"); - } - - if (!memory_strategy_->add_handles_to_wait_set(&wait_set_)) { - throw std::runtime_error("Couldn't fill wait set"); - } - } - rcl_ret_t const status = - rcl_wait(&wait_set_, std::chrono::duration_cast(timeout).count()); - if (status == RCL_RET_WAIT_SET_EMPTY) { - RCUTILS_LOG_WARN_NAMED( - "rclcpp", - "empty wait set received in rcl_wait(). This should never happen."); - } else if (status != RCL_RET_OK && status != RCL_RET_TIMEOUT) { - using rclcpp::exceptions::throw_from_rcl_error; - throw_from_rcl_error(status, "rcl_wait() failed"); - } - - // check the null handles in the wait set and remove them from the handles in memory strategy - // for callback-based entities - memory_strategy_->remove_null_handles(&wait_set_); -} - -bool ROSDefaultExecutor::get_next_executable(rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout) { - bool success = false; - // Check to see if there are any subscriptions or timers needing service - // TODO(wjwwood): improve run to run efficiency of this function - // sched_yield(); - wait_for_work(timeout); - success = get_next_ready_executable(any_executable); - return success; -} - -void ROSDefaultExecutor::spin() { - if (spinning.exchange(true)) { - throw std::runtime_error("spin() called while already spinning"); - } - RCPPUTILS_SCOPE_EXIT(this->spinning.store(false);); - while (rclcpp::ok(this->context_) && spinning.load()) { - rclcpp::AnyExecutable any_executable; - if (get_next_executable(any_executable)) { - execute_any_executable(any_executable); - } - } -} - -size_t ROSDefaultMultithreadedExecutor::get_number_of_threads() { - return number_of_threads_; -} - -ROSDefaultMultithreadedExecutor::ROSDefaultMultithreadedExecutor( - rclcpp::ExecutorOptions const &options, - int number_of_threads, - std::chrono::nanoseconds next_exec_timeout) - : Executor(options) { - std::lock_guard wait_lock(ROSDefaultMultithreadedExecutor::shared_wait_mutex_); - wait_mutex_set_[this] = std::make_shared(); - number_of_threads_ = number_of_threads; - next_exec_timeout_ = next_exec_timeout; - logger_ = create_logger(); -} - -void ROSDefaultMultithreadedExecutor::spin() { - if (spinning.exchange(true)) { - throw std::runtime_error("spin() called while already spinning"); - } - - std::vector threads; - size_t thread_id = 0; - { - auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; - auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); - std::lock_guard - wait_lock(low_priority_wait_mutex); - for (; thread_id < number_of_threads_ - 1; ++thread_id) { - auto const func = std::bind(&ROSDefaultMultithreadedExecutor::run, this, thread_id); - threads.emplace_back(func); - } - } - run(thread_id); - for (auto& thread : threads) { - thread.join(); - } -} - -void ROSDefaultMultithreadedExecutor::run(size_t thread_number) { - while (rclcpp::ok(this->context_) && spinning.load()) { - rclcpp::AnyExecutable any_exec; + auto node_it = weak_nodes_.begin(); + auto gc_it = guard_conditions_.begin(); + while (node_it != 
weak_nodes_.end()) + { + if (node_it->expired()) { - auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; - auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); - std::lock_guard - wait_lock(low_priority_wait_mutex); - if (!rclcpp::ok(this->context_) || !spinning.load()) { - return; - } - if (!get_next_executable(any_exec, next_exec_timeout_)) { - continue; - } - if (any_exec.timer) { - // Guard against multiple threads getting the same timer. - if (scheduled_timers_.count(any_exec.timer) != 0) { - // Make sure that any_exec's callback group is reset before - // the lock is released. - if (any_exec.callback_group) { - any_exec.callback_group->can_be_taken_from().store(true); - } - continue; - } - scheduled_timers_.insert(any_exec.timer); - } + node_it = weak_nodes_.erase(node_it); + memory_strategy_->remove_guard_condition(*gc_it); + gc_it = guard_conditions_.erase(gc_it); } - - execute_any_executable(any_exec); - - if (any_exec.timer) { - auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; - auto high_priority_wait_mutex = wait_mutex->get_high_priority_lockable(); - std::lock_guard - wait_lock(high_priority_wait_mutex); - auto const it = scheduled_timers_.find(any_exec.timer); - if (it != scheduled_timers_.end()) { - scheduled_timers_.erase(it); - } + else + { + ++node_it; + ++gc_it; } - // Clear the callback_group to prevent the AnyExecutable destructor from - // resetting the callback group `can_be_taken_from` - any_exec.callback_group.reset(); + } } + // clear wait set + rcl_ret_t ret = rcl_wait_set_clear(&wait_set_); + if (ret != RCL_RET_OK) + { + rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't clear wait set"); + } + + // The size of waitables are accounted for in size of the other entities + ret = rcl_wait_set_resize( + &wait_set_, memory_strategy_->number_of_ready_subscriptions(), + memory_strategy_->number_of_guard_conditions(), memory_strategy_->number_of_ready_timers(), + memory_strategy_->number_of_ready_clients(), memory_strategy_->number_of_ready_services(), + memory_strategy_->number_of_ready_events()); + if (RCL_RET_OK != ret) + { + rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't resize the wait set"); + } + + if (!memory_strategy_->add_handles_to_wait_set(&wait_set_)) + { + throw std::runtime_error("Couldn't fill wait set"); + } + } + rcl_ret_t status = + rcl_wait(&wait_set_, std::chrono::duration_cast(timeout).count()); + if (status == RCL_RET_WAIT_SET_EMPTY) + { + RCUTILS_LOG_WARN_NAMED( + "rclcpp", + "empty wait set received in rcl_wait(). 
This should never happen."); + } + else if (status != RCL_RET_OK && status != RCL_RET_TIMEOUT) + { + using rclcpp::exceptions::throw_from_rcl_error; + throw_from_rcl_error(status, "rcl_wait() failed"); + } + + // check the null handles in the wait set and remove them from the handles in memory strategy + // for callback-based entities + memory_strategy_->remove_null_handles(&wait_set_); } -bool ROSDefaultMultithreadedExecutor::get_next_executable(rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout) { - bool success = false; - // Check to see if there are any subscriptions or timers needing service - // TODO(wjwwood): improve run to run efficiency of this function +bool ROSDefaultExecutor::get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout) +{ + bool success = false; + // Check to see if there are any subscriptions or timers needing service + // TODO(wjwwood): improve run to run efficiency of this function + // sched_yield(); + wait_for_work(timeout); + success = get_next_ready_executable(any_executable); + return success; +} - // try to get an executable - // record the start time - auto const start = std::chrono::steady_clock::now(); +void ROSDefaultExecutor::spin() +{ + if (spinning.exchange(true)) + { + throw std::runtime_error("spin() called while already spinning"); + } + RCPPUTILS_SCOPE_EXIT(this->spinning.store(false);); + while (rclcpp::ok(this->context_) && spinning.load()) + { + rclcpp::AnyExecutable any_executable; + if (get_next_executable(any_executable)) + { + execute_any_executable(any_executable); + } + } +} + +bool ROSDefaultMultithreadedExecutor::get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout) +{ + bool success = false; + // Check to see if there are any subscriptions or timers needing service + // TODO(wjwwood): improve run to run efficiency of this function + + // try to get an executable + // record the start time + auto start = std::chrono::steady_clock::now(); + success = get_next_ready_executable(any_executable); + // and the end time + auto end = std::chrono::steady_clock::now(); + std::stringstream oss; + oss << "{\"operation\":\"get_next_executable\", \"result\":\"" << success << "\", \"duration\":\"" << std::chrono::duration_cast(end - start).count() << "\"}"; + log_entry(logger_, oss.str()); + + // If there are none + if (!success) + { + // Wait for subscriptions or timers to work on + // queue refresh + start = std::chrono::steady_clock::now(); + wait_for_work(timeout); + // and the end time + end = std::chrono::steady_clock::now(); + auto wait_duration = std::chrono::duration_cast(end - start).count(); + oss.str(""); + oss << "{\"operation\":\"wait_for_work\", \"result\":\"" << success << "\", \"wait_duration\":\"" << wait_duration << "\"}"; + log_entry(logger_, oss.str()); + if (!spinning.load()) + { + return false; + } + // Try again + start = std::chrono::steady_clock::now(); success = get_next_ready_executable(any_executable); // and the end time - auto const end = std::chrono::steady_clock::now(); - std::stringstream oss; - oss << "{\"operation\":\"get_next_executable\", \"result\":\"" << success - << "\", \"duration\":\"" - << std::chrono::duration_cast(end - start).count() - << "\"}"; + end = std::chrono::steady_clock::now(); + oss.str(""); + oss << "{\"operation\":\"get_next_executable\", \"result\":\"" << success << "\", \"duration\":\"" << std::chrono::duration_cast(end - start).count() << "\"}"; log_entry(logger_, oss.str()); - - // If there 
are none - if (!success) { - // Wait for subscriptions or timers to work on - // queue refresh - auto const wait_start = std::chrono::steady_clock::now(); - wait_for_work(timeout); - // and the end time - auto const wait_end = std::chrono::steady_clock::now(); - auto const wait_duration = std::chrono::duration_cast( - wait_end - wait_start).count(); - oss.str(""); - oss << "{\"operation\":\"wait_for_work\", \"result\":\"" << success - << "\", \"wait_duration\":\"" << wait_duration << "\"}"; - log_entry(logger_, oss.str()); - if (!spinning.load()) { - return false; - } - // Try again - auto const retry_start = std::chrono::steady_clock::now(); - success = get_next_ready_executable(any_executable); - // and the end time - auto const retry_end = std::chrono::steady_clock::now(); - oss.str(""); - oss << "{\"operation\":\"get_next_executable\", \"result\":\"" << success - << "\", \"duration\":\"" - << std::chrono::duration_cast( - retry_end - retry_start).count() - << "\"}"; - log_entry(logger_, oss.str()); - } - return success; + } + return success; } -} // namespace priority_executor +std::unordered_map> ROSDefaultMultithreadedExecutor::wait_mutex_set_; +std::mutex ROSDefaultMultithreadedExecutor::shared_wait_mutex_; + +ROSDefaultMultithreadedExecutor::ROSDefaultMultithreadedExecutor( + const rclcpp::ExecutorOptions &options, + int number_of_threads, std::chrono::nanoseconds next_exec_timeout) + : Executor(options) +{ + std::lock_guard wait_lock(ROSDefaultMultithreadedExecutor::shared_wait_mutex_); + wait_mutex_set_[this] = std::make_shared(); + number_of_threads_ = number_of_threads; + next_exec_timeout_ = next_exec_timeout; + logger_ = create_logger(); +} + +void ROSDefaultMultithreadedExecutor::spin() +{ + if (spinning.exchange(true)) + { + throw std::runtime_error("spin() called while already spinning"); + } + + std::vector threads; + size_t thread_id = 0; + { + auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; + auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); + std::lock_guard wait_lock(low_priority_wait_mutex); + for (; thread_id < number_of_threads_ - 1; ++thread_id) + { + auto func = std::bind(&ROSDefaultMultithreadedExecutor::run, this, thread_id); + threads.emplace_back(func); + } + } + run(thread_id); + for (auto &thread : threads) + { + thread.join(); + } +} + +void ROSDefaultMultithreadedExecutor::run(__attribute__((unused)) size_t _thread_number) { + while (rclcpp::ok(this->context_) && spinning.load()) + { + rclcpp::AnyExecutable any_exec; + { + auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; + auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); + std::lock_guard wait_lock(low_priority_wait_mutex); + if (!rclcpp::ok(this->context_) || !spinning.load()) + { + return; + } + if (!get_next_executable(any_exec, next_exec_timeout_)) + { + continue; + } + if (any_exec.timer) + { + // Guard against multiple threads getting the same timer. + if (scheduled_timers_.count(any_exec.timer) != 0) + { + // Make sure that any_exec's callback group is reset before + // the lock is released. 
+ if (any_exec.callback_group) + { + any_exec.callback_group->can_be_taken_from().store(true); + } + continue; + } + scheduled_timers_.insert(any_exec.timer); + } + } + // if (yield_before_execute_) + // { + // std::this_thread::yield(); + // } + + execute_any_executable(any_exec); + + if (any_exec.timer) + { + auto wait_mutex = ROSDefaultMultithreadedExecutor::wait_mutex_set_[this]; + auto high_priority_wait_mutex = wait_mutex->get_high_priority_lockable(); + std::lock_guard wait_lock(high_priority_wait_mutex); + auto it = scheduled_timers_.find(any_exec.timer); + if (it != scheduled_timers_.end()) + { + scheduled_timers_.erase(it); + } + } + // Clear the callback_group to prevent the AnyExecutable destructor from + // resetting the callback group `can_be_taken_from` + any_exec.callback_group.reset(); + } +} diff --git a/src/priority_executor/src/multithread_priority_executor.cpp b/src/priority_executor/src/multithread_priority_executor.cpp index 7619413..ed691c1 100755 --- a/src/priority_executor/src/multithread_priority_executor.cpp +++ b/src/priority_executor/src/multithread_priority_executor.cpp @@ -1,287 +1,114 @@ #include "priority_executor/multithread_priority_executor.hpp" -#include "priority_executor/performance_monitor.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "rcpputils/scope_exit.hpp" -#include "rclcpp/any_executable.hpp" -#include "simple_timer/rt-sched.hpp" - -namespace priority_executor { - -std::unordered_map> - MultithreadTimedExecutor::wait_mutex_set_; -std::mutex MultithreadTimedExecutor::shared_wait_mutex_; - -MultithreadTimedExecutor::MultithreadTimedExecutor( - rclcpp::ExecutorOptions const &options, - std::string const &name, - int number_of_threads, - std::chrono::nanoseconds next_exec_timeout) - : TimedExecutor(options, name) { - std::lock_guard wait_lock(MultithreadTimedExecutor::shared_wait_mutex_); - wait_mutex_set_[this] = std::make_shared(); - number_of_threads_ = number_of_threads; - next_exec_timeout_ = next_exec_timeout; - logger_ = create_logger(); -} - -size_t MultithreadTimedExecutor::get_number_of_threads() +namespace timed_executor { - return number_of_threads_; -} - -void MultithreadTimedExecutor::spin() { - if (spinning.exchange(true)) { - throw std::runtime_error("spin() called while already spinning"); - } - - std::vector threads; - size_t thread_id = 0; + std::unordered_map> + MultithreadTimedExecutor::wait_mutex_set_; + std::mutex MultithreadTimedExecutor::shared_wait_mutex_; + MultithreadTimedExecutor::MultithreadTimedExecutor( + const rclcpp::ExecutorOptions &options, + std::string name, + int number_of_threads, std::chrono::nanoseconds next_exec_timeout) + : TimedExecutor(options, name) { - auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; - auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); - std::lock_guard - wait_lock(low_priority_wait_mutex); - for (; thread_id < number_of_threads_ - 1; ++thread_id) { - auto const func = std::bind(&MultithreadTimedExecutor::run, this, thread_id); - threads.emplace_back(func); - } + std::lock_guard wait_lock(MultithreadTimedExecutor::shared_wait_mutex_); + wait_mutex_set_[this] = std::make_shared(); + number_of_threads_ = number_of_threads; + next_exec_timeout_ = next_exec_timeout; + logger_ = create_logger(); } - run(thread_id); - for (auto& thread : threads) { - thread.join(); - } - spinning.store(false); -} -void MultithreadTimedExecutor::run(size_t _thread_number) { - // set affinity - cpu_set_t cpuset; - 
CPU_ZERO(&cpuset); - CPU_SET(_thread_number, &cpuset); - int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); - if (rc != 0) + size_t MultithreadTimedExecutor::get_number_of_threads() { - RCLCPP_ERROR(rclcpp::get_logger("priority_executor"), - "Error calling pthread_setaffinity_np: %d", rc); + return number_of_threads_; } - // Create thread context information for logs - nlohmann::json thread_info; - thread_info["thread_id"] = _thread_number; - thread_info["cpu_affinity"] = _thread_number; - std::string thread_info_str = thread_info.dump(); - - while (rclcpp::ok(this->context_) && spinning.load()) { - rclcpp::AnyExecutable any_exec; + void MultithreadTimedExecutor::spin() + { + if (spinning.exchange(true)) { - auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; - auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); - std::lock_guard - wait_lock(low_priority_wait_mutex); - if (!rclcpp::ok(this->context_) || !spinning.load()) { - return; - } - if (!get_next_executable(any_exec, next_exec_timeout_)) { - continue; - } - if (any_exec.timer) { - // Guard against multiple threads getting the same timer. - if (scheduled_timers_.count(any_exec.timer) != 0) { - // Make sure that any_exec's callback group is reset before - // the lock is released. - if (any_exec.callback_group) { - any_exec.callback_group->can_be_taken_from().store(true); - } - continue; - } - scheduled_timers_.insert(any_exec.timer); - } + throw std::runtime_error("spin() called while already spinning"); } - // Record execution start with thread info - auto start = std::chrono::steady_clock::now(); - uint64_t start_time = std::chrono::duration_cast( - start.time_since_epoch()).count(); - - PerformanceEvent start_event; - start_event.timestamp = start_time; - start_event.type = PerformanceEventType::CALLBACK_START; - start_event.executor_id = getExecutorId(); - - if (any_exec.node_base) { - start_event.node_name = any_exec.node_base->get_name(); - - // Enhanced callback identification based on executable type - if (any_exec.timer) { - // For timer callbacks - std::stringstream ss; - - // retrieve the timer period - int64_t period; - rcl_timer_get_period(any_exec.timer->get_timer_handle().get(), &period); - - // convert period (which is in nanoseconds) to milliseconds - std::chrono::nanoseconds period_ns(period); - std::chrono::milliseconds period_ms = std::chrono::duration_cast(period_ns); - - ss << "timer_" << period_ms.count() << "ms"; - start_event.callback_name = ss.str(); - - // Add memory address as unique identifier - std::stringstream addr; - addr << "@" << any_exec.timer.get(); - nlohmann::json additional; - additional["callback_type"] = "timer"; - additional["timer_addr"] = addr.str(); - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - else if (any_exec.subscription) { - // For subscription callbacks - start_event.callback_name = any_exec.subscription->get_topic_name(); - - nlohmann::json additional; - additional["callback_type"] = "subscription"; - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - else if (any_exec.service) { - start_event.callback_name = any_exec.service->get_service_name(); - - nlohmann::json additional; - additional["callback_type"] = "service"; - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - else if (any_exec.client) { - start_event.callback_name = "client_callback"; - - nlohmann::json additional; - 
additional["callback_type"] = "client"; - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - else if (any_exec.waitable) { - start_event.callback_name = "waitable_callback"; - - nlohmann::json additional; - additional["callback_type"] = "waitable"; - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - - // Add chain information - std::shared_ptr executable = nullptr; - if (prio_memory_strategy_) { - if (any_exec.timer) { - executable = prio_memory_strategy_->get_priority_settings( - any_exec.timer->get_timer_handle()); - } else if (any_exec.subscription) { - executable = prio_memory_strategy_->get_priority_settings( - any_exec.subscription->get_subscription_handle()); - } - } - - if (executable) { - start_event.chain_id = executable->chain_id; - start_event.is_first_in_chain = executable->is_first_in_chain; - start_event.is_last_in_chain = executable->is_last_in_chain; - - if (executable->deadlines && !executable->deadlines->empty()) { - start_event.deadline = executable->deadlines->front(); - } - - // Include the priority information - try { - nlohmann::json additional = start_event.additional_data.empty() ? - nlohmann::json() : nlohmann::json::parse(start_event.additional_data); - - additional["priority"] = executable->priority; - - if (executable->chain_id >= 0) { - additional["chain_id"] = executable->chain_id; - } - - if (executable->sched_type != ExecutableScheduleType::DEFAULT) { - additional["schedule_type"] = static_cast(executable->sched_type); - } - - start_event.additional_data = additional.dump(); - } catch (...) { - // If there was an error parsing the JSON, create new JSON object - nlohmann::json additional; - additional["priority"] = executable->priority; - additional["thread_info"] = thread_info; - start_event.additional_data = additional.dump(); - } - } - } else { - start_event.additional_data = thread_info_str; - } - - PerformanceMonitor::getInstance().recordEvent(start_event); - - // Execute the callback - execute_any_executable(any_exec); - - // Record execution completion - auto end = std::chrono::steady_clock::now(); - uint64_t end_time = std::chrono::duration_cast( - end.time_since_epoch()).count(); - uint64_t processing_time = end_time - start_time; - - // Create identical event structure but for end event - PerformanceEvent end_event = start_event; - end_event.timestamp = end_time; - end_event.type = PerformanceEventType::CALLBACK_END; - end_event.processing_time = processing_time; - - // Update the additional data to include processing time - try { - nlohmann::json additional = end_event.additional_data.empty() ? - nlohmann::json() : nlohmann::json::parse(end_event.additional_data); - additional["processing_time_us"] = processing_time; - end_event.additional_data = additional.dump(); - } catch (...) 
{ - // If JSON parsing failed, create a new object - nlohmann::json additional; - additional["processing_time_us"] = processing_time; - additional["thread_info"] = thread_info; - end_event.additional_data = additional.dump(); - } - - PerformanceMonitor::getInstance().recordEvent(end_event); - - if (any_exec.timer) { - auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; - auto high_priority_wait_mutex = wait_mutex->get_high_priority_lockable(); - std::lock_guard - wait_lock(high_priority_wait_mutex); - auto const it = scheduled_timers_.find(any_exec.timer); - if (it != scheduled_timers_.end()) { - scheduled_timers_.erase(it); - } - } - // Clear the callback_group to prevent the AnyExecutable destructor from - // resetting the callback group `can_be_taken_from` - any_exec.callback_group.reset(); - if (prio_memory_strategy_ != nullptr) + std::vector threads; + size_t thread_id = 0; { auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); std::lock_guard wait_lock(low_priority_wait_mutex); - prio_memory_strategy_->post_execute(any_exec, _thread_number); + for (; thread_id < number_of_threads_ - 1; ++thread_id) + { + auto func = std::bind(&MultithreadTimedExecutor::run, this, thread_id); + threads.emplace_back(func); + } + } + run(thread_id); + for (auto &thread : threads) + { + thread.join(); + } + } + + void MultithreadTimedExecutor::run(size_t thread_number) + { + // set affinity + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(thread_number, &cpuset); + int rc = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); + if (rc != 0) + { + std::cout << "Error calling pthread_setaffinity_np: " << rc << "\n"; + } + + while (rclcpp::ok(this->context_) && spinning.load()) + { + rclcpp::AnyExecutable any_executable; + { + auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; + auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); + std::lock_guard wait_lock(low_priority_wait_mutex); + if (!rclcpp::ok(this->context_) || !spinning.load()) + { + return; + } + if (!get_next_executable(any_executable, next_exec_timeout_)) + { + continue; + } + if (any_executable.timer) + { + if (scheduled_timers_.count(any_executable.timer) != 0) + { + if (any_executable.callback_group) + { + any_executable.callback_group->can_be_taken_from().store(true); + } + continue; + } + scheduled_timers_.insert(any_executable.timer); + } + } + execute_any_executable(any_executable); + if (any_executable.timer) + { + auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; + auto high_priority_wait_mutex = wait_mutex->get_high_priority_lockable(); + std::lock_guard wait_lock(high_priority_wait_mutex); + auto it = scheduled_timers_.find(any_executable.timer); + if (it != scheduled_timers_.end()) + { + scheduled_timers_.erase(it); + } + } + any_executable.callback_group.reset(); + if (prio_memory_strategy_ != nullptr) + { + auto wait_mutex = MultithreadTimedExecutor::wait_mutex_set_[this]; + auto low_priority_wait_mutex = wait_mutex->get_low_priority_lockable(); + std::lock_guard wait_lock(low_priority_wait_mutex); + // std::shared_ptr> prio_memory_strategy_ = std::dynamic_pointer_cast(memory_strategy_); + prio_memory_strategy_->post_execute(any_executable, thread_number); + } } } } -} // namespace priority_executor diff --git a/src/priority_executor/src/performance_monitor.cpp b/src/priority_executor/src/performance_monitor.cpp deleted file mode 100644 index 1c5f569..0000000 --- 
a/src/priority_executor/src/performance_monitor.cpp +++ /dev/null @@ -1,149 +0,0 @@ -#include "priority_executor/performance_monitor.hpp" -#include -#include -#include -#include -#include - -namespace priority_executor { - -PerformanceMonitor& PerformanceMonitor::getInstance() { - static PerformanceMonitor instance; - return instance; -} - -PerformanceMonitor::PerformanceMonitor() { - event_buffer_.reserve(buffer_size_); - std::filesystem::create_directories(dump_path_); -} - -void PerformanceMonitor::recordEvent(const PerformanceEvent& event) { - if (!enabled_) return; - - { - std::lock_guard lock(event_mutex_); - event_buffer_.push_back(event); - - if (auto_dump_threshold_ > 0 && event_buffer_.size() >= auto_dump_threshold_) { - checkAndAutoDump(); - } - } -} - -void PerformanceMonitor::setBufferSize(size_t size) { - std::lock_guard lock(event_mutex_); - buffer_size_ = size; - event_buffer_.reserve(size); -} - -void PerformanceMonitor::setAutoDumpThreshold(size_t threshold) { - auto_dump_threshold_ = threshold; -} - -void PerformanceMonitor::setDumpPath(const std::string& path) { - dump_path_ = path; - std::filesystem::create_directories(dump_path_); -} - -void PerformanceMonitor::enable(bool enabled) { - enabled_ = enabled; -} - -void PerformanceMonitor::clear() { - std::lock_guard lock(event_mutex_); - event_buffer_.clear(); -} - -std::string PerformanceMonitor::eventsToJson() const { - nlohmann::json root = nlohmann::json::array(); - - for (const auto& event : event_buffer_) { - nlohmann::json event_json; - - event_json["timestamp"] = event.timestamp; - - switch (event.type) { - case PerformanceEventType::CALLBACK_READY: - event_json["type"] = "callback_ready"; - break; - case PerformanceEventType::CALLBACK_START: - event_json["type"] = "callback_start"; - break; - case PerformanceEventType::CALLBACK_END: - event_json["type"] = "callback_end"; - break; - case PerformanceEventType::DEADLINE_MISSED: - event_json["type"] = "deadline_missed"; - break; - case PerformanceEventType::DEADLINE_MET: - event_json["type"] = "deadline_met"; - break; - case PerformanceEventType::CHAIN_START: - event_json["type"] = "chain_start"; - break; - case PerformanceEventType::CHAIN_END: - event_json["type"] = "chain_end"; - break; - } - - event_json["node_name"] = event.node_name; - event_json["callback_name"] = event.callback_name; - event_json["chain_id"] = event.chain_id; - event_json["is_first_in_chain"] = event.is_first_in_chain; - event_json["is_last_in_chain"] = event.is_last_in_chain; - - if (event.deadline > 0) { - event_json["deadline"] = event.deadline; - } - - if (event.processing_time > 0) { - event_json["processing_time"] = event.processing_time; - } - - event_json["executor_id"] = event.executor_id; - - if (!event.additional_data.empty()) { - try { - event_json["additional_data"] = nlohmann::json::parse(event.additional_data); - } catch (...) 
{ - event_json["additional_data"] = event.additional_data; - } - } - - root.push_back(event_json); - } - - return root.dump(2); // Indent with 2 spaces -} - -bool PerformanceMonitor::dumpToFile(const std::string& filename) { - std::lock_guard lock(event_mutex_); - - if (event_buffer_.empty()) { - return true; // Nothing to dump - } - - std::string full_path = dump_path_ + "/" + filename; - std::ofstream file(full_path); - if (!file.is_open()) { - return false; - } - - file << eventsToJson(); - event_buffer_.clear(); - return true; -} - -void PerformanceMonitor::checkAndAutoDump() { - if (event_buffer_.size() >= auto_dump_threshold_) { - auto now = std::chrono::system_clock::now(); - auto timestamp = std::chrono::duration_cast( - now.time_since_epoch()).count(); - - std::stringstream ss; - ss << "performance_log_" << timestamp << ".json"; - dumpToFile(ss.str()); - } -} - -} // namespace priority_executor \ No newline at end of file diff --git a/src/priority_executor/src/priority_executor.cpp b/src/priority_executor/src/priority_executor.cpp index f9eee3e..e305c77 100755 --- a/src/priority_executor/src/priority_executor.cpp +++ b/src/priority_executor/src/priority_executor.cpp @@ -19,239 +19,236 @@ #include "rclcpp/utilities.hpp" #include #include -#include // for sleep - -namespace priority_executor +// for sleep +#include +namespace timed_executor { -TimedExecutor::TimedExecutor(rclcpp::ExecutorOptions const &options, std::string name) - : rclcpp::Executor(options) { - this->name = std::move(name); + TimedExecutor::TimedExecutor(const rclcpp::ExecutorOptions &options, std::string name) + : rclcpp::Executor(options) + { + this->name = name; logger_ = create_logger(); -} + } -TimedExecutor::~TimedExecutor() = default; + TimedExecutor::~TimedExecutor() {} -void TimedExecutor::spin() { - if (spinning.exchange(true)) { - throw std::runtime_error("spin() called while already spinning"); + void + TimedExecutor::spin() + { + if (spinning.exchange(true)) + { + throw std::runtime_error("spin() called while already spinning"); } RCLCPP_SCOPE_EXIT(this->spinning.store(false);); + while (rclcpp::ok(this->context_) && spinning.load()) + { + rclcpp::AnyExecutable any_executable; + // std::cout<number_of_ready_timers()<name << std::endl; + // size_t ready = memory_strategy_->number_of_ready_subscriptions(); + // std::cout << "ready:" << ready << std::endl; - using namespace std::chrono_literals; - std::chrono::time_point start = std::chrono::steady_clock::now(); - - while (rclcpp::ok(this->context_) && spinning.load() && (std::chrono::steady_clock::now() - start) <= 5s) { - rclcpp::AnyExecutable any_executable; - // std::cout<number_of_ready_timers()<name << std::endl; - // size_t ready = memory_strategy_->number_of_ready_subscriptions(); - // std::cout << "ready:" << ready << std::endl; - - if (get_next_executable(any_executable, std::chrono::nanoseconds(-1))) { - execute_any_executable(any_executable); - // make sure memory_strategy_ is an instance of PriorityMemoryStrategy - if (prio_memory_strategy_ != nullptr) { - prio_memory_strategy_->post_execute(any_executable); - } + if (get_next_executable(any_executable, std::chrono::nanoseconds(-1))) + { + execute_any_executable(any_executable); + // make sure memory_strategy_ is an instance of PriorityMemoryStrategy + if (prio_memory_strategy_!=nullptr) + { + prio_memory_strategy_->post_execute(any_executable); } + } } - RCLCPP_INFO(rclcpp::get_logger("priority_executor"), "priority executor shutdown"); -} + std::cout << "shutdown" << std::endl; + } -bool 
TimedExecutor::get_next_executable( - rclcpp::AnyExecutable& any_executable, - std::chrono::nanoseconds timeout) { + bool TimedExecutor::get_next_executable(rclcpp::AnyExecutable &any_executable, std::chrono::nanoseconds timeout) + { bool success = false; // Check to see if there are any subscriptions or timers needing service // TODO(wjwwood): improve run to run efficiency of this function // sched_yield(); // sleep for 10us // usleep(20); - - auto const start = std::chrono::steady_clock::now(); + auto start = std::chrono::steady_clock::now(); wait_for_work(timeout); - auto const end = std::chrono::steady_clock::now(); - auto const wait_duration = std::chrono::duration_cast(end - start); - + auto end = std::chrono::steady_clock::now(); + auto wait_duration = std::chrono::duration_cast(end - start); std::ostringstream oss; oss << "{\"operation\": \"wait_for_work\", \"wait_duration\": " << wait_duration.count() << "}"; log_entry(logger_, oss.str()); - auto const exec_start = std::chrono::steady_clock::now(); + start = std::chrono::steady_clock::now(); success = get_next_ready_executable(any_executable); - auto const exec_end = std::chrono::steady_clock::now(); - auto const get_next_duration = std::chrono::duration_cast(exec_end - exec_start); - + end = std::chrono::steady_clock::now(); + auto get_next_duration = std::chrono::duration_cast(end - start); oss.str(""); - oss << "{\"operation\": \"get_next_executable\", \"duration\": " << get_next_duration.count() - << ", \"result\": " << success << "}"; + oss << "{\"operation\": \"get_next_executable\", \"duration\": " << get_next_duration.count() << ", \"result\": " << success << "}"; log_entry(logger_, oss.str()); - return success; -} + } -// TODO: since we're calling this more often, clean it up a bit -void -TimedExecutor::wait_for_work(std::chrono::nanoseconds timeout) -{ + // TODO: since we're calling this more often, clean it up a bit + void + TimedExecutor::wait_for_work(std::chrono::nanoseconds timeout) + { { - std::unique_lock lock(memory_strategy_mutex_); + std::unique_lock lock(memory_strategy_mutex_); - // Collect the subscriptions and timers to be waited on - memory_strategy_->clear_handles(); - bool has_invalid_weak_nodes = memory_strategy_->collect_entities(weak_nodes_); + // Collect the subscriptions and timers to be waited on + memory_strategy_->clear_handles(); + bool has_invalid_weak_nodes = memory_strategy_->collect_entities(weak_nodes_); - // Clean up any invalid nodes, if they were detected - if (has_invalid_weak_nodes) + // Clean up any invalid nodes, if they were detected + if (has_invalid_weak_nodes) + { + auto node_it = weak_nodes_.begin(); + auto gc_it = guard_conditions_.begin(); + while (node_it != weak_nodes_.end()) { - auto node_it = weak_nodes_.begin(); - auto gc_it = guard_conditions_.begin(); - while (node_it != weak_nodes_.end()) - { - if (node_it->expired()) - { - node_it = weak_nodes_.erase(node_it); - memory_strategy_->remove_guard_condition(*gc_it); - gc_it = guard_conditions_.erase(gc_it); - } - else - { - ++node_it; - ++gc_it; - } - } - } - // clear wait set - rcl_ret_t ret = rcl_wait_set_clear(&wait_set_); - if (ret != RCL_RET_OK) - { - rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't clear wait set"); + if (node_it->expired()) + { + node_it = weak_nodes_.erase(node_it); + memory_strategy_->remove_guard_condition(*gc_it); + gc_it = guard_conditions_.erase(gc_it); + } + else + { + ++node_it; + ++gc_it; + } } + } + // clear wait set + rcl_ret_t ret = rcl_wait_set_clear(&wait_set_); + if (ret != 
RCL_RET_OK) + { + rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't clear wait set"); + } - // The size of waitables are accounted for in size of the other entities - ret = rcl_wait_set_resize( - &wait_set_, memory_strategy_->number_of_ready_subscriptions(), - memory_strategy_->number_of_guard_conditions(), memory_strategy_->number_of_ready_timers(), - memory_strategy_->number_of_ready_clients(), memory_strategy_->number_of_ready_services(), - memory_strategy_->number_of_ready_events()); - if (RCL_RET_OK != ret) - { - rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't resize the wait set"); - } + // The size of waitables are accounted for in size of the other entities + ret = rcl_wait_set_resize( + &wait_set_, memory_strategy_->number_of_ready_subscriptions(), + memory_strategy_->number_of_guard_conditions(), memory_strategy_->number_of_ready_timers(), + memory_strategy_->number_of_ready_clients(), memory_strategy_->number_of_ready_services(), + memory_strategy_->number_of_ready_events()); + if (RCL_RET_OK != ret) + { + rclcpp::exceptions::throw_from_rcl_error(ret, "Couldn't resize the wait set"); + } - if (!memory_strategy_->add_handles_to_wait_set(&wait_set_)) - { - throw std::runtime_error("Couldn't fill wait set"); - } + if (!memory_strategy_->add_handles_to_wait_set(&wait_set_)) + { + throw std::runtime_error("Couldn't fill wait set"); + } } rcl_ret_t status = rcl_wait(&wait_set_, std::chrono::duration_cast(timeout).count()); if (status == RCL_RET_WAIT_SET_EMPTY) { - RCUTILS_LOG_WARN_NAMED( - "rclcpp", - "empty wait set received in rcl_wait(). This should never happen."); + RCUTILS_LOG_WARN_NAMED( + "rclcpp", + "empty wait set received in rcl_wait(). This should never happen."); } else if (status != RCL_RET_OK && status != RCL_RET_TIMEOUT) { - using rclcpp::exceptions::throw_from_rcl_error; - throw_from_rcl_error(status, "rcl_wait() failed"); + using rclcpp::exceptions::throw_from_rcl_error; + throw_from_rcl_error(status, "rcl_wait() failed"); } // check the null handles in the wait set and remove them from the handles in memory strategy // for callback-based entities memory_strategy_->remove_null_handles(&wait_set_); -} -bool -TimedExecutor::get_next_ready_executable(rclcpp::AnyExecutable &any_executable) -{ + } + bool + TimedExecutor::get_next_ready_executable(rclcpp::AnyExecutable &any_executable) + { bool success = false; if (use_priorities) { - std::shared_ptr> strat = std::dynamic_pointer_cast>(memory_strategy_); - strat->get_next_executable(any_executable, weak_nodes_); - if (any_executable.timer || any_executable.subscription || any_executable.service || any_executable.client || any_executable.waitable) - { - success = true; - } + std::shared_ptr> strat = std::dynamic_pointer_cast>(memory_strategy_); + strat->get_next_executable(any_executable, weak_nodes_); + if (any_executable.timer || any_executable.subscription || any_executable.service || any_executable.client || any_executable.waitable) + { + success = true; + } } else { - // Check the timers to see if there are any that are ready - memory_strategy_->get_next_timer(any_executable, weak_nodes_); - if (any_executable.timer) + // Check the timers to see if there are any that are ready + memory_strategy_->get_next_timer(any_executable, weak_nodes_); + if (any_executable.timer) + { + std::cout << "got timer" << std::endl; + success = true; + } + if (!success) + { + // Check the subscriptions to see if there are any that are ready + memory_strategy_->get_next_subscription(any_executable, weak_nodes_); + if 
(any_executable.subscription) { - std::cout << "got timer" << std::endl; - success = true; + // std::cout << "got subs" << std::endl; + success = true; } - if (!success) + } + if (!success) + { + // Check the services to see if there are any that are ready + memory_strategy_->get_next_service(any_executable, weak_nodes_); + if (any_executable.service) { - // Check the subscriptions to see if there are any that are ready - memory_strategy_->get_next_subscription(any_executable, weak_nodes_); - if (any_executable.subscription) - { - // std::cout << "got subs" << std::endl; - success = true; - } + std::cout << "got serv" << std::endl; + success = true; } - if (!success) + } + if (!success) + { + // Check the clients to see if there are any that are ready + memory_strategy_->get_next_client(any_executable, weak_nodes_); + if (any_executable.client) { - // Check the services to see if there are any that are ready - memory_strategy_->get_next_service(any_executable, weak_nodes_); - if (any_executable.service) - { - std::cout << "got serv" << std::endl; - success = true; - } + std::cout << "got client" << std::endl; + success = true; } - if (!success) + } + if (!success) + { + // Check the waitables to see if there are any that are ready + memory_strategy_->get_next_waitable(any_executable, weak_nodes_); + if (any_executable.waitable) { - // Check the clients to see if there are any that are ready - memory_strategy_->get_next_client(any_executable, weak_nodes_); - if (any_executable.client) - { - std::cout << "got client" << std::endl; - success = true; - } - } - if (!success) - { - // Check the waitables to see if there are any that are ready - memory_strategy_->get_next_waitable(any_executable, weak_nodes_); - if (any_executable.waitable) - { - std::cout << "got wait" << std::endl; - success = true; - } + std::cout << "got wait" << std::endl; + success = true; } + } } // At this point any_exec should be valid with either a valid subscription // or a valid timer, or it should be a null shared_ptr if (success) { - // If it is valid, check to see if the group is mutually exclusive or - // not, then mark it accordingly - using rclcpp::callback_group::CallbackGroupType; - if ( - any_executable.callback_group && - any_executable.callback_group->type() == rclcpp::CallbackGroupType::MutuallyExclusive) - { - // It should not have been taken otherwise - assert(any_executable.callback_group->can_be_taken_from().load()); - // Set to false to indicate something is being run from this group - // This is reset to true either when the any_exec is executed or when the - // any_exec is destructued - any_executable.callback_group->can_be_taken_from().store(false); - } + // If it is valid, check to see if the group is mutually exclusive or + // not, then mark it accordingly + using rclcpp::callback_group::CallbackGroupType; + if ( + any_executable.callback_group && + any_executable.callback_group->type() == rclcpp::CallbackGroupType::MutuallyExclusive) + { + // It should not have been taken otherwise + assert(any_executable.callback_group->can_be_taken_from().load()); + // Set to false to indicate something is being run from this group + // This is reset to true either when the any_exec is executed or when the + // any_exec is destructued + any_executable.callback_group->can_be_taken_from().store(false); + } } // If there is no ready executable, return false return success; -} + } -void TimedExecutor::set_use_priorities(bool use_prio) -{ + void TimedExecutor::set_use_priorities(bool use_prio) + { use_priorities = 
use_prio; -} + } } // namespace timed_executor diff --git a/src/priority_executor/src/priority_memory_strategy.cpp b/src/priority_executor/src/priority_memory_strategy.cpp index 68f6295..ba42379 100755 --- a/src/priority_executor/src/priority_memory_strategy.cpp +++ b/src/priority_executor/src/priority_memory_strategy.cpp @@ -1,380 +1,151 @@ #include "priority_executor/priority_memory_strategy.hpp" #include "simple_timer/rt-sched.hpp" - -#include #include -#include - -#include - -using namespace priority_executor; - -template -PriorityMemoryStrategy::PriorityMemoryStrategy(std::shared_ptr allocator) -{ - allocator_ = std::make_shared(*allocator.get()); - logger_ = create_logger(); -} - -template<> -PriorityMemoryStrategy<>::PriorityMemoryStrategy() -{ - allocator_ = std::make_shared(); - logger_ = create_logger(); -} - -std::ostream& priority_executor::operator<<(std::ostream& os, const ExecutableType& obj) -{ - os << static_cast::type>(obj); - return os; -} - -std::ostream& priority_executor::operator<<(std::ostream& os, const ExecutableScheduleType& obj) -{ - os << static_cast::type>(obj); - return os; -} - -std::ostream& priority_executor::operator<<(std::ostream &os, const PriorityExecutable &pe) -{ - os << "sched_type: " << pe.sched_type << ", "; - if (pe.sched_type == ExecutableScheduleType::DEADLINE) { - os << "period: " << pe.period << ", "; - } - os << "priority: " << pe.priority << ", "; - os << "executable_id: " << pe.executable_id << ", "; - os << "chain_id: " << pe.chain_id << ", "; - os << "is_first_in_chain: " << pe.is_first_in_chain << ", "; - os << "is_last_in_chain: " << pe.is_last_in_chain << ", "; - - return os; -} - size_t PriorityExecutable::num_executables; -template<> -std::shared_ptr -PriorityMemoryStrategy<>::get_priority_settings(std::shared_ptr executable) +PriorityExecutable::PriorityExecutable(std::shared_ptr h, int p, ExecutableType t, ExecutableScheduleType sched_type) { - auto search = priority_map.find(executable); - if (search != priority_map.end()) - { - return search->second; - } - return nullptr; -} - -template<> -std::shared_ptr -PriorityMemoryStrategy<>::get_and_reset_priority(std::shared_ptr executable, - ExecutableType t) -{ - // PriorityExecutable *p = get_priority_settings(executable); - std::shared_ptr p = get_priority_settings(executable); - if (p == nullptr) - { - // priority_map[executable] = PriorityExecutable(executable, 0, t); - priority_map[executable] = - std::make_shared(executable, 0, t); - p = priority_map[executable]; - } - // p->can_be_run = true; - return p; -} - -template<> -rcl_allocator_t PriorityMemoryStrategy<>::get_allocator() -{ - return rclcpp::allocator::get_rcl_allocator( - *allocator_.get()); -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_ready_subscriptions() const -{ - size_t number_of_subscriptions = subscription_handles_.size(); - // std::cout << "ready_raw: " << number_of_subscriptions << std::endl; - for (auto waitable : waitable_handles_) - { - number_of_subscriptions += waitable->get_number_of_ready_subscriptions(); - } - return number_of_subscriptions; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_ready_services() const -{ - size_t number_of_services = service_handles_.size(); - for (auto waitable : waitable_handles_) - { - number_of_services += waitable->get_number_of_ready_services(); - } - return number_of_services; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_ready_events() const -{ - size_t number_of_events = 0; - for (auto waitable : waitable_handles_) - { - 
number_of_events += waitable->get_number_of_ready_events(); - } - return number_of_events; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_ready_clients() const -{ - size_t number_of_clients = client_handles_.size(); - for (auto waitable : waitable_handles_) - { - number_of_clients += waitable->get_number_of_ready_clients(); - } - return number_of_clients; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_guard_conditions() const -{ - size_t number_of_guard_conditions = guard_conditions_.size(); - for (auto waitable : waitable_handles_) - { - number_of_guard_conditions += - waitable->get_number_of_ready_guard_conditions(); - } - return number_of_guard_conditions; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_ready_timers() const -{ - size_t number_of_timers = timer_handles_.size(); - for (auto waitable : waitable_handles_) - { - number_of_timers += waitable->get_number_of_ready_timers(); - } - return number_of_timers; -} - -template<> -size_t PriorityMemoryStrategy<>::number_of_waitables() const -{ - return waitable_handles_.size(); -} - -template<> -void PriorityMemoryStrategy<>::set_executable_priority(std::shared_ptr handle, int priority, - ExecutableType t) -{ - // TODO: any sanity checks should go here - priority_map[handle] = - std::make_shared(handle, priority, t); -} - -template<> -void PriorityMemoryStrategy<>::set_executable_priority(std::shared_ptr handle, int priority, - ExecutableType t, ExecutableScheduleType sc, - int chain_index) -{ - // TODO: any sanity checks should go here - priority_map[handle] = - std::make_shared(handle, priority, t, sc); - priority_map[handle]->chain_id = chain_index; -} - -template<> -void PriorityMemoryStrategy<>::set_executable_deadline(std::shared_ptr handle, int period, - ExecutableType t, int chain_id, - std::string name) -{ - // TODO: any sanity checks should go here - priority_map[handle] = - std::make_shared(handle, period, t, ExecutableScheduleType::DEADLINE); - priority_map[handle]->chain_id = chain_id; - priority_map[handle]->name = name; - // is there a deadline queue for this chain id? 
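set_executable_deadline keeps one deadline queue per chain: the first executable registered for a chain id creates the deque, and every later member of the chain is pointed at the same queue, so the whole chain shares a single front deadline. The sketch below shows that lazy, shared-queue bookkeeping; ChainDeadlines and get_or_create are illustrative names, not types from this package.

```cpp
#include <cstdint>
#include <deque>
#include <map>
#include <memory>

struct ChainDeadlines {
  // One shared deadline queue per chain id, created on first use.
  std::map<int, std::shared_ptr<std::deque<uint64_t>>> queues;

  std::shared_ptr<std::deque<uint64_t>> get_or_create(int chain_id) {
    auto it = queues.find(chain_id);
    if (it == queues.end()) {
      it = queues.emplace(chain_id,
                          std::make_shared<std::deque<uint64_t>>()).first;
    }
    return it->second;
  }
};

int main() {
  ChainDeadlines chains;
  auto q = chains.get_or_create(0);
  q->push_back(1'000'000);            // next absolute deadline for chain 0
  auto same = chains.get_or_create(0); // later members of the chain
  return same == q ? 0 : 1;            // all of them see the same queue
}
```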
- auto search = chain_deadlines.find(chain_id); - if (search == chain_deadlines.end()) - { - chain_deadlines[chain_id] = std::make_shared>(); - } - priority_map[handle]->deadlines = chain_deadlines[chain_id].get(); -} - -template<> -int PriorityMemoryStrategy<>::get_priority(std::shared_ptr executable) -{ - auto search = priority_map.find(executable); - if (search != priority_map.end()) - { - return search->second->priority; - } - else - { - return 0; - } -} - -template<> -void PriorityMemoryStrategy<>::set_first_in_chain(std::shared_ptr exec_handle) -{ - get_priority_settings(exec_handle)->is_first_in_chain = true; -} - -template<> -void PriorityMemoryStrategy<>::set_last_in_chain(std::shared_ptr exec_handle) -{ - get_priority_settings(exec_handle)->is_last_in_chain = true; -} - -template<> -void PriorityMemoryStrategy<>::assign_deadlines_queue(std::shared_ptr exec_handle, - std::deque *deadlines) -{ - get_priority_settings(exec_handle)->deadlines = deadlines; -} - -template<> -std::shared_ptr> PriorityMemoryStrategy<>::get_chain_deadlines(int chain_id) -{ - auto search = chain_deadlines.find(chain_id); - if (search != chain_deadlines.end()) - { - return search->second; - } - else - { - return nullptr; - } -} - -PriorityExecutable::PriorityExecutable( - std::shared_ptr h, - int p, - ExecutableType t, - ExecutableScheduleType sched_type) { - handle = std::move(h); std::cout << "priority_executable constructor called" << std::endl; std::cout << "type: " << t << std::endl; + handle = h; type = t; - - if (sched_type == ExecutableScheduleType::CHAIN_INDEPENDENT_PRIORITY - || sched_type == ExecutableScheduleType::CHAIN_AWARE_PRIORITY) { + if (sched_type == CHAIN_INDEPENDENT_PRIORITY || sched_type == CHAIN_AWARE_PRIORITY) + { priority = p; - } else if (sched_type == ExecutableScheduleType::DEADLINE) { - period = p; - // as a tiebreaker - priority = num_executables; } - + else if (sched_type == DEADLINE) + { + period = p; + } this->sched_type = sched_type; + this->executable_id = num_executables; num_executables += 1; } -PriorityExecutable::PriorityExecutable() - : handle(nullptr) - , type(ExecutableType::SUBSCRIPTION) - , priority(0) { +PriorityExecutable::PriorityExecutable() +{ + handle = nullptr; + priority = 0; + type = SUBSCRIPTION; } -void PriorityExecutable::dont_run() { - this->can_be_run = false; +void PriorityExecutable::dont_run() +{ + + this->can_be_run = false; } -void PriorityExecutable::allow_run() { - this->can_be_run = true; +void PriorityExecutable::allow_run() +{ + this->can_be_run = true; } -void PriorityExecutable::increment_counter() { - this->counter += 1; +void PriorityExecutable::increment_counter() +{ + this->counter += 1; } -bool PriorityExecutable::operator==(PriorityExecutable const &other) const { +bool PriorityExecutable::operator==(const PriorityExecutable &other) const +{ std::cout << "PriorityExecutable::operator== called" << std::endl; - return this->handle == other.handle; + if (this->handle == other.handle) + { + return true; + } + else + { + return false; + } } -bool PriorityExecutableComparator::operator()( - PriorityExecutable const *p1, - PriorityExecutable const *p2) const +bool PriorityExecutableComparator::operator()(const PriorityExecutable *p1, const PriorityExecutable *p2) { // since this will be used in a std::set, also check for equality - if (p1 == nullptr || p2 == nullptr) { + if (p1 == nullptr || p2 == nullptr) + { // TODO: realistic value - std::cout << "PriorityExecutableComparator::operator() called with nullptr" - << std::endl; + std::cout 
<< "PriorityExecutableComparator::operator() called with nullptr" << std::endl; return false; } - - if (p1->handle == p2->handle || p1->executable_id == p2->executable_id) { + if (p1->handle == p2->handle) + { + return false; + } + if (p1->executable_id == p2->executable_id) + { return false; } - if (p1->sched_type != p2->sched_type) { + if (p1->sched_type != p2->sched_type) + { // in order: DEADLINE, CHAIN_AWARE, CHAIN_INDEPENDENT return p1->sched_type < p2->sched_type; } - - if (p1->sched_type == ExecutableScheduleType::CHAIN_INDEPENDENT_PRIORITY) { - return p1->priority == p2->priority ? - p1->executable_id < p2->executable_id : - // tie-breaker: lower value runs first - p1->priority < p2->priority; + if (p1->sched_type == CHAIN_INDEPENDENT_PRIORITY) + { + if (p1->priority == p2->priority) + { + return p1->executable_id < p2->executable_id; + } + // lower value runs first + return p1->priority < p2->priority; } - - if (p1->sched_type == ExecutableScheduleType::CHAIN_AWARE_PRIORITY) { - return p1->priority == p2->priority ? - p1->executable_id < p2->executable_id : - // tie-breaker: lower value runs first - p1->priority < p2->priority; + if (p1->sched_type == CHAIN_AWARE_PRIORITY) + { + if (p1->priority == p2->priority) + { + return p1->executable_id < p2->executable_id; + } + return p1->priority < p2->priority; } - - if (p1->sched_type == ExecutableScheduleType::DEADLINE) { + if (p1->sched_type == DEADLINE) + { // TODO: use the counter logic here as well uint64_t p1_deadline = 0; uint64_t p2_deadline = 0; - - if (p1->deadlines != nullptr && !p1->deadlines->empty()) { + if (p1->deadlines != nullptr && !p1->deadlines->empty()) + { p1_deadline = p1->deadlines->front(); } - if (p2->deadlines != nullptr && !p2->deadlines->empty()) { + if (p2->deadlines != nullptr && !p2->deadlines->empty()) + { p2_deadline = p2->deadlines->front(); } - - if (p1_deadline == p2_deadline || p1->deadlines == p2->deadlines) { + if (p1_deadline == p2_deadline || p1->deadlines == p2->deadlines) + { // this looks bad and is bad, BUT - // if we tell std::set these are equal, only one will be added, and we - // will lose the other. we need _something_ to make them unique since we'd - // rather finish a chain than start a new one, select the higher id return - // p1->executable_id > p2->executable_id; - // return p1->priority < p2->priority; - if (p1->priority != p2->priority) { - return p1->priority < p2->priority; - } - return p1->executable_id < p2->executable_id; + // if we tell std::set these are equal, only one will be added, and we will lose the other. 
+ // we need _something_ to make them unique + // since we'd rather finish a chain than start a new one, select the higher id + // return p1->executable_id > p2->executable_id; + return p1->priority < p2->priority; } - - if (p1_deadline == 0) { + if (p1_deadline == 0) + { p1_deadline = std::numeric_limits::max(); } - if (p2_deadline == 0) { + if (p2_deadline == 0) + { p2_deadline = std::numeric_limits::max(); } return p1_deadline < p2_deadline; } - - std::cout << "invalid compare opration on priority_exec" << std::endl; - return false; + else + { + std::cout << "invalid compare opration on priority_exec" << std::endl; + return false; + } } template <> -void PriorityMemoryStrategy<>::add_guard_condition( - rcl_guard_condition_t const *guard_condition) { - for (auto const &existing_guard_condition : guard_conditions_) { - if (existing_guard_condition == guard_condition) { +void PriorityMemoryStrategy<>::add_guard_condition(const rcl_guard_condition_t *guard_condition) +{ + for (const auto &existing_guard_condition : guard_conditions_) + { + if (existing_guard_condition == guard_condition) + { return; } } @@ -382,10 +153,12 @@ void PriorityMemoryStrategy<>::add_guard_condition( } template <> -void PriorityMemoryStrategy<>::remove_guard_condition( - rcl_guard_condition_t const *guard_condition) { - for (auto it = guard_conditions_.begin(); it != guard_conditions_.end(); ++it) { - if (*it == guard_condition) { +void PriorityMemoryStrategy<>::remove_guard_condition(const rcl_guard_condition_t *guard_condition) +{ + for (auto it = guard_conditions_.begin(); it != guard_conditions_.end(); ++it) + { + if (*it == guard_condition) + { guard_conditions_.erase(it); break; } @@ -393,85 +166,101 @@ void PriorityMemoryStrategy<>::remove_guard_condition( } template <> -void PriorityMemoryStrategy<>::add_executable_to_queue( - std::shared_ptr e) { +void PriorityMemoryStrategy<>::add_executable_to_queue(std::shared_ptr e) +{ // e may have changed. remove and re-add all_executables_.erase(e.get()); // convert from shared_ptr to raw pointer all_executables_.insert(e.get()); } -template <> -void PriorityMemoryStrategy<>::clear_handles() { +template <> +void PriorityMemoryStrategy<>::clear_handles() +{ subscription_handles_.clear(); service_handles_.clear(); client_handles_.clear(); timer_handles_.clear(); waitable_handles_.clear(); - // all_executables_ = std::priority_queue, PriorityExecutableComparator>(); + // all_executables_ = std::priority_queue, PriorityExecutableComparator>(); // all_executables_.clear(); - // all_executables_ = std::map(PriorityExecutableComparator()); + // all_executables_ = std::map(PriorityExecutableComparator()); } template <> -void PriorityMemoryStrategy<>::remove_null_handles(rcl_wait_set_t* wait_set) { +void PriorityMemoryStrategy<>::remove_null_handles(rcl_wait_set_t *wait_set) +{ // TODO(jacobperron): Check if wait set sizes are what we expect them to be? // e.g. wait_set->size_of_clients == client_handles_.size() - // Important to use subscription_handles_.size() instead of wait set's size - // since there may be more subscriptions in the wait set due to Waitables - // added to the end. The same logic applies for other entities. - for (size_t i = 0; i < subscription_handles_.size(); ++i) { - if (!wait_set->subscriptions[i]) { + // Important to use subscription_handles_.size() instead of wait set's size since + // there may be more subscriptions in the wait set due to Waitables added to the end. + // The same logic applies for other entities. 
+ for (size_t i = 0; i < subscription_handles_.size(); ++i) + { + if (!wait_set->subscriptions[i]) + { priority_map[subscription_handles_[i]]->dont_run(); subscription_handles_[i].reset(); - } else { + } + else + { priority_map[subscription_handles_[i]]->allow_run(); } } - - for (size_t i = 0; i < service_handles_.size(); ++i) { - if (!wait_set->services[i]) { + for (size_t i = 0; i < service_handles_.size(); ++i) + { + if (!wait_set->services[i]) + { priority_map[service_handles_[i]]->dont_run(); service_handles_[i].reset(); - } else { + } + else + { priority_map[service_handles_[i]]->allow_run(); } } - - for (size_t i = 0; i < client_handles_.size(); ++i) { - if (!wait_set->clients[i]) { + for (size_t i = 0; i < client_handles_.size(); ++i) + { + if (!wait_set->clients[i]) + { priority_map[client_handles_[i]]->dont_run(); client_handles_[i].reset(); - } else { + } + else + { priority_map[client_handles_[i]]->allow_run(); } } - - for (size_t i = 0; i < timer_handles_.size(); ++i) { - if (!wait_set->timers[i]) { + for (size_t i = 0; i < timer_handles_.size(); ++i) + { + if (!wait_set->timers[i]) + { priority_map[timer_handles_[i]]->dont_run(); timer_handles_[i].reset(); - } else { + } + else + { priority_map[timer_handles_[i]]->allow_run(); } } - - for (size_t i = 0; i < waitable_handles_.size(); ++i) { - if (!waitable_handles_[i]->is_ready(wait_set)) { + for (size_t i = 0; i < waitable_handles_.size(); ++i) + { + if (!waitable_handles_[i]->is_ready(wait_set)) + { priority_map[waitable_handles_[i]]->dont_run(); waitable_handles_[i].reset(); - } else { + } + else + { priority_map[waitable_handles_[i]]->allow_run(); } } - subscription_handles_.erase(std::remove(subscription_handles_.begin(), - subscription_handles_.end(), nullptr), - subscription_handles_.end()); + subscription_handles_.erase( + std::remove(subscription_handles_.begin(), subscription_handles_.end(), nullptr), + subscription_handles_.end()); service_handles_.erase( std::remove(service_handles_.begin(), service_handles_.end(), nullptr), @@ -491,628 +280,698 @@ void PriorityMemoryStrategy<>::remove_null_handles(rcl_wait_set_t* wait_set) { } template <> -bool PriorityMemoryStrategy<>::collect_entities( - const WeakNodeList &weak_nodes) { +bool PriorityMemoryStrategy<>::collect_entities(const WeakNodeList &weak_nodes) +{ - bool has_invalid_weak_nodes = false; - for (auto &weak_node : weak_nodes) { - auto node = weak_node.lock(); - if (!node) { - has_invalid_weak_nodes = true; - continue; + bool has_invalid_weak_nodes = false; + for (auto &weak_node : weak_nodes) + { + auto node = weak_node.lock(); + if (!node) + { + has_invalid_weak_nodes = true; + continue; + } + for (auto &weak_group : node->get_callback_groups()) + { + auto group = weak_group.lock(); + if (!group) + // if (!group || !group->can_be_taken_from().load()) + { + continue; + } + group->find_subscription_ptrs_if( + [this](const rclcpp::SubscriptionBase::SharedPtr &subscription) + { + auto subscription_handle = subscription->get_subscription_handle(); + subscription_handles_.push_back(subscription_handle); + add_executable_to_queue(get_and_reset_priority(subscription_handle, SUBSCRIPTION)); + return false; + }); + group->find_service_ptrs_if( + [this](const rclcpp::ServiceBase::SharedPtr &service) + { + add_executable_to_queue(get_and_reset_priority(service->get_service_handle(), SERVICE)); + service_handles_.push_back(service->get_service_handle()); + return false; + }); + group->find_client_ptrs_if( + [this](const rclcpp::ClientBase::SharedPtr &client) + { + 
add_executable_to_queue(get_and_reset_priority(client->get_client_handle(), CLIENT)); + client_handles_.push_back(client->get_client_handle()); + return false; + }); + group->find_timer_ptrs_if( + [this](const rclcpp::TimerBase::SharedPtr &timer) + { + add_executable_to_queue(get_and_reset_priority(timer->get_timer_handle(), TIMER)); + timer_handles_.push_back(timer->get_timer_handle()); + return false; + }); + group->find_waitable_ptrs_if( + [this](const rclcpp::Waitable::SharedPtr &waitable) + { + add_executable_to_queue(get_and_reset_priority(waitable, WAITABLE)); + waitable_handles_.push_back(waitable); + return false; + }); + } } - for (auto &weak_group : node->get_callback_groups()) { - auto group = weak_group.lock(); - if (!group) - // if (!group || !group->can_be_taken_from().load()) - { - continue; - } - group->find_subscription_ptrs_if( - [this](const rclcpp::SubscriptionBase::SharedPtr &subscription) { - auto subscription_handle = subscription->get_subscription_handle(); - subscription_handles_.push_back(subscription_handle); - add_executable_to_queue( - get_and_reset_priority(subscription_handle, ExecutableType::SUBSCRIPTION)); - return false; - }); - group->find_service_ptrs_if( - [this](const rclcpp::ServiceBase::SharedPtr &service) { - add_executable_to_queue( - get_and_reset_priority(service->get_service_handle(), ExecutableType::SERVICE)); - service_handles_.push_back(service->get_service_handle()); - return false; - }); - group->find_client_ptrs_if( - [this](const rclcpp::ClientBase::SharedPtr &client) { - add_executable_to_queue( - get_and_reset_priority(client->get_client_handle(), ExecutableType::CLIENT)); - client_handles_.push_back(client->get_client_handle()); - return false; - }); - group->find_timer_ptrs_if( - [this](const rclcpp::TimerBase::SharedPtr &timer) { - add_executable_to_queue( - get_and_reset_priority(timer->get_timer_handle(), ExecutableType::TIMER)); - timer_handles_.push_back(timer->get_timer_handle()); - return false; - }); - group->find_waitable_ptrs_if( - [this](const rclcpp::Waitable::SharedPtr &waitable) { - add_executable_to_queue(get_and_reset_priority(waitable, ExecutableType::WAITABLE)); - waitable_handles_.push_back(waitable); - return false; - }); - } - } - return has_invalid_weak_nodes; + return has_invalid_weak_nodes; } template <> -void PriorityMemoryStrategy<>::add_waitable_handle( - const rclcpp::Waitable::SharedPtr &waitable) { +void PriorityMemoryStrategy<>::add_waitable_handle(const rclcpp::Waitable::SharedPtr &waitable) +{ - if (nullptr == waitable) { - throw std::runtime_error("waitable object unexpectedly nullptr"); - } - waitable_handles_.push_back(waitable); + if (nullptr == waitable) + { + throw std::runtime_error("waitable object unexpectedly nullptr"); + } + waitable_handles_.push_back(waitable); } template <> -bool PriorityMemoryStrategy<>::add_handles_to_wait_set( - rcl_wait_set_t *wait_set) { - for (auto subscription : subscription_handles_) { - if (rcl_wait_set_add_subscription(wait_set, subscription.get(), NULL) != - RCL_RET_OK) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", - "Couldn't add subscription to wait set: %s", - rcl_get_error_string().str); - return false; +bool PriorityMemoryStrategy<>::add_handles_to_wait_set(rcl_wait_set_t *wait_set) +{ + for (auto subscription : subscription_handles_) + { + if (rcl_wait_set_add_subscription(wait_set, subscription.get(), NULL) != RCL_RET_OK) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add subscription to wait set: %s", rcl_get_error_string().str); + return false; + } } 
- } - for (auto client : client_handles_) { - if (rcl_wait_set_add_client(wait_set, client.get(), NULL) != RCL_RET_OK) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", "Couldn't add client to wait set: %s", - rcl_get_error_string().str); - return false; + for (auto client : client_handles_) + { + if (rcl_wait_set_add_client(wait_set, client.get(), NULL) != RCL_RET_OK) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add client to wait set: %s", rcl_get_error_string().str); + return false; + } } - } - for (auto service : service_handles_) { - if (rcl_wait_set_add_service(wait_set, service.get(), NULL) != RCL_RET_OK) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", "Couldn't add service to wait set: %s", - rcl_get_error_string().str); - return false; + for (auto service : service_handles_) + { + if (rcl_wait_set_add_service(wait_set, service.get(), NULL) != RCL_RET_OK) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add service to wait set: %s", rcl_get_error_string().str); + return false; + } } - } - for (auto timer : timer_handles_) { - if (rcl_wait_set_add_timer(wait_set, timer.get(), NULL) != RCL_RET_OK) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", "Couldn't add timer to wait set: %s", - rcl_get_error_string().str); - return false; + for (auto timer : timer_handles_) + { + if (rcl_wait_set_add_timer(wait_set, timer.get(), NULL) != RCL_RET_OK) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add timer to wait set: %s", rcl_get_error_string().str); + return false; + } } - } - for (auto guard_condition : guard_conditions_) { - if (rcl_wait_set_add_guard_condition(wait_set, guard_condition, NULL) != - RCL_RET_OK) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", - "Couldn't add guard_condition to wait set: %s", - rcl_get_error_string().str); - return false; + for (auto guard_condition : guard_conditions_) + { + if (rcl_wait_set_add_guard_condition(wait_set, guard_condition, NULL) != RCL_RET_OK) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add guard_condition to wait set: %s", + rcl_get_error_string().str); + return false; + } } - } - for (auto waitable : waitable_handles_) { - if (!waitable->add_to_wait_set(wait_set)) { - RCUTILS_LOG_ERROR_NAMED("rclcpp", "Couldn't add waitable to wait set: %s", - rcl_get_error_string().str); - return false; + for (auto waitable : waitable_handles_) + { + if (!waitable->add_to_wait_set(wait_set)) + { + RCUTILS_LOG_ERROR_NAMED( + "rclcpp", + "Couldn't add waitable to wait set: %s", rcl_get_error_string().str); + return false; + } } - } - return true; + return true; } template <> void PriorityMemoryStrategy<>::get_next_executable( - rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) { + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ - const PriorityExecutable *next_exec = nullptr; - // std::cout << "all_executables_.size():" << all_executables_.size() << - // std::endl; log contents of all_executables_ std::cout << exec->name << ": " - // << exec->handle << " : " << exec->sched_type << std::endl; - std::set skip_groups; + const PriorityExecutable *next_exec = nullptr; + // std::cout << "all_executables_.size():" << all_executables_.size() << std::endl; + // log contents of all_executables_ + // std::cout << exec->name << ": " << exec->handle << " : " << exec->sched_type << std::endl; + std::set skip_groups; - // while (!all_executables_.empty()) - for (auto exec : all_executables_) { - next_exec = exec; - if (!next_exec->can_be_run) { - continue; - } - ExecutableType type = next_exec->type; - switch (type) { - case 
ExecutableType::SUBSCRIPTION: { - std::shared_ptr subs_handle = - std::static_pointer_cast(next_exec->handle); - auto subscription = get_subscription_by_handle(subs_handle, weak_nodes); - if (subscription) { - auto group = get_group_by_subscription(subscription, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... - // Remove it from the ready list and continue looking - // it = subscription_handles_.erase(it); - continue; + // while (!all_executables_.empty()) + for (auto exec : all_executables_) + { + next_exec = exec; + if (!next_exec->can_be_run) + { + continue; } - if (skip_groups.find(group) != skip_groups.end()) { - // group was used at some point during this loop. skip it so we can - // re-check it next time - continue; - } - - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - // ++it; - skip_groups.insert(group); - continue; - } - any_exec.callback_group = group; - any_exec.subscription = subscription; - any_exec.node_base = get_node_by_group(group, weak_nodes); - // std::cout << "Using new priority sub " << - // subscription->get_topic_name() << std::endl; - } - } break; - case ExecutableType::SERVICE: { - std::shared_ptr service_handle = - std::static_pointer_cast(next_exec->handle); - auto service = get_service_by_handle(service_handle, weak_nodes); - if (service) { - auto group = get_group_by_service(service, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... - // Remove it from the ready list and continue looking - // it = subscription_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - // ++it; - continue; - } - any_exec.callback_group = group; - any_exec.service = service; - any_exec.node_base = get_node_by_group(group, weak_nodes); - // std::cout << "Using new priority service " << - // service->get_service_name() << std::endl; - } - } break; - case ExecutableType::CLIENT: { - std::shared_ptr client_handle = - std::static_pointer_cast(next_exec->handle); - auto client = get_client_by_handle(client_handle, weak_nodes); - if (client) { - auto group = get_group_by_client(client, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... - // Remove it from the ready list and continue looking - // it = subscription_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - // ++it; - continue; - } - any_exec.callback_group = group; - any_exec.client = client; - any_exec.node_base = get_node_by_group(group, weak_nodes); - // std::cout << "Using new priority client " << - // client->get_service_name() << std::endl; - } - } break; - case ExecutableType::TIMER: { - std::shared_ptr timer_handle = - std::static_pointer_cast(next_exec->handle); - auto timer = get_timer_by_handle(timer_handle, weak_nodes); - if (timer) { - auto group = get_group_by_timer(timer, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... 
- // Remove it from the ready list and continue looking - // it = subscription_handles_.erase(it); - continue; - } - if (skip_groups.find(group) != skip_groups.end()) { - // group was used at some point during this loop. skip it so we can - // re-check it next time - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - // ++it; - skip_groups.insert(group); - continue; - } - any_exec.callback_group = group; - any_exec.timer = timer; - any_exec.node_base = get_node_by_group(group, weak_nodes); - } - } break; - case ExecutableType::WAITABLE: { - std::shared_ptr waitable_handle = - std::static_pointer_cast(next_exec->waitable); - auto waitable = waitable_handle; - if (waitable) { - auto group = get_group_by_waitable(waitable, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... - // Remove it from the ready list and continue looking - // it = subscription_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - // ++it; - continue; - } - any_exec.callback_group = group; - any_exec.waitable = waitable; - any_exec.node_base = get_node_by_group(group, weak_nodes); - // std::cout << "Using new priority waitable" << std::endl; - } - } break; - default: - std::cout << "Unknown type from priority!!!" << std::endl; - continue; - // break; - } - if (next_exec->is_first_in_chain && next_exec->sched_type == ExecutableScheduleType::DEADLINE) { - // std::cout << "running first in chain deadline" << std::endl; - } - /* std::ostringstream oss; - oss << "{\"operation\":\"get_next_executable\""; - // output names and handles of all_executables_ in json array - oss << ",\"all_executables\":["; - for (auto exec : all_executables_) + ExecutableType type = next_exec->type; + switch (type) + { + case SUBSCRIPTION: + { + std::shared_ptr subs_handle = std::static_pointer_cast(next_exec->handle); + auto subscription = get_subscription_by_handle(subs_handle, weak_nodes); + if (subscription) { - // if (exec->can_be_run) - oss << "{\"name\":\"" << exec->name << "\", \"sched_type\":\"" - << exec->sched_type << "\", \"can_be_run\":\"" << exec->can_be_run << - "\"},"; + auto group = get_group_by_subscription(subscription, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... + // Remove it from the ready list and continue looking + // it = subscription_handles_.erase(it); + continue; + } + if (skip_groups.find(group) != skip_groups.end()) + { + // group was used at some point during this loop. 
skip it so we can re-check it next time + continue; + } + + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + // ++it; + skip_groups.insert(group); + continue; + } + any_exec.callback_group = group; + any_exec.subscription = subscription; + any_exec.node_base = get_node_by_group(group, weak_nodes); + // std::cout << "Using new priority sub " << subscription->get_topic_name() << std::endl; } - // remove trailing comma - oss.seekp(-1, oss.cur); - oss << "]}"; - log_entry(logger_, oss.str()); - oss.str(""); - oss.clear(); - oss << "{\"operation\": \"select_task\", \"task\": \"" << exec->name - << "\"}"; log_entry(logger_, oss.str()); */ + } + break; + case SERVICE: + { + std::shared_ptr service_handle = std::static_pointer_cast(next_exec->handle); + auto service = get_service_by_handle(service_handle, weak_nodes); + if (service) + { + auto group = get_group_by_service(service, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... + // Remove it from the ready list and continue looking + // it = subscription_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + // ++it; + continue; + } + any_exec.callback_group = group; + any_exec.service = service; + any_exec.node_base = get_node_by_group(group, weak_nodes); + // std::cout << "Using new priority service " << service->get_service_name() << std::endl; + } + } + break; + case CLIENT: + { + std::shared_ptr client_handle = std::static_pointer_cast(next_exec->handle); + auto client = get_client_by_handle(client_handle, weak_nodes); + if (client) + { + auto group = get_group_by_client(client, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... + // Remove it from the ready list and continue looking + // it = subscription_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + // ++it; + continue; + } + any_exec.callback_group = group; + any_exec.client = client; + any_exec.node_base = get_node_by_group(group, weak_nodes); + // std::cout << "Using new priority client " << client->get_service_name() << std::endl; + } + } + break; + case TIMER: + { + std::shared_ptr timer_handle = std::static_pointer_cast(next_exec->handle); + auto timer = get_timer_by_handle(timer_handle, weak_nodes); + if (timer) + { + auto group = get_group_by_timer(timer, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... + // Remove it from the ready list and continue looking + // it = subscription_handles_.erase(it); + continue; + } + if (skip_groups.find(group) != skip_groups.end()) + { + // group was used at some point during this loop. 
skip it so we can re-check it next time + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + // ++it; + skip_groups.insert(group); + continue; + } + any_exec.callback_group = group; + any_exec.timer = timer; + any_exec.node_base = get_node_by_group(group, weak_nodes); + } + } + break; + case WAITABLE: + { + std::shared_ptr waitable_handle = std::static_pointer_cast(next_exec->waitable); + auto waitable = waitable_handle; + if (waitable) + { + auto group = get_group_by_waitable(waitable, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... + // Remove it from the ready list and continue looking + // it = subscription_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + // ++it; + continue; + } + any_exec.callback_group = group; + any_exec.waitable = waitable; + any_exec.node_base = get_node_by_group(group, weak_nodes); + // std::cout << "Using new priority waitable" << std::endl; + } + } + break; + default: + std::cout << "Unknown type from priority!!!" << std::endl; + continue; + // break; + } + if (next_exec->is_first_in_chain && next_exec->sched_type == DEADLINE) + { + // std::cout << "running first in chain deadline" << std::endl; + } + /* std::ostringstream oss; + oss << "{\"operation\":\"get_next_executable\""; + // output names and handles of all_executables_ in json array + oss << ",\"all_executables\":["; + for (auto exec : all_executables_) + { + // if (exec->can_be_run) + oss << "{\"name\":\"" << exec->name << "\", \"sched_type\":\"" << exec->sched_type << "\", \"can_be_run\":\"" << exec->can_be_run << "\"},"; + } + // remove trailing comma + oss.seekp(-1, oss.cur); + oss << "]}"; + log_entry(logger_, oss.str()); + oss.str(""); + oss.clear(); + oss << "{\"operation\": \"select_task\", \"task\": \"" << exec->name << "\"}"; + log_entry(logger_, oss.str()); */ - // returning with an executable - // remove from all_executables_ map - all_executables_.erase(next_exec); + // returning with an executable + // remove from all_executables_ map + all_executables_.erase(next_exec); + // std::stringstream oss; + // oss << "{\"operation\":\"get_next_executable\", \"result\":\"success\", \"type\":\"" << type << "\"}"; + // log_entry(logger_, oss.str()); + return; + } + // nothing found // std::stringstream oss; - // oss << "{\"operation\":\"get_next_executable\", \"result\":\"success\", - // \"type\":\"" << type << "\"}"; log_entry(logger_, oss.str()); - return; - } - // nothing found - // std::stringstream oss; - // oss << "{\"operation\":\"get_next_executable\", - // \"result\":\"nothing_found\"}"; log_entry(logger_, oss.str()); -} - -template <> -void PriorityMemoryStrategy<>::post_execute(rclcpp::AnyExecutable any_exec, - int thread_id) { - std::shared_ptr next_exec = nullptr; - if (any_exec.subscription) { - next_exec = - get_priority_settings(any_exec.subscription->get_subscription_handle()); - } else if (any_exec.service) { - next_exec = get_priority_settings(any_exec.service->get_service_handle()); - } else if (any_exec.client) { - next_exec = get_priority_settings(any_exec.client->get_client_handle()); - } else if (any_exec.timer) { - next_exec = get_priority_settings(any_exec.timer->get_timer_handle()); - } else if (any_exec.waitable) { - 
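Taken together, the cases above all funnel into the same group bookkeeping: an executable is only dispatched when its callback group is free, and mutually exclusive groups that are currently in use (or were already tried in this pass) go into `skip_groups` so they are re-checked on the next call. Below is a minimal standalone sketch of that selection rule; the `Group` and `Executable` structs are stand-ins for the rclcpp callback group and the strategy's executable records, not types from this package.

```cpp
#include <atomic>
#include <iostream>
#include <memory>
#include <set>
#include <vector>

// Stand-ins for rclcpp::CallbackGroup and the strategy's executable records.
struct Group {
  std::atomic<bool> can_be_taken_from{true};  // false while a callback from this group runs
};
struct Executable {
  const char *name;
  bool can_be_run = false;
  std::shared_ptr<Group> group;
};

// Return the first runnable executable whose group is free; groups that are
// busy (or already visited this pass) are skipped and re-checked on the next call.
std::shared_ptr<Executable> pick_next(const std::vector<std::shared_ptr<Executable>> &ready) {
  std::set<std::shared_ptr<Group>> skip_groups;
  for (const auto &exec : ready) {
    if (!exec->can_be_run) continue;                    // not released yet
    if (skip_groups.count(exec->group) != 0) continue;  // group already tried this pass
    if (!exec->group->can_be_taken_from.load()) {
      skip_groups.insert(exec->group);                  // mutually exclusive and in use
      continue;
    }
    return exec;                                        // group is free: dispatch this one
  }
  return nullptr;                                       // nothing runnable right now
}

int main() {
  auto busy_group = std::make_shared<Group>();
  auto free_group = std::make_shared<Group>();
  busy_group->can_be_taken_from = false;
  std::vector<std::shared_ptr<Executable>> ready = {
      std::make_shared<Executable>(Executable{"timer_cb", true, busy_group}),
      std::make_shared<Executable>(Executable{"sub_cb", true, free_group})};
  auto picked = pick_next(ready);
  std::cout << (picked ? picked->name : "none") << "\n";  // prints "sub_cb"
}
```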
next_exec = get_priority_settings(any_exec.waitable); - } - if (next_exec == nullptr) { - RCLCPP_WARN(rclcpp::get_logger("rclcpp"), - "Could not find priority settings for handle"); - } - // callback has executed - // std::cout<< "running callback. first? " << next_exec->is_first_in_chain << - // " type " << next_exec->sched_type << std::endl; - if (next_exec->is_first_in_chain - && next_exec->sched_type != ExecutableScheduleType::DEADLINE) { - /* - timespec current_time; - clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); - uint64_t millis = (current_time.tv_sec * (uint64_t)1000) + - (current_time.tv_nsec / 1000000); - - auto timer = next_exec->timer_handle; - int64_t time_until_next_call = timer->time_until_trigger().count() / - 1000000; - // std::cout << "end of chain. time until trigger: " << - std::to_string(time_until_next_call) << std::endl; - */ - } - if (next_exec->is_last_in_chain - && next_exec->sched_type == ExecutableScheduleType::DEADLINE) { - // did we make the deadline? - timespec current_time; - clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); - uint64_t millis = - (current_time.tv_sec * 1000UL) + (current_time.tv_nsec / 1000000); - uint64_t this_deadline = next_exec->deadlines->front(); - // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "last, chain id: %d", - // next_exec->chain_id); for (auto deadline : *next_exec->deadlines) - // { - // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "deadline: %d", deadline); - // } - std::ostringstream oss; - // oss << "{\"operation\": \"deadline_check\", \"chain_id\": " << - // next_exec->chain_id << ", \"deadline\": " << - // next_exec->deadlines->front() << ", \"current_time\": " << millis << "}"; + // oss << "{\"operation\":\"get_next_executable\", \"result\":\"nothing_found\"}"; // log_entry(logger_, oss.str()); - - if (next_exec->timer_handle == nullptr) { - std::cout << "tried to use a chain without a timer handle!!!" - << std::endl; - } - auto timer = next_exec->timer_handle; - if (timer == nullptr) { - std::cout << "somehow, this timer handle didn't have an associated timer" - << std::endl; - } - // timespec current_time; - // clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); - // uint64_t millis = (current_time.tv_sec * (uint64_t)1000) + - // (current_time.tv_nsec / 1000000); uint64_t this_deadline = - // next_exec->deadlines->front(); - uint64_t next_deadline = 0; - bool on_time; - // time_diff may be negative, to use a signed type to prevent overflow - int64_t time_diff = millis - this_deadline; - int periods_late; - // if time_diff is negative, we completed on time. add one period to the - // deadline - if (time_diff <= 0) { - periods_late = 0; - next_deadline = this_deadline + next_exec->period; - on_time = true; - } - // if time_diff is positive, we completed late. 
add one period for each - // period we were late - else { - periods_late = std::ceil(time_diff / (double)next_exec->period); - next_deadline = this_deadline + (periods_late)*next_exec->period; - on_time = false; - } - // std::chrono::nanoseconds time_until = - // next_exec->timer_handle->time_until_trigger(); next_deadline = millis + - // (time_until.count() / 1000000) + next_exec->period; the next deadline is - // the current time plus the period, skipping periods that have already - // passed - - // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "next deadline: %d", - // next_deadline); - oss << "{\"operation\": \"next_deadline\", \"chain_id\": " - << next_exec->chain_id << ", \"deadline\": " << next_deadline - << ", \"on_time\": " << on_time << ", \"time_diff\": " << time_diff - << ", \"periods_late\": " << periods_late; - if (thread_id != -1) { - oss << ", \"thread_id\": " << thread_id; - } - - oss << "}"; - // oss << "{\"operation\": \"next_deadline\", \"chain_id\": " << - // next_exec->chain_id << ", \"deadline\": " << - // next_exec->deadlines->front() << "}"; - log_entry(logger_, oss.str()); - if (time_diff < -next_exec->period) { - // the deadline was much later than it needed to be - } - next_exec->deadlines->push_back(next_deadline); - - // print chain id and deadline contents - // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "chain id: %d", - // next_exec->chain_id); for (auto deadline : *next_exec->deadlines) - // { - // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "deadline: %d", deadline); - // } - // std::cout << "deadline set" << std::endl; - - if (!next_exec->deadlines->empty()) - next_exec->deadlines->pop_front(); - } - if (next_exec->sched_type == ExecutableScheduleType::CHAIN_AWARE_PRIORITY - || next_exec->sched_type == ExecutableScheduleType::DEADLINE) { - // this is safe, since we popped it earlier - // get a mutable reference - // TODO: find a cleaner way to do this - std::shared_ptr mut_executable = - get_priority_settings(next_exec->handle); - // std::cout << "running chain aware cb" << std::endl; - mut_executable->increment_counter(); - } } template <> -void PriorityMemoryStrategy<>::get_next_subscription( - rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) { +void PriorityMemoryStrategy<>::post_execute(rclcpp::AnyExecutable any_exec, int thread_id) +{ - auto it = subscription_handles_.begin(); - while (it != subscription_handles_.end()) { - auto subscription = get_subscription_by_handle(*it, weak_nodes); - if (subscription) { - // Find the group for this handle and see if it can be serviced - auto group = get_group_by_subscription(subscription, weak_nodes); - if (!group) { - // Group was not found, meaning the subscription is not valid... 
- // Remove it from the ready list and continue looking + std::shared_ptr next_exec = nullptr; + if (any_exec.subscription) + { + next_exec = get_priority_settings(any_exec.subscription->get_subscription_handle()); + } + else if (any_exec.service) + { + next_exec = get_priority_settings(any_exec.service->get_service_handle()); + } + else if (any_exec.client) + { + next_exec = get_priority_settings(any_exec.client->get_client_handle()); + } + else if (any_exec.timer) + { + next_exec = get_priority_settings(any_exec.timer->get_timer_handle()); + } + else if (any_exec.waitable) + { + next_exec = get_priority_settings(any_exec.waitable); + } + if (next_exec == nullptr) + { + RCLCPP_WARN(rclcpp::get_logger("rclcpp"), "Could not find priority settings for handle"); + } + // callback has executed + // std::cout<< "running callback. first? " << next_exec->is_first_in_chain << " type " << next_exec->sched_type << std::endl; + if (next_exec->is_first_in_chain && next_exec->sched_type != DEADLINE) + { + /* + timespec current_time; + clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); + uint64_t millis = (current_time.tv_sec * (uint64_t)1000) + (current_time.tv_nsec / 1000000); + + auto timer = next_exec->timer_handle; + int64_t time_until_next_call = timer->time_until_trigger().count() / 1000000; + // std::cout << "end of chain. time until trigger: " << std::to_string(time_until_next_call) << std::endl; + */ + } + if (next_exec->is_last_in_chain && next_exec->sched_type == DEADLINE) + { + // did we make the deadline? + timespec current_time; + clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); + uint64_t millis = (current_time.tv_sec * 1000UL) + (current_time.tv_nsec / 1000000); + uint64_t this_deadline = next_exec->deadlines->front(); + // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "last, chain id: %d", next_exec->chain_id); + // for (auto deadline : *next_exec->deadlines) + // { + // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "deadline: %d", deadline); + // } + std::ostringstream oss; + // oss << "{\"operation\": \"deadline_check\", \"chain_id\": " << next_exec->chain_id << ", \"deadline\": " << next_exec->deadlines->front() << ", \"current_time\": " << millis << "}"; + // log_entry(logger_, oss.str()); + + if (next_exec->timer_handle == nullptr) + { + std::cout << "tried to use a chain without a timer handle!!!" << std::endl; + } + auto timer = next_exec->timer_handle; + if (timer == nullptr) + { + std::cout << "somehow, this timer handle didn't have an associated timer" << std::endl; + } + // timespec current_time; + // clock_gettime(CLOCK_MONOTONIC_RAW, ¤t_time); + // uint64_t millis = (current_time.tv_sec * (uint64_t)1000) + (current_time.tv_nsec / 1000000); + // uint64_t this_deadline = next_exec->deadlines->front(); + uint64_t next_deadline = 0; + bool on_time; + // time_diff may be negative, to use a signed type to prevent overflow + int64_t time_diff = millis - this_deadline; + int periods_late; + // if time_diff is negative, we completed on time. add one period to the deadline + if (time_diff <= 0) + { + periods_late = 0; + next_deadline = this_deadline + next_exec->period; + on_time = true; + } + // if time_diff is positive, we completed late. 
add one period for each period we were late + else + { + periods_late = std::ceil(time_diff / (double)next_exec->period); + next_deadline = this_deadline + (periods_late)*next_exec->period; + on_time = false; + } + // std::chrono::nanoseconds time_until = next_exec->timer_handle->time_until_trigger(); + // next_deadline = millis + (time_until.count() / 1000000) + next_exec->period; + // the next deadline is the current time plus the period, skipping periods that have already passed + + // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "next deadline: %d", next_deadline); + oss << "{\"operation\": \"next_deadline\", \"chain_id\": " << next_exec->chain_id << ", \"deadline\": " << next_deadline << ", \"on_time\": " << on_time << ", \"time_diff\": " << time_diff << ", \"periods_late\": " << periods_late; + if (thread_id != -1) + { + oss << ", \"thread_id\": " << thread_id; + } + + oss << "}"; + // oss << "{\"operation\": \"next_deadline\", \"chain_id\": " << next_exec->chain_id << ", \"deadline\": " << next_exec->deadlines->front() << "}"; + log_entry(logger_, oss.str()); + if (time_diff < -next_exec->period) + { + // the deadline was much later than it needed to be + } + next_exec->deadlines->push_back(next_deadline); + + // print chain id and deadline contents + // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "chain id: %d", next_exec->chain_id); + // for (auto deadline : *next_exec->deadlines) + // { + // RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "deadline: %d", deadline); + // } + // std::cout << "deadline set" << std::endl; + + if (!next_exec->deadlines->empty()) + next_exec->deadlines->pop_front(); + } + if (next_exec->sched_type == CHAIN_AWARE_PRIORITY || next_exec->sched_type == DEADLINE) + { + // this is safe, since we popped it earlier + // get a mutable reference + // TODO: find a cleaner way to do this + std::shared_ptr mut_executable = get_priority_settings(next_exec->handle); + // std::cout << "running chain aware cb" << std::endl; + mut_executable->increment_counter(); + } +} + +template <> +void PriorityMemoryStrategy<>:: + get_next_subscription( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ + + auto it = subscription_handles_.begin(); + while (it != subscription_handles_.end()) + { + auto subscription = get_subscription_by_handle(*it, weak_nodes); + if (subscription) + { + // Find the group for this handle and see if it can be serviced + auto group = get_group_by_subscription(subscription, weak_nodes); + if (!group) + { + // Group was not found, meaning the subscription is not valid... 
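The deadline bookkeeping in `post_execute()` above boils down to one piece of arithmetic: a chain that finishes on or before its deadline gets a deadline one period later, while a late chain skips forward one period for every period it was late. A standalone sketch of just that calculation, with times in milliseconds as in the code above; the sample values in `main` are illustrative.

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

struct DeadlineUpdate {
  uint64_t next_deadline;
  int periods_late;
  bool on_time;
};

// Mirrors the deadline-advance logic in post_execute(): `now` and
// `current_deadline` are wall-clock milliseconds, `period` is the chain period.
DeadlineUpdate advance_deadline(uint64_t now, uint64_t current_deadline, uint64_t period) {
  // may be negative, so use a signed type
  const int64_t time_diff = static_cast<int64_t>(now) - static_cast<int64_t>(current_deadline);
  if (time_diff <= 0) {
    // finished on time: the next deadline is simply one period later
    return {current_deadline + period, 0, true};
  }
  // finished late: skip forward one period for every period we were late
  const int periods_late = static_cast<int>(std::ceil(time_diff / static_cast<double>(period)));
  return {current_deadline + periods_late * period, periods_late, false};
}

int main() {
  // on time: finished 20 ms early, deadline advances by one 100 ms period
  auto a = advance_deadline(980, 1000, 100);
  std::cout << a.next_deadline << " " << a.periods_late << "\n";  // 1100 0
  // late: finished 130 ms past the deadline, so skip two whole periods
  auto b = advance_deadline(1130, 1000, 100);
  std::cout << b.next_deadline << " " << b.periods_late << "\n";  // 1200 2
}
```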
+ // Remove it from the ready list and continue looking + it = subscription_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + ++it; + continue; + } + // Otherwise it is safe to set and return the any_exec + any_exec.subscription = subscription; + any_exec.callback_group = group; + any_exec.node_base = get_node_by_group(group, weak_nodes); + subscription_handles_.erase(it); + return; + } + // Else, the subscription is no longer valid, remove it and continue it = subscription_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - ++it; - continue; - } - // Otherwise it is safe to set and return the any_exec - any_exec.subscription = subscription; - any_exec.callback_group = group; - any_exec.node_base = get_node_by_group(group, weak_nodes); - subscription_handles_.erase(it); - return; } - // Else, the subscription is no longer valid, remove it and continue - it = subscription_handles_.erase(it); - } } template <> -void PriorityMemoryStrategy<>::get_next_service( - rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) { - auto it = service_handles_.begin(); - while (it != service_handles_.end()) { - auto service = get_service_by_handle(*it, weak_nodes); - if (service) { - // Find the group for this handle and see if it can be serviced - auto group = get_group_by_service(service, weak_nodes); - if (!group) { - // Group was not found, meaning the service is not valid... - // Remove it from the ready list and continue looking +void PriorityMemoryStrategy<>:: + get_next_service( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ + auto it = service_handles_.begin(); + while (it != service_handles_.end()) + { + auto service = get_service_by_handle(*it, weak_nodes); + if (service) + { + // Find the group for this handle and see if it can be serviced + auto group = get_group_by_service(service, weak_nodes); + if (!group) + { + // Group was not found, meaning the service is not valid... 
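`get_next_subscription()` above, and the `get_next_service`/`get_next_client`/`get_next_timer`/`get_next_waitable` functions that follow, all reuse the same erase-or-skip idiom over their ready lists: invalid handles are erased in place, entries whose group is busy are stepped over, and the first claimable entry is removed and returned. A hedged standalone sketch of that idiom; the `is_valid` and `group_is_free` callbacks are placeholders for the handle and group lookups done in the real code.

```cpp
#include <functional>
#include <iostream>
#include <list>
#include <string>

// One pass over a ready list: erase entries that are no longer valid, skip
// entries whose callback group is busy, and claim (erase and return) the first
// entry that can actually run. Returns true if something was claimed.
bool take_next(std::list<std::string> &ready,
               const std::function<bool(const std::string &)> &is_valid,
               const std::function<bool(const std::string &)> &group_is_free,
               std::string &out) {
  auto it = ready.begin();
  while (it != ready.end()) {
    if (!is_valid(*it)) {
      it = ready.erase(it);  // stale handle: drop it and keep looking
      continue;
    }
    if (!group_is_free(*it)) {
      ++it;                  // busy group: leave it for the next pass
      continue;
    }
    out = *it;
    ready.erase(it);         // claimed: remove it from the ready list
    return true;
  }
  return false;
}

int main() {
  std::list<std::string> ready = {"stale_sub", "busy_sub", "free_sub"};
  std::string picked;
  bool ok = take_next(
      ready,
      [](const std::string &h) { return h != "stale_sub"; },  // placeholder validity check
      [](const std::string &h) { return h != "busy_sub"; },   // placeholder group check
      picked);
  std::cout << (ok ? picked : "nothing") << "\n";  // prints "free_sub"
}
```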
+ // Remove it from the ready list and continue looking + it = service_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + ++it; + continue; + } + // Otherwise it is safe to set and return the any_exec + any_exec.service = service; + any_exec.callback_group = group; + any_exec.node_base = get_node_by_group(group, weak_nodes); + service_handles_.erase(it); + return; + } + // Else, the service is no longer valid, remove it and continue it = service_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - ++it; - continue; - } - // Otherwise it is safe to set and return the any_exec - any_exec.service = service; - any_exec.callback_group = group; - any_exec.node_base = get_node_by_group(group, weak_nodes); - service_handles_.erase(it); - return; } - // Else, the service is no longer valid, remove it and continue - it = service_handles_.erase(it); - } } template <> -void PriorityMemoryStrategy<>::get_next_client(rclcpp::AnyExecutable &any_exec, - const WeakNodeList &weak_nodes) { +void PriorityMemoryStrategy<>:: + get_next_client( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ - auto it = client_handles_.begin(); - while (it != client_handles_.end()) { - auto client = get_client_by_handle(*it, weak_nodes); - if (client) { - // Find the group for this handle and see if it can be serviced - auto group = get_group_by_client(client, weak_nodes); - if (!group) { - // Group was not found, meaning the service is not valid... - // Remove it from the ready list and continue looking + auto it = client_handles_.begin(); + while (it != client_handles_.end()) + { + auto client = get_client_by_handle(*it, weak_nodes); + if (client) + { + // Find the group for this handle and see if it can be serviced + auto group = get_group_by_client(client, weak_nodes); + if (!group) + { + // Group was not found, meaning the service is not valid... 
+ // Remove it from the ready list and continue looking + it = client_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + ++it; + continue; + } + // Otherwise it is safe to set and return the any_exec + any_exec.client = client; + any_exec.callback_group = group; + any_exec.node_base = get_node_by_group(group, weak_nodes); + client_handles_.erase(it); + return; + } + // Else, the service is no longer valid, remove it and continue it = client_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - ++it; - continue; - } - // Otherwise it is safe to set and return the any_exec - any_exec.client = client; - any_exec.callback_group = group; - any_exec.node_base = get_node_by_group(group, weak_nodes); - client_handles_.erase(it); - return; } - // Else, the service is no longer valid, remove it and continue - it = client_handles_.erase(it); - } } template <> -void PriorityMemoryStrategy<>::get_next_timer(rclcpp::AnyExecutable &any_exec, - const WeakNodeList &weak_nodes) { +void PriorityMemoryStrategy<>:: + get_next_timer( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ - auto it = timer_handles_.begin(); - while (it != timer_handles_.end()) { - auto timer = get_timer_by_handle(*it, weak_nodes); - if (timer) { - // Find the group for this handle and see if it can be serviced - auto group = get_group_by_timer(timer, weak_nodes); - if (!group) { - // Group was not found, meaning the timer is not valid... - // Remove it from the ready list and continue looking + auto it = timer_handles_.begin(); + while (it != timer_handles_.end()) + { + auto timer = get_timer_by_handle(*it, weak_nodes); + if (timer) + { + // Find the group for this handle and see if it can be serviced + auto group = get_group_by_timer(timer, weak_nodes); + if (!group) + { + // Group was not found, meaning the timer is not valid... 
+ // Remove it from the ready list and continue looking + it = timer_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + ++it; + continue; + } + // Otherwise it is safe to set and return the any_exec + any_exec.timer = timer; + any_exec.callback_group = group; + any_exec.node_base = get_node_by_group(group, weak_nodes); + timer_handles_.erase(it); + return; + } + // Else, the service is no longer valid, remove it and continue it = timer_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - ++it; - continue; - } - // Otherwise it is safe to set and return the any_exec - any_exec.timer = timer; - any_exec.callback_group = group; - any_exec.node_base = get_node_by_group(group, weak_nodes); - timer_handles_.erase(it); - return; } - // Else, the service is no longer valid, remove it and continue - it = timer_handles_.erase(it); - } } template <> -void PriorityMemoryStrategy<>::get_next_waitable( - rclcpp::AnyExecutable &any_exec, const WeakNodeList &weak_nodes) { - auto it = waitable_handles_.begin(); - while (it != waitable_handles_.end()) { - auto waitable = *it; - if (waitable) { - // Find the group for this handle and see if it can be serviced - auto group = get_group_by_waitable(waitable, weak_nodes); - if (!group) { - // Group was not found, meaning the waitable is not valid... - // Remove it from the ready list and continue looking +void PriorityMemoryStrategy<>:: + get_next_waitable( + rclcpp::AnyExecutable &any_exec, + const WeakNodeList &weak_nodes) +{ + auto it = waitable_handles_.begin(); + while (it != waitable_handles_.end()) + { + auto waitable = *it; + if (waitable) + { + // Find the group for this handle and see if it can be serviced + auto group = get_group_by_waitable(waitable, weak_nodes); + if (!group) + { + // Group was not found, meaning the waitable is not valid... 
+ // Remove it from the ready list and continue looking + it = waitable_handles_.erase(it); + continue; + } + if (!group->can_be_taken_from().load()) + { + // Group is mutually exclusive and is being used, so skip it for now + // Leave it to be checked next time, but continue searching + ++it; + continue; + } + // Otherwise it is safe to set and return the any_exec + any_exec.waitable = waitable; + any_exec.callback_group = group; + any_exec.node_base = get_node_by_group(group, weak_nodes); + waitable_handles_.erase(it); + return; + } + // Else, the waitable is no longer valid, remove it and continue it = waitable_handles_.erase(it); - continue; - } - if (!group->can_be_taken_from().load()) { - // Group is mutually exclusive and is being used, so skip it for now - // Leave it to be checked next time, but continue searching - ++it; - continue; - } - // Otherwise it is safe to set and return the any_exec - any_exec.waitable = waitable; - any_exec.callback_group = group; - any_exec.node_base = get_node_by_group(group, weak_nodes); - waitable_handles_.erase(it); - return; } - // Else, the waitable is no longer valid, remove it and continue - it = waitable_handles_.erase(it); - } -} \ No newline at end of file +} diff --git a/src/priority_executor/src/usage_example.cpp b/src/priority_executor/src/usage_example.cpp index b9c4b4c..8750cdc 100755 --- a/src/priority_executor/src/usage_example.cpp +++ b/src/priority_executor/src/usage_example.cpp @@ -1,109 +1,79 @@ +#include "rclcpp/rclcpp.hpp" +#include "priority_executor/priority_executor.hpp" +#include "priority_executor/priority_memory_strategy.hpp" #include #include #include - -#include -#include - -#include "priority_executor/priority_executor.hpp" -#include "priority_executor/priority_memory_strategy.hpp" +#include "std_msgs/msg/string.hpp" // re-create the classic talker-listener example with two listeners class Talker : public rclcpp::Node { public: - Talker() : Node("talker") - { - // Create a publisher on the "chatter" topic with 10 msg queue size. - pub_ = this->create_publisher("chatter", 10); - // Create a timer of period 1s that calls our callback member function. - timer_ = this->create_wall_timer(std::chrono::seconds(1), - std::bind(&Talker::timer_callback, this)); - } - // the timer must be public - rclcpp::TimerBase::SharedPtr timer_; + Talker() : Node("talker") + { + // Create a publisher on the "chatter" topic with 10 msg queue size. + pub_ = this->create_publisher("chatter", 10); + // Create a timer of period 1s that calls our callback member function. + timer_ = this->create_wall_timer(std::chrono::seconds(1), std::bind(&Talker::timer_callback, this)); + } + // the timer must be public + rclcpp::TimerBase::SharedPtr timer_; private: - void timer_callback() - { - std_msgs::msg::String msg; - msg.data = "Hello World!"; - RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", msg.data.c_str()); - pub_->publish(msg); - } - rclcpp::Publisher::SharedPtr pub_; + void timer_callback() + { + // Create a message and publish it 10 times. + std_msgs::msg::String msg; + msg.data = "Hello World!"; + for (int i = 0; i < 10; ++i) + { + RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", msg.data.c_str()); + pub_->publish(msg); + } + } + rclcpp::Publisher::SharedPtr pub_; }; class Listener : public rclcpp::Node { public: - Listener(std::string name) : Node(name) - { - // Create a subscription on the "chatter" topic with the default callback - // method. 
- sub_ = this->create_subscription( - "chatter", 10, - std::bind(&Listener::callback, this, std::placeholders::_1)); - } - // the publisher must be public - rclcpp::Subscription::SharedPtr sub_; + Listener(std::string name) : Node(name) + { + // Create a subscription on the "chatter" topic with the default callback method. + sub_ = this->create_subscription("chatter", 10, std::bind(&Listener::callback, this, std::placeholders::_1)); + } + // the publisher must be public + rclcpp::Subscription::SharedPtr sub_; private: - void callback(const std_msgs::msg::String::SharedPtr msg) - { - RCLCPP_INFO(this->get_logger(), "I heard: '%s'", msg->data.c_str()); - } + void callback(const std_msgs::msg::String::SharedPtr msg) + { + RCLCPP_INFO(this->get_logger(), "I heard: '%s'", msg->data.c_str()); + } }; -int main(int argc, char* argv[]) { +int main(int argc, char **argv) +{ rclcpp::init(argc, argv); - auto talker = std::make_shared(); auto listener1 = std::make_shared("listener1"); auto listener2 = std::make_shared("listener2"); rclcpp::ExecutorOptions options; - auto strategy = std::make_shared>(); + auto strategy = std::make_shared>(); options.memory_strategy = strategy; - auto executor = new priority_executor::TimedExecutor(options); + auto executor = new timed_executor::TimedExecutor(options); + // replace the above line with the following line to use the default executor + // which will intermix the execution of listener1 and listener2 + // auto executor = std::make_shared(options); - // must be set to post_execute can set new deadlines - executor->prio_memory_strategy_ = strategy; - - - // the new funcitons in PriorityMemoryStrategy accept the handle of the - // timer/subscription as the first argument - strategy->set_executable_deadline(talker->timer_->get_timer_handle(), 1000, - priority_executor::ExecutableType::TIMER, 0); - // you _must_ set the timer_handle for each chain - strategy->get_priority_settings(talker->timer_->get_timer_handle()) - ->timer_handle = talker->timer_; - // you _must_ mark the first executable in the chain - strategy->set_first_in_chain(talker->timer_->get_timer_handle()); - // set the same period and chain_id for each callback in the chain - strategy->set_executable_deadline(listener1->sub_->get_subscription_handle(), - 1000, priority_executor::ExecutableType::SUBSCRIPTION, 0); - strategy->set_executable_deadline(listener2->sub_->get_subscription_handle(), - 1000, priority_executor::ExecutableType::SUBSCRIPTION, 0); - // you _must_ mark the last executable in the chain (used to keep track of different instances of the same chain) - strategy->set_last_in_chain(listener2->sub_->get_subscription_handle()); - // add all the nodes to the executor + strategy->set_executable_deadline(talker->timer_->get_timer_handle(), 100, TIMER); + strategy->set_executable_deadline(listener1->sub_->get_subscription_handle(), 100, SUBSCRIPTION); + strategy->set_executable_deadline(listener2->sub_->get_subscription_handle(), 50, SUBSCRIPTION); executor->add_node(talker); executor->add_node(listener1); executor->add_node(listener2); - // if the executor behaves unexpectedly, you can print the priority settings to make sure they are correct - std::cout << *strategy->get_priority_settings( - talker->timer_->get_timer_handle()) - << std::endl; - std::cout << *strategy->get_priority_settings( - listener1->sub_->get_subscription_handle()) - << std::endl; - std::cout << *strategy->get_priority_settings( - listener2->sub_->get_subscription_handle()) - << std::endl; executor->spin(); - - 
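As the comment in `main()` above notes, the same talker and two listeners can also be run on the stock rclcpp executor, which interleaves the listeners with no chain or deadline awareness. For comparison, a minimal sketch of that baseline using only standard rclcpp API and no `priority_executor` types; the message type and topic match the example above.

```cpp
#include <chrono>
#include <memory>
#include <string>
#include <utility>

#include "rclcpp/rclcpp.hpp"
#include "std_msgs/msg/string.hpp"

int main(int argc, char **argv) {
  rclcpp::init(argc, argv);

  // One publisher node ticking at 1 Hz on "chatter".
  auto talker = std::make_shared<rclcpp::Node>("talker");
  auto pub = talker->create_publisher<std_msgs::msg::String>("chatter", 10);
  auto talker_logger = talker->get_logger();
  // Keep the returned timer alive for as long as the node should publish.
  auto timer = talker->create_wall_timer(std::chrono::seconds(1), [pub, talker_logger]() {
    std_msgs::msg::String msg;
    msg.data = "Hello World!";
    RCLCPP_INFO(talker_logger, "Publishing: '%s'", msg.data.c_str());
    pub->publish(msg);
  });

  // Two subscriber nodes on the same topic.
  auto make_listener = [](const std::string &name) {
    auto node = std::make_shared<rclcpp::Node>(name);
    auto logger = node->get_logger();
    auto sub = node->create_subscription<std_msgs::msg::String>(
        "chatter", 10, [logger](const std_msgs::msg::String::SharedPtr msg) {
          RCLCPP_INFO(logger, "I heard: '%s'", msg->data.c_str());
        });
    return std::make_pair(node, sub);
  };
  auto listener1 = make_listener("listener1");
  auto listener2 = make_listener("listener2");

  // The stock single-threaded executor services callbacks in its own order,
  // with no notion of chains or deadlines.
  rclcpp::executors::SingleThreadedExecutor executor;
  executor.add_node(talker);
  executor.add_node(listener1.first);
  executor.add_node(listener2.first);
  executor.spin();

  rclcpp::shutdown();
  return 0;
}
```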
rclcpp::shutdown(); - return 0; } diff --git a/src/simple_timer/CMakeLists.txt b/src/simple_timer/CMakeLists.txt index f33473c..f0630d9 100644 --- a/src/simple_timer/CMakeLists.txt +++ b/src/simple_timer/CMakeLists.txt @@ -1,51 +1,48 @@ -cmake_minimum_required(VERSION 3.8) -project(simple_timer VERSION 0.1.0) +cmake_minimum_required(VERSION 3.5) +project(simple_timer) -# Set C++ standards -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) +# Default to C99 +if(NOT CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99) +endif() + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() -# Compiler options if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) endif() -# Find dependencies +# find dependencies find_package(ament_cmake REQUIRED) -find_package(rclcpp REQUIRED) +# uncomment the following section in order to fill in +# further dependencies manually. +# find_package( REQUIRED) -# Library targets -add_library(simple_timer - src/cycle_timer.cpp - src/period_timer.cpp -) +add_library(simple_timer src/cycle_timer.cpp src/period_timer.cpp) target_include_directories(simple_timer PUBLIC $ $ ) -ament_target_dependencies(simple_timer - rclcpp -) - -add_library(rt-sched - src/rt-sched.cpp -) +add_library(rt-sched src/rt-sched.cpp) target_include_directories(rt-sched PUBLIC $ $ ) -ament_target_dependencies(rt-sched - rclcpp -) -# Testing if(BUILD_TESTING) find_package(ament_lint_auto REQUIRED) + # the following line skips the linter which checks for copyrights + # uncomment the line when a copyright and license is not present in all source files + #set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # uncomment the line when this package is not in a git repo + #set(ament_cmake_cpplint_FOUND TRUE) ament_lint_auto_find_test_dependencies() endif() -# Installation install( DIRECTORY include/ DESTINATION include @@ -53,15 +50,12 @@ install( install( TARGETS simple_timer rt-sched - EXPORT export_${PROJECT_NAME} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib RUNTIME DESTINATION bin - INCLUDES DESTINATION include ) -# Export and package configuration ament_export_include_directories(include) -ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) -ament_export_dependencies(rclcpp) +ament_export_libraries(simple_timer) +ament_export_libraries(rt-sched) ament_package() diff --git a/src/simple_timer/include/simple_timer/cycle_timer.hpp b/src/simple_timer/include/simple_timer/cycle_timer.hpp index e2a47ea..556c8d4 100644 --- a/src/simple_timer/include/simple_timer/cycle_timer.hpp +++ b/src/simple_timer/include/simple_timer/cycle_timer.hpp @@ -3,23 +3,21 @@ #include #include "simple_timer/rt-sched.hpp" - -namespace simple_timer { - -class CycleTimer { -public: - explicit CycleTimer(long const start_delay = 0); - void tick(); - - u64 const start_delay_time; - u64 start_time = 0; - u64 last_cycle_time = 0; - unsigned long max_diff = 0; - unsigned long min_diff = 0; - unsigned long last_diff = 0; - bool recording = false; -}; - +namespace simple_timer +{ + class CycleTimer + { + public: + CycleTimer(long start_delay=0); + void tick() ; + const u64 start_delay_time; + u64 start_time = 0; + u64 last_cycle_time = 0; + unsigned long max_diff = 0; + unsigned long min_diff = 0; + unsigned long last_diff = 0; + bool recording = false; + }; } // namespace simple_timer #endif \ No newline at end of file diff --git 
a/src/simple_timer/include/simple_timer/period_timer.hpp b/src/simple_timer/include/simple_timer/period_timer.hpp index bdaccf4..bdc909c 100644 --- a/src/simple_timer/include/simple_timer/period_timer.hpp +++ b/src/simple_timer/include/simple_timer/period_timer.hpp @@ -3,24 +3,23 @@ #include #include "simple_timer/rt-sched.hpp" +namespace simple_timer +{ + class PeriodTimer + { + public: + PeriodTimer(long start_delay = 0); + void start(); + void stop(); + const u64 start_delay_time; + u64 start_time = 0; -namespace simple_timer { - -class PeriodTimer { -public: - explicit PeriodTimer(long const start_delay = 0); - void start(); - void stop(); - - u64 const start_delay_time; - u64 start_time = 0; - u64 last_period_time = 0; - unsigned long max_period = 0; - unsigned long min_period = 0; - unsigned long last_period = 0; - bool recording = false; -}; - + u64 last_period_time = 0; + unsigned long max_period = 0; + unsigned long min_period = 0; + unsigned long last_period = 0; + bool recording = false; + }; } // namespace simple_timer #endif \ No newline at end of file diff --git a/src/simple_timer/include/simple_timer/rt-sched.hpp b/src/simple_timer/include/simple_timer/rt-sched.hpp index 73ccce8..4f035b2 100644 --- a/src/simple_timer/include/simple_timer/rt-sched.hpp +++ b/src/simple_timer/include/simple_timer/rt-sched.hpp @@ -22,76 +22,77 @@ #endif #ifdef __x86_64__ -#define __NR_sched_setattr 314 -#define __NR_sched_getattr 315 +#define __NR_sched_setattr 314 +#define __NR_sched_getattr 315 #endif #ifdef __i386__ -#define __NR_sched_setattr 351 -#define __NR_sched_getattr 352 +#define __NR_sched_setattr 351 +#define __NR_sched_getattr 352 #endif #ifdef __arm__ #ifndef __NR_sched_setattr -#define __NR_sched_setattr 380 +#define __NR_sched_setattr 380 #endif #ifndef __NR_sched_getattr -#define __NR_sched_getattr 381 +#define __NR_sched_getattr 381 #endif #endif #ifdef __tilegx__ -#define __NR_sched_setattr 274 -#define __NR_sched_getattr 275 +#define __NR_sched_setattr 274 +#define __NR_sched_getattr 275 #endif typedef unsigned long long u64; #define NS_TO_MS 1000000 - struct sched_attr { - uint32_t size; - uint32_t sched_policy; - uint64_t sched_flags; + uint32_t size; + uint32_t sched_policy; + uint64_t sched_flags; - /* SCHED_NORMAL, SCHED_BATCH */ - int32_t sched_nice; + /* SCHED_NORMAL, SCHED_BATCH */ + int32_t sched_nice; - /* SCHED_FIFO, SCHED_RR */ - uint32_t sched_priority; + /* SCHED_FIFO, SCHED_RR */ + uint32_t sched_priority; - /* SCHED_DEADLINE */ - uint64_t sched_runtime; - uint64_t sched_deadline; - uint64_t sched_period; + /* SCHED_DEADLINE */ + uint64_t sched_runtime; + uint64_t sched_deadline; + uint64_t sched_period; }; int sched_setattr(pid_t pid, - sched_attr const* attr, - unsigned int flags); + const struct sched_attr *attr, + unsigned int flags); int sched_getattr(pid_t pid, - sched_attr* attr, - unsigned int size, - unsigned int flags); + struct sched_attr *attr, + unsigned int size, + unsigned int flags); u64 get_time_us(void); -typedef struct node_time_logger { - std::shared_ptr>> recorded_times; +typedef struct node_time_logger +{ + std::shared_ptr>> recorded_times; } node_time_logger; -void log_entry(node_time_logger logger, std::string const& text); +void log_entry(node_time_logger logger, std::string text); node_time_logger create_logger(); -inline u64 get_time_us(void) { - struct timespec ts; - u64 time; +inline u64 get_time_us(void) +{ + struct timespec ts; + unsigned long long time; - clock_gettime(CLOCK_MONOTONIC_RAW, &ts); - time = ts.tv_sec * 1000000; - 
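The `sched_attr` struct and `sched_setattr()` wrapper declared above are the raw interface to Linux's SCHED_DEADLINE policy; the syscall has no glibc wrapper, hence the per-architecture syscall numbers. A hedged sketch of how a thread might place itself under SCHED_DEADLINE through this wrapper: the runtime/deadline/period values are purely illustrative, and the call needs root or CAP_SYS_NICE to succeed.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

#include "simple_timer/rt-sched.hpp"  // sched_attr + sched_setattr wrapper from this package

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6  // policy id from the kernel's linux/sched.h
#endif

int main() {
  struct sched_attr attr;
  std::memset(&attr, 0, sizeof(attr));
  attr.size = sizeof(attr);
  attr.sched_policy = SCHED_DEADLINE;
  // Illustrative budget: 5 ms of runtime every 100 ms, with the deadline equal
  // to the period (all three values are in nanoseconds for SCHED_DEADLINE).
  attr.sched_runtime = 5ULL * 1000 * 1000;
  attr.sched_deadline = 100ULL * 1000 * 1000;
  attr.sched_period = 100ULL * 1000 * 1000;

  // pid 0 means "the calling thread"; the flags argument is currently unused.
  if (sched_setattr(0, &attr, 0) != 0) {
    std::perror("sched_setattr");  // typically EPERM without CAP_SYS_NICE
    return 1;
  }
  std::printf("thread is now scheduled with SCHED_DEADLINE\n");
  return 0;
}
```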
time += ts.tv_nsec / 1000; + clock_gettime(CLOCK_MONOTONIC_RAW, &ts); + time = ts.tv_sec * 1000000; + time += ts.tv_nsec / 1000; - return time; + return time; } #endif /* __RT_SCHED_H__ */ diff --git a/src/simple_timer/package.xml b/src/simple_timer/package.xml index f0e1c00..f009c67 100644 --- a/src/simple_timer/package.xml +++ b/src/simple_timer/package.xml @@ -2,19 +2,13 @@ simple_timer - 0.1.0 - - ROS 2 package providing timer functionality for the dynamic executor experiments - - kurt - Apache License 2.0 + 0.0.0 + TODO: Package description + nvidia + TODO: License declaration ament_cmake - - rclcpp - - ament_lint_auto ament_lint_common diff --git a/src/simple_timer/src/cycle_timer.cpp b/src/simple_timer/src/cycle_timer.cpp index 4bd539a..143f13b 100644 --- a/src/simple_timer/src/cycle_timer.cpp +++ b/src/simple_timer/src/cycle_timer.cpp @@ -1,34 +1,43 @@ #include "simple_timer/cycle_timer.hpp" -namespace simple_timer { +namespace simple_timer +{ + CycleTimer::CycleTimer(long start_delay) : start_delay_time(start_delay * 1000) + { + } -CycleTimer::CycleTimer(long const start_delay) - : start_delay_time(start_delay * 1000) { -} - -void CycleTimer::tick() { - u64 const current_wall_time = get_time_us(); + void CycleTimer::tick() + { + u64 current_wall_time = get_time_us(); u64 time_diff = 0; - if (!recording) { - if (start_time == 0) { - start_time = current_wall_time; - } else if (current_wall_time - start_time > start_delay_time) { - recording = true; - last_cycle_time = current_wall_time; - start_time = current_wall_time; - } - } else { - time_diff = current_wall_time - last_cycle_time; - if (time_diff < min_diff || min_diff == 0) { - min_diff = time_diff; - } - if (time_diff > max_diff || max_diff == 0) { - max_diff = time_diff; - } - last_cycle_time = current_wall_time; - last_diff = time_diff; - } -} + if (!recording) + { -} // namespace simple_timer + if (start_time == 0) + { + start_time = current_wall_time; + } + else if (current_wall_time - start_time > start_delay_time) + { + recording = true; + last_cycle_time = current_wall_time; + start_time = current_wall_time; + } + } + else + { + time_diff = current_wall_time - last_cycle_time; + if (time_diff < min_diff || min_diff == 0) + { + min_diff = time_diff; + } + if (time_diff > max_diff || max_diff == 0) + { + max_diff = time_diff; + } + last_cycle_time = current_wall_time; + last_diff = time_diff; + } + } +} // namespace simple_timer diff --git a/src/simple_timer/src/period_timer.cpp b/src/simple_timer/src/period_timer.cpp index 1869270..9c9a9ce 100644 --- a/src/simple_timer/src/period_timer.cpp +++ b/src/simple_timer/src/period_timer.cpp @@ -1,43 +1,56 @@ + #include "simple_timer/period_timer.hpp" -namespace simple_timer { +namespace simple_timer +{ + PeriodTimer::PeriodTimer(long start_delay) : start_delay_time(start_delay * 1000) + { + } -PeriodTimer::PeriodTimer(long const start_delay) - : start_delay_time(start_delay * 1000) { -} + void PeriodTimer::start() + { + u64 current_wall_time = get_time_us(); -void PeriodTimer::start() { - u64 const current_wall_time = get_time_us(); + if (!recording) + { - if (!recording) { - if (start_time == 0) { - start_time = current_wall_time; - } else if (current_wall_time - start_time > start_delay_time) { - recording = true; - start_time = current_wall_time; + if (start_time == 0) + { + start_time = current_wall_time; + } + else if (current_wall_time - start_time > start_delay_time) + { + recording = true; + start_time = current_wall_time; + last_period_time = current_wall_time; + } + } 
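The `CycleTimer` and `PeriodTimer` helpers above are deliberately small: `tick()` tracks the spacing between successive calls, and `start()`/`stop()` bracket a region of interest, both after an optional warm-up delay, with min/max/last kept in microseconds. A hedged sketch of how a measurement loop might use them; it assumes the `simple_timer` package above is built and linked, and the 1000 ms warm-up and sleep durations are illustrative.

```cpp
#include <chrono>
#include <cstdio>
#include <thread>

#include "simple_timer/cycle_timer.hpp"
#include "simple_timer/period_timer.hpp"

int main() {
  simple_timer::CycleTimer cycle_timer(1000);  // ignore the first 1000 ms while things settle
  simple_timer::PeriodTimer work_timer(1000);

  for (int i = 0; i < 50; ++i) {
    cycle_timer.tick();                        // spacing between loop iterations

    work_timer.start();                        // time just the "work" section
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
    work_timer.stop();

    std::this_thread::sleep_for(std::chrono::milliseconds(20));
  }

  // Statistics are only meaningful once the warm-up delay has elapsed.
  if (cycle_timer.recording) {
    std::printf("cycle us: last=%lu min=%lu max=%lu\n",
                cycle_timer.last_diff, cycle_timer.min_diff, cycle_timer.max_diff);
  }
  if (work_timer.recording) {
    std::printf("work  us: last=%lu min=%lu max=%lu\n",
                work_timer.last_period, work_timer.min_period, work_timer.max_period);
  }
  return 0;
}
```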
+ else + { last_period_time = current_wall_time; } - } else { - last_period_time = current_wall_time; } -} - -void PeriodTimer::stop() { - u64 const current_wall_time = get_time_us(); + void PeriodTimer::stop() + { + u64 current_wall_time = get_time_us(); u64 time_diff = 0; - if (!recording) { + if (!recording) + { return; } - - time_diff = current_wall_time - last_period_time; - if (time_diff < min_period || min_period == 0) { + else + { + time_diff = current_wall_time - last_period_time; + if (time_diff < min_period || min_period == 0) + { min_period = time_diff; - } - if (time_diff > max_period || max_period == 0) { + } + if (time_diff > max_period || max_period == 0) + { max_period = time_diff; + } + last_period = time_diff; } - last_period = time_diff; -} - -} // namespace simple_timer + } +} // namespace simple_timer diff --git a/src/simple_timer/src/rt-sched.cpp b/src/simple_timer/src/rt-sched.cpp index a6955c8..000c370 100644 --- a/src/simple_timer/src/rt-sched.cpp +++ b/src/simple_timer/src/rt-sched.cpp @@ -13,33 +13,37 @@ #include #include #include + #include "simple_timer/rt-sched.hpp" -int sched_setattr( - pid_t pid, - sched_attr const* attr, - unsigned int flags) { - return syscall(__NR_sched_setattr, pid, attr, flags); +int sched_setattr(pid_t pid, + const struct sched_attr *attr, + unsigned int flags) +{ + return syscall(__NR_sched_setattr, pid, attr, flags); } -int sched_getattr( - pid_t pid, - sched_attr* attr, - unsigned int size, - unsigned int flags) { - return syscall(__NR_sched_getattr, pid, attr, size, flags); +int sched_getattr(pid_t pid, + struct sched_attr *attr, + unsigned int size, + unsigned int flags) +{ + return syscall(__NR_sched_getattr, pid, attr, size, flags); } -void log_entry(node_time_logger logger, std::string const& text) { - if (logger.recorded_times != nullptr) { - logger.recorded_times->emplace_back(text, get_time_us() / 1000); +void log_entry(node_time_logger logger, std::string text) +{ + if (logger.recorded_times != nullptr) + { + logger.recorded_times->push_back(std::make_pair(text, get_time_us() / 1000)); // std::cout<>>(); return logger;
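`node_time_logger` is just a shared vector of `(text, timestamp-in-ms)` pairs and `log_entry()` appends to it; the commented-out blocks in `post_execute()` above show the JSON-style strings the executor records this way. A small hedged sketch of the same pattern outside the executor, using the `create_logger()`/`log_entry()` interface shown above.

```cpp
#include <iostream>
#include <sstream>

#include "simple_timer/rt-sched.hpp"  // node_time_logger, create_logger(), log_entry()

int main() {
  node_time_logger logger = create_logger();

  // Record a couple of JSON-style entries, mirroring the format used in post_execute().
  log_entry(logger, "{\"operation\": \"example_start\"}");
  std::ostringstream oss;
  oss << "{\"operation\": \"next_deadline\", \"chain_id\": " << 0
      << ", \"deadline\": " << 1100 << ", \"on_time\": " << true << "}";
  log_entry(logger, oss.str());

  // Each entry is stored as (text, wall-clock time in ms since an arbitrary origin).
  for (const auto &entry : *logger.recorded_times) {
    std::cout << entry.second << " ms: " << entry.first << "\n";
  }
  return 0;
}
```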