basic refactor

Niklas Halle 2025-04-01 16:04:19 +02:00 committed by Niklas Halle
parent a6df3fcc41
commit abf3496c78
21 changed files with 13752 additions and 13232 deletions

74
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,74 @@
{
"files.associations": {
"cctype": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"csignal": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"cwctype": "cpp",
"array": "cpp",
"atomic": "cpp",
"strstream": "cpp",
"bit": "cpp",
"*.tcc": "cpp",
"bitset": "cpp",
"chrono": "cpp",
"compare": "cpp",
"complex": "cpp",
"concepts": "cpp",
"condition_variable": "cpp",
"cstdint": "cpp",
"deque": "cpp",
"list": "cpp",
"map": "cpp",
"set": "cpp",
"unordered_map": "cpp",
"unordered_set": "cpp",
"vector": "cpp",
"exception": "cpp",
"algorithm": "cpp",
"functional": "cpp",
"iterator": "cpp",
"memory": "cpp",
"memory_resource": "cpp",
"numeric": "cpp",
"optional": "cpp",
"random": "cpp",
"ratio": "cpp",
"string": "cpp",
"string_view": "cpp",
"system_error": "cpp",
"tuple": "cpp",
"type_traits": "cpp",
"utility": "cpp",
"fstream": "cpp",
"future": "cpp",
"initializer_list": "cpp",
"iomanip": "cpp",
"iosfwd": "cpp",
"iostream": "cpp",
"istream": "cpp",
"limits": "cpp",
"mutex": "cpp",
"new": "cpp",
"ostream": "cpp",
"ranges": "cpp",
"shared_mutex": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"stop_token": "cpp",
"streambuf": "cpp",
"thread": "cpp",
"cfenv": "cpp",
"cinttypes": "cpp",
"typeindex": "cpp",
"typeinfo": "cpp",
"variant": "cpp"
}
}

125
README.md
View file

@ -1,3 +1,124 @@
Experiment files for the RTIS ros_rt executor. Clone with submodules on, or add the executor to the src folder.
# Dynamic Priority Scheduling for ROS-EDF Executor
Run `ros2 run casestudy casestudy_example` then run the `analysis/analysis.ipynb` notebook.
This repository contains experiments for the ROS-EDF executor with priority-based and deadline-based scheduling capabilities.
## Usage
Clone with submodules enabled:
```
git clone --recurse-submodules https://github.com/user/ROS-Dynamic-Executor-Experiments.git
```
Make sure you have sourced your ROS 2 environment.
Run the example:
```
ros2 run casestudy casestudy_example
```
Then run the analysis notebook:
```
jupyter notebook analysis/analysis.ipynb
```
---
## Dynamically Adjusting Scheduler Approaches
The PriorityMemoryStrategy supports different scheduling policies for ROS callbacks. To extend it with scheduling that adjusts dynamically in response to semantic events, there are several viable approaches:
### Approach A: Custom PriorityExecutableComparator
Replace the existing comparator with a custom implementation:
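A minimal sketch of what such a comparator could look like (the `PriorityExecutable` stand-in and the `dynamic_priority` field below are illustrative assumptions, not the executor's actual interface):
```cpp
// Stand-in for the executor's PriorityExecutable; the fields shown here are
// assumptions for illustration only.
struct PriorityExecutable {
    int priority = 0;          // static, chain-aware priority
    int dynamic_priority = 0;  // hypothetical value updated by semantic events
};

// Custom comparator that would replace PriorityExecutableComparator (sketch).
// Convention assumed here: return true if 'a' should be scheduled after 'b'.
struct DynamicPriorityComparator {
    bool operator()(PriorityExecutable const* a, PriorityExecutable const* b) const {
        if (a->dynamic_priority != b->dynamic_priority) {
            return a->dynamic_priority < b->dynamic_priority;  // larger boost runs first
        }
        return a->priority < b->priority;  // fall back to the static priority
    }
};
```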
**Pros:**
- Direct control over scheduling logic
- Clean separation of concerns
- Can implement complex scheduling policies
- Doesn't require modifying deadline values
**Cons:**
- Requires understanding and maintaining the comparison logic
- May need to add new fields to PriorityExecutable to track dynamic priorities
- Could become complex if multiple factors affect priority
### Approach B: Dynamic Deadline Adjustment
Keep the EDF comparator but adjust deadlines based on priority logic:
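As a rough illustration (the helper name and boost policy below are assumptions; only the chain-indexed deadline deque exists in the current code), a semantic event could pull a chain's next deadline forward so EDF selects it sooner:
```cpp
#include <cstdint>
#include <deque>

// Sketch: deadlines are absolute times in milliseconds, one deque per chain,
// as in the experiment host. 'boost_chain_on_event' is a hypothetical helper.
void boost_chain_on_event(std::deque<uint64_t>& chain_deadlines, uint64_t boost_ms) {
    if (chain_deadlines.empty()) {
        return;
    }
    // An earlier deadline makes the EDF comparator rank this chain ahead of others.
    uint64_t& next_deadline = chain_deadlines.front();
    next_deadline = (next_deadline > boost_ms) ? next_deadline - boost_ms : 0;
}
```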
**Pros:**
- Works within the existing EDF framework
- Conceptually simple - just manipulate deadline values
- Doesn't require changing the core sorting mechanism
- Easier to debug (you can log deadline changes)
**Cons:**
- Potentially confusing semantics (using deadlines to represent non-deadline priorities)
- May interfere with actual deadline-based requirements
- Could lead to instability if not carefully managed
### Approach C: Event-Driven Priority Field
Add a dynamic boost factor field to PriorityExecutable:
```cpp
int dynamic_priority_boost = 0;
```
Then modify the comparator to consider this value:
```cpp
if (p1->sched_type == DEADLINE) {
    // Compare deadlines as usual, then use dynamic_priority_boost as a tiebreaker.
    if (p1_deadline == p2_deadline) {
        return p1->dynamic_priority_boost > p2->dynamic_priority_boost;
    }
    return p1_deadline < p2_deadline;
}
```
**Pros:**
- Clearer semantics than manipulating deadlines
- Keeps deadline scheduling intact for real-time guarantees
- Easy to adjust at runtime (see the event-listener sketch after this section)
**Cons:**
- Requires modifying both PriorityExecutable and comparison logic
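For example, a subscription on a semantic-event topic could raise the boost at runtime (a sketch: the `semantic_events` topic and the `set_boost` hook are assumptions, not part of the current code):
```cpp
#include <functional>
#include "rclcpp/rclcpp.hpp"
#include "std_msgs/msg/string.hpp"

// Sketch: 'set_boost' stands in for whatever hook would walk a chain's
// PriorityExecutables and update their dynamic_priority_boost.
rclcpp::Subscription<std_msgs::msg::String>::SharedPtr make_event_listener(
    rclcpp::Node::SharedPtr const& node,
    std::function<void(int chain_id, int boost)> set_boost) {
    return node->create_subscription<std_msgs::msg::String>(
        "semantic_events", 10,
        [set_boost](std_msgs::msg::String const& msg) {
            // Illustrative policy: boost chain 1 while an "obstacle" event is active.
            set_boost(1, msg.data == "obstacle" ? 10 : 0);
        });
}
```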
### Approach D: Priority Multiplier System
Implement a system where chains can have priority multipliers applied:
```cpp
float priority_multiplier = 1.0f;
```
And then in the comparator:
```cpp
// For priority-based scheduling: cast explicitly, since priority is an int
// and the multiplier is a float (the product truncates toward zero).
int effective_p1 = static_cast<int>(p1->priority * p1->priority_multiplier);
int effective_p2 = static_cast<int>(p2->priority * p2->priority_multiplier);
return effective_p1 < effective_p2;
```
**Pros:**
- Scales existing priorities rather than replacing them
- Preserves relative importance within chains
- Intuitive model for temporary priority boosts
**Cons:**
- May need to handle overflow/boundary cases (see the clamping sketch below)
- Requires careful tuning of multiplier ranges
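One way to keep those boundary cases in check (a sketch; the clamp bounds are arbitrary assumptions) is to clamp the multiplier and compute the effective priority in a wider type:
```cpp
#include <algorithm>
#include <cstdint>

// Sketch: clamp the multiplier to an assumed sane range and widen before
// multiplying, so a large boost cannot overflow or invert the ordering.
inline int64_t effective_priority(int const priority, float const multiplier) {
    float const clamped = std::clamp(multiplier, 0.5f, 4.0f);  // assumed bounds
    return static_cast<int64_t>(static_cast<double>(priority) * clamped);
}
```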
## Recommendation
Approach C (Event-Driven Priority Field) offers the best balance of:
1. Clean semantics
2. Minimal interference with existing scheduling logic
3. Clear separation between baseline priorities and dynamic adjustments
4. Straightforward implementation
This approach maintains real-time guarantees while enabling dynamic behaviors based on semantic events.

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View file

@ -1,95 +1,74 @@
cmake_minimum_required(VERSION 3.5)
project(casestudy)
cmake_minimum_required(VERSION 3.8)
project(casestudy VERSION 0.1.0)
# Default to C99
if(NOT CMAKE_C_STANDARD)
set(CMAKE_C_STANDARD 99)
endif()
# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
endif()
# Set C++ standards
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
# Compiler options
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
# find dependencies
# Find dependencies
find_package(ament_cmake REQUIRED)
# uncomment the following section in order to fill in
# further dependencies manually.
# find_package(<dependency> REQUIRED)
find_package(rclcpp REQUIRED)
find_package(priority_executor REQUIRED)
find_package(simple_timer REQUIRED)
find_package(rclcpp REQUIRED)
find_package(jsoncpp REQUIRED)
find_package(casestudy_tools REQUIRED)
# add_library(test_nodes src/test_nodes.cpp)
# target_include_directories(test_nodes PUBLIC
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include>
# )
# ament_target_dependencies(test_nodes
# rclcpp
# simple_timer
# casestudy_tools
# )
list(APPEND experiments
# Define experiment targets
set(EXPERIMENTS
casestudy_2023customfile_singlethread
casestudy_2024ours_latency
casestudy_2024ours_executor2executor
casestudy_example
)
# Function to create experiment targets
function(new_experiment experiment_name)
add_executable(${experiment_name} src/${experiment_name}.cpp)
target_include_directories(${experiment_name} PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
)
# Add dependencies with ament
ament_target_dependencies(${experiment_name}
rclcpp
priority_executor
simple_timer
casestudy_tools
rclcpp
priority_executor
)
endfunction()
foreach(experiment ${experiments})
# Create all experiment targets
foreach(experiment ${EXPERIMENTS})
new_experiment(${experiment})
endforeach()
target_link_libraries(casestudy_2023customfile_singlethread
jsoncpp
# Special handling for targets with additional dependencies
target_link_libraries(casestudy_2023customfile_singlethread jsoncpp)
# Installation
install(
TARGETS ${EXPERIMENTS}
RUNTIME DESTINATION lib/${PROJECT_NAME}
)
install(TARGETS
${experiments}
DESTINATION lib/${PROJECT_NAME}
)
install(
DIRECTORY include/
DESTINATION include
)
# Testing
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
# the following line skips the linter which checks for copyrights
# uncomment the line when a copyright and license is not present in all source files
#set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# uncomment the line when this package is not in a git repo
#set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies()
endif()
# Export and package configuration
ament_export_include_directories(include)
ament_package()

3
src/casestudy/include/.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
# We don't have content in this dir yet, but want it in git anyway
# So we add a non-empty .gitignore file, explicitly not ignoring itself
!.gitignore

View file

@ -2,19 +2,25 @@
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>casestudy</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="vboxuser@todo.todo">vboxuser</maintainer>
<license>TODO: License declaration</license>
<version>0.1.0</version>
<description>
ROS 2 case study package for evaluating different executor implementations and timing behaviors
</description>
<maintainer email="kurt4wilson@gmail.com">kurt</maintainer>
<license>Apache License 2.0</license>
<buildtool_depend>ament_cmake</buildtool_depend>
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<!-- Build Dependencies -->
<depend>rclcpp</depend>
<depend>priority_executor</depend>
<depend>simple_timer</depend>
<depend>casestudy_tools</depend>
<depend>jsoncpp</depend>
<!-- Test Dependencies -->
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<export>
<build_type>ament_cmake</build_type>

View file

@ -2,92 +2,88 @@
#include "priority_executor/priority_executor.hpp"
#include "priority_executor/priority_memory_strategy.hpp"
#include "casestudy_tools/test_nodes.hpp"
#include <string>
#include <fstream>
#include <unistd.h>
#include "std_msgs/msg/string.hpp"
#include "casestudy_tools/primes_workload.hpp"
#include <sys/prctl.h>
#include "casestudy_tools/primes_workload.hpp"
#include "casestudy_tools/experiment.hpp"
#include "jsoncpp/json/reader.h"
#include <string>
#include <fstream>
#include <unistd.h>
#include <sys/prctl.h>
// bool should_do_task = true;
std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, int idx)
{
void run_one_executor(
std::function<void(std::atomic<bool>&)> const& exec_fun,
int const idx) {
(void)idx;
// set this process to highest priority
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
int result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0)
{
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl;
int const result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result
<< ": " << strerror(errno) << std::endl;
}
// set process name
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
}
int ros_experiment(int argc, char **arg, std::string file_name, ExperimentConfig config)
{
int ros_experiment(
int const argc,
char** const argv,
std::string const& file_name,
ExperimentConfig config) {
rclcpp::init(argc, argv);
rclcpp::init(argc, arg);
// we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node");
auto const node = rclcpp::Node::make_shared("experiment_node");
node->declare_parameter("executor_type", "edf");
std::string executor_type;
node->get_parameter("executor_type", executor_type);
std::cout << "using executor type: " << executor_type << std::endl;
// TODO: move experiment configuration to main
config.executor_type = executor_type;
config.nodes.push_back(node);
Experiment experiment(config);
// std::string result_log = experiment.run(should_do_task);
std::vector<std::function<void(std::atomic<bool> &)>> exec_funs = experiment.getRunFunctions();
auto const exec_funs = experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs)
{
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
for (auto const& exec_fun : exec_funs) {
exec_threads.emplace_back(run_one_executor, exec_fun, i++);
}
for (auto &t : exec_threads)
{
for (auto& t : exec_threads) {
t.join();
}
experiment.writeLogsToFile(file_name, "2023customfile_singlethread");
return 0;
}
int main(int argc, char *argv[])
{
std::ifstream input_file("taskset.json"); // Replace with actual filename
int main(int argc, char* argv[]) {
std::ifstream input_file("taskset.json");
if (!input_file.is_open())
{
if (!input_file.is_open()) {
std::cerr << "Error opening file!" << std::endl;
return 1;
}
Json::Value root;
Json::Reader reader;
bool parsingSuccessful = reader.parse(input_file, root);
bool const parsingSuccessful = reader.parse(input_file, root);
if (!parsingSuccessful)
{
if (!parsingSuccessful) {
std::cerr << "Error parsing file!" << std::endl;
return 1;
}
@ -95,100 +91,74 @@ int main(int argc, char *argv[])
// set ourselves to the second highest priority
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
int result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0)
{
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl;
int const result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result
<< ": " << strerror(errno) << std::endl;
}
// calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
// config.chain_lengths = {4, 3, 4};
for (uint i = 0; i < root["chain_lengths"].size(); i++)
{
for (uint i = 0; i < root["chain_lengths"].size(); i++) {
config.chain_lengths.push_back(root["chain_lengths"][i].asUInt());
}
// config.node_ids = {{0, 1, 2, 3}, {4, 5, 6}, {7, 8, 9, 10}};
for (uint i = 0; i < root["node_ids"].size(); i++)
{
for (uint i = 0; i < root["node_ids"].size(); i++) {
std::vector<uint32_t> node_ids_row;
for (uint j = 0; j < root["node_ids"][i].size(); j++)
{
for (uint j = 0; j < root["node_ids"][i].size(); j++) {
node_ids_row.push_back(root["node_ids"][i][j].asUInt());
}
config.node_ids.push_back(node_ids_row);
config.node_ids.push_back(std::move(node_ids_row));
}
// config.node_priorities = {3, 2, 1, 0, 6, 5, 4, 10, 9, 8, 7}; // PICAS-style prioritiesA
for (uint i = 0; i < root["node_priorities"].size(); i++)
{
for (uint i = 0; i < root["node_priorities"].size(); i++) {
config.node_priorities.push_back(root["node_priorities"][i].asUInt());
}
// disabled for custom
config.num_groups = 0;
// config.group_memberships = {0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 2};
// config.chain_timer_control = {0, 1, 2};
for (uint i = 0; i < root["chain_timer_control"].size(); i++)
{
for (uint i = 0; i < root["chain_timer_control"].size(); i++) {
config.chain_timer_control.push_back(root["chain_timer_control"][i].asInt());
}
// config.node_runtimes = {5, 5, 5, 5, // chain 1
// 10, 20, 5, // chain 2
// 10, 10, 15, 20}; // chain 3
for (uint i = 0; i < root["node_runtimes"].size(); i++)
{
for (uint i = 0; i < root["node_runtimes"].size(); i++) {
config.node_runtimes.push_back(root["node_runtimes"][i].asDouble());
}
// config.chain_periods = {50, 100, 110};
for (uint i = 0; i < root["chain_periods"].size(); i++)
{
for (uint i = 0; i < root["chain_periods"].size(); i++) {
config.chain_periods.push_back(root["chain_periods"][i].asInt());
}
// config.node_executor_assignments = {}; // empty - multicore mode
for (uint i = 0; i < root["node_executor_assignments"].size(); i++)
{
for (uint i = 0; i < root["node_executor_assignments"].size(); i++) {
config.node_executor_assignments.push_back(root["node_executor_assignments"][i].asUInt() - 1);
}
// single threaded mode - assign executors to cpu cores
for (uint i = 0; i < root["executor_to_cpu_core"].size(); i++)
{
for (uint i = 0; i < root["executor_to_cpu_core"].size(); i++) {
config.executor_to_cpu_assignments.push_back(root["executor_to_cpu_core"][i].asUInt() - 1);
}
config.parallel_mode = false;
config.cores = 4;
// sanity_check_config(config);
int const i = (100 - 35) / 5;
int const ros_budget = 35 + i * 5;
int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
// from 35/65 to 100/0 in increments of 5
// for (int i = 0; i <= (100 - 35) / 5; i++)
// {
// for now, run with 100% ros
int i = (100 - 35) / 5;
int ros_budget = 35 + i * 5;
int other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget << " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread
should_do_task.store(true);
double other_task_time = 80 * other_task_budget / 100.0;
double const other_task_time = 80 * other_task_budget / 100.0;
std::thread other_task(do_other_task, other_task_time, 80, std::ref(should_do_task));
// do ros task
std::string file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name);
std::string const file_name = "ros_" + std::to_string(ros_budget);
std::thread ros_task(ros_experiment, argc, argv, file_name, config);
std::cout << "tasks started" << std::endl;
ros_task.join();
other_task.join();
std::cout << "tasks joined" << std::endl;
// // run once
// break;
// }
return 0;
}

View file

@ -7,8 +7,9 @@
// bool should_do_task = true;
std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
int idx) {
void run_one_executor(
std::function<void(std::atomic<bool>&)> const& exec_fun,
int const idx) {
(void)idx;
// set this process to highest priority
// struct sched_param param;
@ -27,7 +28,8 @@ void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
rt_attr.sched_policy = SCHED_FIFO;
rt_attr.sched_runtime = 0;
rt_attr.sched_period = 0;
int result = sched_setattr(0, &rt_attr, 0);
int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "executor task: could not set scheduler: " << result << ": "
<< strerror(errno) << std::endl;
@ -35,11 +37,12 @@ void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
// set process name
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
}
int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
int ros_experiment(
rclcpp::Node::SharedPtr const node,
std::string const& file_name,
ExperimentConfig config) {
std::string executor_type;
@ -56,24 +59,23 @@ int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
Experiment experiment(config);
// std::string result_log = experiment.run(should_do_task);
std::vector<std::function<void(std::atomic<bool> &)>> exec_funs =
experiment.getRunFunctions();
auto const exec_funs = experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs) {
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
for (auto const& exec_fun : exec_funs) {
exec_threads.emplace_back(run_one_executor, exec_fun, i++);
}
for (auto& t : exec_threads) {
t.join();
}
std::string outputname = "2024ours_executor2executor";
outputname +=
"_" + std::to_string(num_subscribers) + "_" + std::to_string(num_chains);
std::string outputname = "2024ours_executor2executor_" +
std::to_string(num_subscribers) + "_" +
std::to_string(num_chains);
experiment.writeLogsToFile(file_name, outputname);
return 0;
@ -82,7 +84,7 @@ int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
int main(int argc, char* argv[]) {
rclcpp::init(argc, argv);
// we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node");
auto const node = rclcpp::Node::make_shared("experiment_node");
// auto node = rclcpp::Node::make_shared("experiment_node",
// rclcpp::NodeOptions().use_intra_process_comms(true));
node->declare_parameter("executor_type", "edf");
@ -106,7 +108,7 @@ int main(int argc, char *argv[]) {
rt_attr.sched_policy = SCHED_FIFO;
// rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
rt_attr.sched_priority = 99;
int result = sched_setattr(0, &rt_attr, 0);
int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": "
@ -141,12 +143,10 @@ int main(int argc, char *argv[]) {
}
for (int i = 0; i < num_s + 1; i++) {
config.node_priorities.push_back(i);
config.node_runtimes.push_back(runtime);
}
config.chain_lengths.push_back(num_s+1);
config.chain_periods.push_back(10);
for (int i = 0; i < num_s + 1; i++) {
config.node_runtimes.push_back(runtime);
}
config.chain_timer_control.push_back(c);
}
@ -174,16 +174,16 @@ int main(int argc, char *argv[]) {
// for (int i = 0; i <= (100 - 35) / 5; i++)
// {
// for now, run with 100% ros
int i = (100 - 35) / 5;
int ros_budget = 35 + i * 5;
int other_task_budget = 65 - i * 5;
int const i = (100 - 35) / 5;
int const ros_budget = 35 + i * 5;
int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread
should_do_task.store(true);
// do ros task
std::string file_name = "ros_" + std::to_string(ros_budget);
std::string const file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name);
std::thread ros_task(ros_experiment, node, file_name, config);
std::cout << "tasks started" << std::endl;
@ -193,4 +193,5 @@ int main(int argc, char *argv[]) {
// // run once
// break;
// }
return 0;
}

View file

@ -7,9 +7,8 @@
// bool should_do_task = true;
std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
int idx) {
void run_one_executor(std::function<void(std::atomic<bool>&)> const& exec_fun,
int const idx) {
(void)idx;
// set this process to highest priority
// struct sched_param param;
@ -28,7 +27,7 @@ void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
rt_attr.sched_policy = SCHED_FIFO;
rt_attr.sched_runtime = 0;
rt_attr.sched_period = 0;
int result = sched_setattr(0, &rt_attr, 0);
int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "executor task: could not set scheduler: " << result << ": "
<< strerror(errno) << std::endl;
@ -40,9 +39,8 @@ void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun,
exec_fun(should_do_task);
}
int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
int ros_experiment(rclcpp::Node::SharedPtr const& node, std::string const& file_name,
ExperimentConfig config) {
std::string executor_type;
node->get_parameter("executor_type", executor_type);
int num_subscribers;
@ -63,7 +61,7 @@ int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs) {
for (auto const& exec_fun : exec_funs) {
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
}
@ -72,9 +70,8 @@ int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
t.join();
}
std::string outputname = "2024ours_latency";
outputname +=
"_" + std::to_string(num_subscribers) + "_" + std::to_string(num_chains);
std::string const outputname = "2024ours_latency_" +
std::to_string(num_subscribers) + "_" + std::to_string(num_chains);
experiment.writeLogsToFile(file_name, outputname);
return 0;
@ -83,7 +80,7 @@ int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name,
int main(int argc, char* argv[]) {
rclcpp::init(argc, argv);
// we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node");
auto const node = rclcpp::Node::make_shared("experiment_node");
// auto node = rclcpp::Node::make_shared("experiment_node",
// rclcpp::NodeOptions().use_intra_process_comms(true));
node->declare_parameter("executor_type", "edf");
@ -107,7 +104,7 @@ int main(int argc, char *argv[]) {
rt_attr.sched_policy = SCHED_FIFO;
// rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
rt_attr.sched_priority = 99;
int result = sched_setattr(0, &rt_attr, 0);
int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": "
@ -150,7 +147,6 @@ int main(int argc, char *argv[]) {
std::cout << std::endl;
}
config.num_groups = 0;
config.group_memberships = {}; // no groups
@ -166,16 +162,16 @@ int main(int argc, char *argv[]) {
// for (int i = 0; i <= (100 - 35) / 5; i++)
// {
// for now, run with 100% ros
int i = (100 - 35) / 5;
int ros_budget = 35 + i * 5;
int other_task_budget = 65 - i * 5;
int const i = (100 - 35) / 5;
int const ros_budget = 35 + i * 5;
int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread
should_do_task.store(true);
// do ros task
std::string file_name = "ros_" + std::to_string(ros_budget);
std::string const file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name);
std::thread ros_task(ros_experiment, node, file_name, config);
std::cout << "tasks started" << std::endl;
@ -185,4 +181,5 @@ int main(int argc, char *argv[]) {
// // run once
// break;
// }
return 0;
}

View file

@ -3,13 +3,10 @@
#include <string>
#include <fstream>
#include <iostream>
#include <unistd.h>
#include <sys/prctl.h>
#include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/string.hpp>
#include "casestudy_tools/test_nodes.hpp"
#include "casestudy_tools/experiment.hpp"
#include "casestudy_tools/primes_workload.hpp"
@ -19,8 +16,9 @@
// bool should_do_task = true;
std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, int idx)
{
void run_one_executor(
std::function<void(std::atomic<bool>&)> const& exec_fun,
int const idx) {
(void)idx;
// cpu_set_t cpuset;
// CPU_ZERO(&cpuset);
@ -38,32 +36,37 @@ void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, int idx
// set this process to second highest priority
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
int result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0)
{
std::cerr << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl;
std::cerr << "This operation requires root privileges. Please run the program with sufficient permissions." << std::endl;
int const result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0) {
std::cerr << "ros_experiment: sched_setscheduler failed: " << result
<< ": " << strerror(errno) << std::endl;
std::cerr << "This operation requires root privileges. Please run the program with sufficient permissions."
<< std::endl;
exit(EXIT_FAILURE);
}
// Set the process name to "ros_experiment" for easier identification during debugging or system monitoring.
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
}
int ros_experiment(int argc, char **arg, std::string file_name, ExperimentConfig config)
{
int ros_experiment(
int const argc,
char** const argv,
std::string const& file_name,
ExperimentConfig config) {
rclcpp::init(argc, argv);
rclcpp::init(argc, arg);
// we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_parameters");
auto const node = rclcpp::Node::make_shared("experiment_parameters");
node->declare_parameter("executor_type", "edf");
std::string executor_type;
node->get_parameter("executor_type", executor_type);
std::cout << "using executor type: " << executor_type << std::endl;
// TODO: move experiment configuration to main
config.executor_type = executor_type;
// Enable parallel mode for the experiment configuration.
// When set to true, this allows multiple executors to run concurrently,
@ -73,29 +76,28 @@ int ros_experiment(int argc, char **arg, std::string file_name, ExperimentConfig
Experiment experiment(config);
// std::string result_log = experiment.run(should_do_task);
std::vector<std::function<void(std::atomic<bool> &)> > exec_funs = experiment.getRunFunctions();
auto const exec_funs = experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs)
{
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
for (auto const& exec_fun : exec_funs) {
exec_threads.emplace_back(run_one_executor, exec_fun, i++);
}
for (auto &t : exec_threads)
{
for (auto& t : exec_threads) {
t.join();
}
std::string outputname = "casestudy_example";
std::string const outputname = "casestudy_example";
experiment.writeLogsToFile(file_name, outputname);
return 0;
}
int main(int argc, char *argv[])
{
int main(int argc, char* argv[]) {
// calibrate the dummy load for the current system
calibrate_dummy_load();
@ -115,20 +117,18 @@ int main(int argc, char *argv[])
sanity_check_config(config);
std::thread ros_task([&]() {
try
{
try {
ros_experiment(argc, argv, "cs_example", config);
}
catch (const std::exception &e)
{
} catch (std::exception const& e) {
std::cerr << "Exception in ros_experiment: " << e.what() << std::endl;
}
catch (...)
{
} catch (...) {
std::cerr << "Unknown exception in ros_experiment." << std::endl;
}
});
std::cout << "tasks started" << std::endl;
ros_task.join();
std::cout << "tasks joined" << std::endl;
return 0;
}

View file

@ -1,38 +1,26 @@
cmake_minimum_required(VERSION 3.5)
project(casestudy_tools)
cmake_minimum_required(VERSION 3.8)
project(casestudy_tools VERSION 0.1.0)
# Default to C99
if(NOT CMAKE_C_STANDARD)
set(CMAKE_C_STANDARD 99)
endif()
# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
endif()
# Set C++ standards
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
# Compiler options
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
# find dependencies
# Find dependencies
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(priority_executor REQUIRED)
find_package(simple_timer REQUIRED)
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
# the following line skips the linter which checks for copyrights
# uncomment the line when a copyright and license is not present in all source files
#set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# uncomment the line when this package is not in a git repo
#set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies()
endif()
add_library(primes_workload src/primes_workload.cpp)
# Library targets
add_library(primes_workload
src/primes_workload.cpp
)
target_include_directories(primes_workload PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
@ -41,7 +29,6 @@ ament_target_dependencies(primes_workload
simple_timer
rclcpp
priority_executor
)
add_library(test_nodes src/test_nodes.cpp)
@ -54,8 +41,9 @@ ament_target_dependencies(test_nodes
rclcpp
simple_timer
)
add_library(experiment_host src/experiment.cpp)
add_library(experiment_host
src/experiment.cpp
)
target_include_directories(experiment_host PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include>
@ -70,7 +58,13 @@ ament_target_dependencies(experiment_host
priority_executor
)
# Testing
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
endif()
# Installation
install(
DIRECTORY include/
DESTINATION include
@ -85,6 +79,7 @@ install(
INCLUDES DESTINATION include
)
# Export and package configuration
ament_export_include_directories(include)
# ament_export_libraries(primes_workload test_nodes experiment_host)
ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET)

View file

@ -8,9 +8,7 @@
#include "casestudy_tools/test_nodes.hpp"
#include "simple_timer/rt-sched.hpp"
struct ExperimentConfig
{
struct ExperimentConfig {
std::vector<uint32_t> chain_lengths;
// TODO: rename to callback_ids
std::vector<std::vector<uint32_t>> node_ids;
@ -38,103 +36,101 @@ struct ExperimentConfig
bool parallel_mode = false;
size_t cores = 2;
// rclcpp::Node::SharedPtr node;
// nodes to host the callbacks. In parallel (one executor using multiple threads) mode, there is one node and one executor. The node is provided by the experiment provider
// in singlethread (each executor uses one thread), there is one node per executor. Additional nodes may be created by the experiment host
// nodes to host the callbacks. In parallel (one executor using multiple threads) mode,
// there is one node and one executor. The node is provided by the experiment provider.
// In singlethread (each executor uses one thread), there is one node per executor.
// Additional nodes may be created by the experiment host.
std::vector<rclcpp::Node::SharedPtr> nodes;
std::vector<std::string> special_cases = {};
std::vector<std::string> special_cases;
};
struct experiment_executor
{
std::shared_ptr<timed_executor::TimedExecutor> executor;
std::shared_ptr<PriorityMemoryStrategy<>> strat;
struct experiment_executor {
std::shared_ptr<priority_executor::TimedExecutor> executor;
std::shared_ptr<priority_executor::PriorityMemoryStrategy<>> strat;
std::shared_ptr<rclcpp::Executor> default_executor;
};
class Experiment
{
class Experiment {
public:
Experiment(ExperimentConfig config);
explicit Experiment(ExperimentConfig config);
/**
* @brief Run the experiment
* runs in the current thread
* @param should_do_task a flag that can be set to false to stop the experiment. The flag will be set to false when the experiment is complete.
* @brief Run the experiment in the current thread
* @param should_do_task a flag that can be set to false to stop the experiment.
* The flag will be set to false when the experiment is complete.
* @return std::string containing experiment results
*/
std::string run(std::atomic<bool>& should_do_task);
/**
* @brief Get the functions required to run the experiment
* returns a vector of functions. Each function will run a different executor. It's up to the caller to run these functions in different threads.
* @return vector of functions. Each function will run a different executor.
* It's up to the caller to run these functions in different threads.
* The caller may set the thread properties (e.g. priority) before running the function.
* Each function accepts a flag that can be set to false to stop the experiment. The flag will be set to false when the experiment is complete.
* Each function accepts a flag that can be set to false to stop the experiment.
* The flag will be set to false when the experiment is complete.
*/
std::vector<std::function<void(std::atomic<bool>&)>> getRunFunctions();
static size_t numExecutorsRequired(const ExperimentConfig &config);
static size_t numExecutorsRequired(ExperimentConfig const &config);
/**
* get the executor (and strategy) for a given executor index (0 by default)
* @brief Get the executor (and strategy) for a given executor index
* @param executor_idx The index of the executor (defaults to 0)
* @return experiment_executor
*/
experiment_executor getExecutor(int executor_idx = 0);
node_time_logger getInternalLogger();
/**
* collect logs from the executors. Also add any logs from passed in loggers
* @param optional array of loggers to add to the logs
* @brief Collect logs from the executors and optionally add logs from passed in loggers
* @param loggers Optional array of loggers to add to the logs
* @return std::string containing the combined logs
*/
std::string buildLogs(std::vector<node_time_logger> loggers = {});
void resetTimers();
void writeLogsToFile(std::string filename, std::string experiment_name);
void writeLogsToFile(std::string const &filename, std::string const &experiment_name);
private:
ExperimentConfig config;
std::vector<experiment_executor> executors;
// values to keep track of runtime values
// chain-indexed vector of deques of deadlines
// Chain-indexed vector of deques of deadlines
std::vector<std::deque<uint64_t>*> chain_deadlines_deque;
// maps node_id to node
// Maps node_id to node
std::map<uint32_t, std::shared_ptr<CaseStudyNode>> nodes;
// maps chain_id to timer
// Maps chain_id to timer
std::map<uint32_t, std::shared_ptr<rclcpp::TimerBase>> chain_timer_handles;
/**
* @brief Create the executors using the type and amount set in the config
*
*/
void createExecutors();
/**
* @brief Create a single executor using the type set in the config
*
* @param executor_num The executor number to create
* @return experiment_executor
*/
experiment_executor createSingleExecutor(uint executor_num = 0);
/**
* @brief Get the executor for a given node id
* @param node_id if the node_executor_assignments is empty, then this parameter is ignored, and the first executor is returned
* @param node_id If node_executor_assignments is empty, this parameter is ignored
* and the first executor is returned
* @return experiment_executor
*/
experiment_executor getExecutorForNode(int node_id = 0);
void createNodesAndAssignProperties();
void setInitialDeadlines();
node_time_logger _logger;
};
int do_other_task(double work_time, uint period, std::atomic<bool>& should_do_task);
void sanity_check_config(ExperimentConfig config);
void sanity_check_config(ExperimentConfig const &config);
#endif // RTIS_EXPERIMENT_HOST

View file

@ -22,7 +22,7 @@ namespace DummyLoadCalibration
{
inline std::atomic<int> calibration{1};
inline void setCalibration(int calib);
inline void setCalibration(int const calib);
inline int getCalibration();
}
@ -39,6 +39,6 @@ void calibrate_dummy_load();
* This function performs a busy wait for a specified duration.
* It is used to calibrate the dummy load for the current system.
*/
void dummy_load(int load_ms);
void dummy_load(int const load_ms);
#endif

View file

@ -1,21 +1,21 @@
#ifndef RTIS_TEST_NODES
#define RTIS_TEST_NODES
#include <string>
#include "rclcpp/rclcpp.hpp"
#include "std_msgs/msg/string.hpp"
#include "std_msgs/msg/int32.hpp"
#include "simple_timer/rt-sched.hpp"
class CaseStudyNode
{
public: node_time_logger logger;
class CaseStudyNode {
public:
node_time_logger logger;
// inherit constructor
CaseStudyNode(std::string publish_topic, rclcpp::Node::SharedPtr node)
{
CaseStudyNode(std::string const &publish_topic, rclcpp::Node::SharedPtr const &node) {
this->name = publish_topic;
this->node = node;
// seed random number generator
srand(time(NULL));
srand(time(nullptr));
}
bool use_random_runtime = false;
@ -34,7 +34,9 @@ class CaseStudyNode
class PublisherNode : public CaseStudyNode {
public:
PublisherNode(std::string publish_topic, double runtime, int period, int chain, rclcpp::Node::SharedPtr node, rclcpp::CallbackGroup::SharedPtr callback_group = nullptr);
PublisherNode(std::string const &publish_topic, double runtime, int period, int chain,
rclcpp::Node::SharedPtr const &node,
rclcpp::CallbackGroup::SharedPtr const &callback_group = nullptr);
rclcpp::TimerBase::SharedPtr timer_;
int count_max_ = COUNT_MAX;
@ -47,7 +49,10 @@ class PublisherNode : public CaseStudyNode{
class WorkerNode : public CaseStudyNode {
public:
WorkerNode(std::string subscribe_topic, std::string publish_topic, double runtime, int period, int chain, rclcpp::Node::SharedPtr node, rclcpp::CallbackGroup::SharedPtr callback_group = nullptr);
WorkerNode(std::string const &subscribe_topic, std::string const &publish_topic,
double runtime, int period, int chain,
rclcpp::Node::SharedPtr const &node,
rclcpp::CallbackGroup::SharedPtr const &callback_group = nullptr);
rclcpp::Subscription<std_msgs::msg::Int32>::SharedPtr sub_;
int count_max_ = COUNT_MAX;

View file

@ -2,10 +2,12 @@
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>casestudy_tools</name>
<version>0.0.0</version>
<description>TODO: Package description</description>
<maintainer email="kurt4wilson@gmail.com">vboxuser</maintainer>
<license>TODO: License declaration</license>
<version>0.1.0</version>
<description>
ROS 2 package providing utility tools and common functionality for dynamic executor case studies
</description>
<maintainer email="kurt4wilson@gmail.com">kurt</maintainer>
<license>Apache License 2.0</license>
<buildtool_depend>ament_cmake</buildtool_depend>

View file

@ -13,11 +13,12 @@
#include <sys/prctl.h>
#include <vector>
Experiment::Experiment(ExperimentConfig config) : config(config) {
Experiment::Experiment(ExperimentConfig config)
: config(std::move(config)) {
_logger = create_logger();
}
void sanity_check_config(ExperimentConfig config) {
void sanity_check_config(ExperimentConfig const &config) {
// make sure chain_lengths and node_ids are the same size
if (config.chain_lengths.size() != config.node_ids.size()) {
std::cout << "ros_experiment: chain_lengths.size()= "
@ -25,6 +26,7 @@ void sanity_check_config(ExperimentConfig config) {
<< " != node_ids.size()= " << config.node_ids.size() << std::endl;
exit(1);
}
// make sure each chain_lengths is the same size as the corresponding node_ids
for (uint32_t i = 0; i < config.chain_lengths.size(); i++) {
if (config.chain_lengths[i] != config.node_ids[i].size()) {
@ -34,6 +36,7 @@ void sanity_check_config(ExperimentConfig config) {
exit(1);
}
}
// make sure chain_timer_control is the same size as chain_lengths
if (config.chain_timer_control.size() != config.chain_lengths.size()) {
std::cout << "ros_experiment: chain_timer_control.size()= "
@ -42,12 +45,14 @@ void sanity_check_config(ExperimentConfig config) {
<< std::endl;
exit(1);
}
std::set<int> all_node_ids = std::set<int>();
for (uint32_t i = 0; i < config.node_ids.size(); i++) {
for (uint32_t j = 0; j < config.node_ids[i].size(); j++) {
all_node_ids.insert(config.node_ids[i][j]);
std::set<int> all_node_ids;
for (auto const &chain : config.node_ids) {
for (auto const &node_id : chain) {
all_node_ids.insert(node_id);
}
}
// make sure we have the right number of node_priorities and node_runtimes
if (all_node_ids.size() != config.node_priorities.size()) {
std::cout << "ros_experiment: all_node_ids.size()= " << all_node_ids.size()
@ -66,47 +71,43 @@ void sanity_check_config(ExperimentConfig config) {
std::string Experiment::run(std::atomic<bool>& should_do_task) {
// TODO: split into setup and run, so that run can be re-used in
// Experiment::getRunFunctions
if (config.node_executor_assignments.size() != 0 ||
numExecutorsRequired(config) != 1) {
std::cerr
<< "called Experiment::run with node_executor_assignments.size() != 0"
if (!config.node_executor_assignments.empty()
|| numExecutorsRequired(config) != 1) {
std::cerr << "called Experiment::run with non-empty node_executor_assignments"
<< std::endl;
return "";
}
createExecutors();
createNodesAndAssignProperties();
setInitialDeadlines();
// reset all timers
resetTimers();
// in this function (run), there is only one executor, so retrieve it now
// In this function (run), there is only one executor, so retrieve it now
experiment_executor executor = getExecutorForNode();
if (config.executor_type == "edf" || config.executor_type == "picas") {
executor.executor->spin();
} else if (config.executor_type == "default") {
executor.default_executor->spin();
}
std::cout << "spin done" << std::endl;
should_do_task.store(false);
return buildLogs();
}
std::vector<std::function<void(std::atomic<bool>&)>>
Experiment::getRunFunctions() {
// do pre-run setup
// Do pre-run setup
createExecutors();
createNodesAndAssignProperties();
// create a function for each executor
// Create a function for each executor
std::vector<std::function<void(std::atomic<bool>&)>> run_functions;
for (size_t i = 0; i < numExecutorsRequired(config); i++) {
// get the executor for this function
experiment_executor executor = executors[i];
run_functions.push_back([this,
executor](std::atomic<bool> &should_do_task) {
experiment_executor const executor = executors[i];
run_functions.emplace_back(
[this, executor](std::atomic<bool>& should_do_task) {
if (config.executor_type == "edf" || config.executor_type == "picas") {
executor.executor->spin();
} else if (config.executor_type == "default") {
@ -117,92 +118,90 @@ Experiment::getRunFunctions() {
});
}
// do these last, since they're time-sensitive
// Do these last since they're time-sensitive
setInitialDeadlines();
// resetTimers();
return run_functions;
}
size_t Experiment::numExecutorsRequired(const ExperimentConfig &config) {
// get the number of unique values in the node_executor_assignments
if (config.node_executor_assignments.size() == 0) {
size_t Experiment::numExecutorsRequired(ExperimentConfig const &config) {
if (config.node_executor_assignments.empty()) {
return 1;
}
std::set<int> unique_executors;
for (auto &assignment : config.node_executor_assignments) {
for (auto const &assignment : config.node_executor_assignments) {
unique_executors.insert(assignment);
}
return unique_executors.size();
}
experiment_executor Experiment::getExecutor(int executor_idx) {
experiment_executor Experiment::getExecutor(int const executor_idx) {
return executors[executor_idx];
}
void Experiment::createExecutors() {
if (config.node_executor_assignments.size() == 0) {
// create a single executor
if (config.node_executor_assignments.empty()) {
// Create a single executor
executors.push_back(createSingleExecutor(0));
return;
}
// Check configuration validity
// depending on config.executor_to_cpu_assignments and config.parallel_mode,
// we may need to create the host nodes
if (config.executor_to_cpu_assignments.size() != 0 && config.parallel_mode) {
if (!config.executor_to_cpu_assignments.empty() && config.parallel_mode) {
std::cerr << "executor_to_cpu_assignments not supported for parallel mode"
<< std::endl;
}
if (config.executor_to_cpu_assignments.size() != 0 &&
config.executor_to_cpu_assignments.size() !=
numExecutorsRequired(config)) {
std::cerr
<< "executor_to_cpu_assignments.size() != numExecutorsRequired(config)"
if (!config.executor_to_cpu_assignments.empty()
&& config.executor_to_cpu_assignments.size() != numExecutorsRequired(config)) {
std::cerr << "executor_to_cpu_assignments.size() != numExecutorsRequired(config)"
<< std::endl;
exit(1);
}
// create the required host nodes - re-use any existing nodes
// Create the required host nodes - re-use any existing nodes
for (uint i = config.nodes.size();
i < config.executor_to_cpu_assignments.size(); i++) {
// create a node for each executor
rclcpp::Node::SharedPtr node =
std::make_shared<rclcpp::Node>("node_" + std::to_string(i));
auto const node = std::make_shared<rclcpp::Node>(
"node_" + std::to_string(i));
config.nodes.push_back(node);
}
std::cout << "created " << config.nodes.size() << " nodes" << std::endl;
int num_executors = numExecutorsRequired(config);
int const num_executors = numExecutorsRequired(config);
std::cout << "creating " << num_executors << " executors" << std::endl;
for (int i = 0; i < num_executors; i++) {
executors.push_back(createSingleExecutor(i));
}
}
experiment_executor Experiment::createSingleExecutor(uint executor_num) {
experiment_executor Experiment::createSingleExecutor(uint const executor_num) {
experiment_executor executor = {nullptr, nullptr, nullptr};
if (config.executor_type == "edf" || config.executor_type == "picas") {
executor.strat = std::make_shared<PriorityMemoryStrategy<>>();
executor.strat = std::make_shared<priority_executor::PriorityMemoryStrategy<>>();
rclcpp::ExecutorOptions options;
options.memory_strategy = executor.strat;
if (config.parallel_mode) {
executor.executor =
std::make_shared<timed_executor::MultithreadTimedExecutor>(
executor.executor = std::make_shared<priority_executor::MultithreadTimedExecutor>(
options, "priority_executor", config.cores);
} else {
executor.executor =
std::make_shared<timed_executor::TimedExecutor>(options);
executor.executor = std::make_shared<priority_executor::TimedExecutor>(options);
}
executor.executor->prio_memory_strategy_ = executor.strat;
if (config.node_executor_assignments.size() == 0) {
// add all nodes to the executor
for (auto &node : config.nodes) {
if (config.node_executor_assignments.empty()) {
// Add all nodes to the executor
for (auto const &node : config.nodes) {
executor.executor->add_node(node);
}
} else {
// add only this executor's nodes
// Add only this executor's nodes
executor.executor->add_node(config.nodes[executor_num]);
}
} else if (config.executor_type == "default") {
@ -210,42 +209,40 @@ experiment_executor Experiment::createSingleExecutor(uint executor_num) {
// executor.default_executor =
// std::make_shared<ROSDefaultMultithreadedExecutor>(rclcpp::ExecutorOptions(),
// config.cores);
executor.default_executor =
std::make_shared<rclcpp::executors::MultiThreadedExecutor>(
executor.default_executor = std::make_shared<rclcpp::executors::MultiThreadedExecutor>(
rclcpp::ExecutorOptions(), config.cores);
} else {
executor.default_executor = std::make_shared<ROSDefaultExecutor>();
executor.default_executor = std::make_shared<priority_executor::ROSDefaultExecutor>();
}
if (config.node_executor_assignments.size() == 0) {
// add all nodes to the executor
for (auto &node : config.nodes) {
if (config.node_executor_assignments.empty()) {
// Add all nodes to the executor
for (auto const &node : config.nodes) {
executor.default_executor->add_node(node);
}
} else {
// add only this executor's nodes
// Add only this executor's nodes
executor.default_executor->add_node(config.nodes[executor_num]);
}
} else {
}
// RCLCPP_ERROR(node->get_logger(), "Unknown executor type: %s",
// executor_type.c_str());
return executor;
}
return executor;
experiment_executor Experiment::getExecutorForNode(int const node_id) {
if (config.node_executor_assignments.empty()) {
return executors[0];
}
experiment_executor Experiment::getExecutorForNode(int node_id) {
if (config.node_executor_assignments.size() == 0) {
return executors[0];
} else {
int executor_idx = config.node_executor_assignments[node_id];
int const executor_idx = config.node_executor_assignments[node_id];
std::cout << "node " << node_id << " assigned to executor " << executor_idx
<< std::endl;
return executors[executor_idx];
}
}
void Experiment::createNodesAndAssignProperties() {
// BEGIN SPECIAL CASES
std::vector<std::vector<std::shared_ptr<CaseStudyNode>>> all_nodes;
rclcpp::CallbackGroup::SharedPtr cb_group0;
@ -272,7 +269,7 @@ void Experiment::createNodesAndAssignProperties() {
std::vector<rclcpp::CallbackGroup::SharedPtr> cb_groups;
if (config.num_groups != 0 &&
config.executor_to_cpu_assignments.size() != 0) {
!config.executor_to_cpu_assignments.empty()) {
std::cout << "WARNING: num_groups and executor_to_cpu_assignments are both "
"set. This is not supported. Ignoring num_groups."
<< std::endl;
@ -290,7 +287,7 @@ void Experiment::createNodesAndAssignProperties() {
bool first_node = true;
for (uint32_t node_chain_idx = 0;
node_chain_idx < config.node_ids[chain_id].size(); node_chain_idx++) {
uint32_t node_id = config.node_ids[chain_id][node_chain_idx];
uint32_t const node_id = config.node_ids[chain_id][node_chain_idx];
// has this node been created yet?
if (nodes.find(node_id) != nodes.end()) {
// if it has, then this node exists in another chain
@ -299,7 +296,7 @@ void Experiment::createNodesAndAssignProperties() {
}
experiment_executor executor = getExecutorForNode(node_id);
std::shared_ptr<const void> handle;
ExecutableType type;
priority_executor::ExecutableType type;
// is this the first node in the chain?
if (node_chain_idx == 0) {
// create a timer node
@ -313,7 +310,7 @@ void Experiment::createNodesAndAssignProperties() {
cb_group = cb_groups[config.group_memberships[node_id] - 1];
}
std::shared_ptr<PublisherNode> node;
if (config.node_executor_assignments.size() == 0) {
if (config.node_executor_assignments.empty()) {
node = std::make_shared<PublisherNode>(
"node_" + std::to_string(node_id), config.node_runtimes[node_id],
config.chain_periods[chain_id], chain_id, config.nodes[0],
@ -327,7 +324,7 @@ void Experiment::createNodesAndAssignProperties() {
}
handle = node->timer_->get_timer_handle();
nodes[node_id] = node;
type = ExecutableType::TIMER;
type = priority_executor::ExecutableType::TIMER;
// this_chain_timer_handle = node->timer_;
std::cout << "chain timer handle set: " << chain_id << std::endl;
chain_timer_handles[chain_id] = node->timer_;
@ -335,7 +332,7 @@ void Experiment::createNodesAndAssignProperties() {
all_nodes[chain_id].push_back(node);
} else {
// create a subscriber node
int subscription_id = config.node_ids[chain_id][node_chain_idx - 1];
int const subscription_id = config.node_ids[chain_id][node_chain_idx - 1];
rclcpp::CallbackGroup::SharedPtr cb_group;
if (jiang2022_cb && ((chain_id == 1 && node_chain_idx == 1) ||
(chain_id == 2 && node_chain_idx == 1))) {
@ -352,13 +349,13 @@ void Experiment::createNodesAndAssignProperties() {
std::shared_ptr<WorkerNode> node;
if (ours2024_latency) {
uint32_t first_node_id = config.node_ids[chain_id][0];
uint32_t const first_node_id = config.node_ids[chain_id][0];
node = std::make_shared<WorkerNode>(
"node_" + std::to_string(first_node_id),
"node_" + std::to_string(node_id), config.node_runtimes[node_id],
config.chain_periods[chain_id], chain_id, config.nodes[0],
cb_group);
} else if (config.node_executor_assignments.size() == 0) {
} else if (config.node_executor_assignments.empty()) {
node = std::make_shared<WorkerNode>(
"node_" + std::to_string(subscription_id),
"node_" + std::to_string(node_id), config.node_runtimes[node_id],
@ -374,12 +371,12 @@ void Experiment::createNodesAndAssignProperties() {
}
handle = node->sub_->get_subscription_handle();
nodes[node_id] = node;
type = ExecutableType::SUBSCRIPTION;
type = priority_executor::ExecutableType::SUBSCRIPTION;
all_nodes[chain_id].push_back(node);
}
if (config.executor_type == "edf") {
std::string cb_name = "node_" + std::to_string(node_id) + "_cb";
std::string const cb_name = "node_" + std::to_string(node_id) + "_cb";
executor.strat->set_executable_deadline(
handle, config.chain_periods[chain_id], type, chain_id, cb_name);
// executor.executor->add_node(nodes[node_id]);
@ -406,7 +403,7 @@ void Experiment::createNodesAndAssignProperties() {
// executor.executor->add_node(nodes[node_id]);
executor.strat->set_executable_priority(
handle, config.node_priorities[node_id], type,
ExecutableScheduleType::CHAIN_AWARE_PRIORITY, chain_id);
priority_executor::ExecutableScheduleType::CHAIN_AWARE_PRIORITY, chain_id);
std::cout << "node " << node_id << " has priority "
<< config.node_priorities[node_id] << std::endl;
} else if (config.executor_type == "default") {
@ -422,29 +419,28 @@ std::string Experiment::buildLogs(std::vector<node_time_logger> node_logs) {
output_file << "[" << std::endl;
// for each node
for (auto node : nodes) {
for (auto const &node : nodes) {
// print the node's logs
node_time_logger logger = node.second->logger;
for (auto &log : *(logger.recorded_times)) {
node_time_logger const logger = node.second->logger;
for (auto const &log : *(logger.recorded_times)) {
output_file << "{\"entry\": " << log.first
<< ", \"time\": " << log.second << "}," << std::endl;
}
}
if (config.executor_type == "edf" || config.executor_type == "picas") {
for (auto &executor : executors) {
node_time_logger strat_log = executor.strat->logger_;
for (auto &log : *(strat_log.recorded_times)) {
for (auto const &executor : executors) {
node_time_logger const strat_log = executor.strat->logger_;
for (auto const &log : *(strat_log.recorded_times)) {
// std::cout << log.first << " " << log.second << std::endl;
output_file << "{\"entry\": " << log.first
<< ", \"time\": " << log.second << "}," << std::endl;
}
// executor may be a subclass of RTISTimed, so we can get its logger
if (dynamic_cast<RTISTimed *>(executor.executor.get())) {
node_time_logger executor_log =
dynamic_cast<RTISTimed *>(executor.executor.get())->logger_;
for (auto &log : *(executor_log.recorded_times)) {
if (auto const *timed_executor = dynamic_cast<priority_executor::RTISTimed*>(executor.executor.get())) {
node_time_logger const executor_log = timed_executor->logger_;
for (auto const &log : *(executor_log.recorded_times)) {
output_file << "{\"entry\": " << log.first
<< ", \"time\": " << log.second << "}," << std::endl;
}
@ -454,14 +450,13 @@ std::string Experiment::buildLogs(std::vector<node_time_logger> node_logs) {
}
} else {
// a default executor
for (auto &executor : executors) {
for (auto const &executor : executors) {
      // experimental: executor.default_executor's static type is rclcpp::Executor,
      // but here it is guaranteed to inherit from RTISTimed, so we can get a
      // logger from it. For safety, we check the type first.
if (dynamic_cast<RTISTimed *>(executor.default_executor.get())) {
node_time_logger default_log =
dynamic_cast<RTISTimed *>(executor.default_executor.get())->logger_;
for (auto &log : *(default_log.recorded_times)) {
if (auto const* timed_executor = dynamic_cast<priority_executor::RTISTimed*>(executor.default_executor.get())) {
node_time_logger const default_log = timed_executor->logger_;
for (auto const &log : *(default_log.recorded_times)) {
output_file << "{\"entry\": " << log.first
<< ", \"time\": " << log.second << "}," << std::endl;
}
@ -473,14 +468,14 @@ std::string Experiment::buildLogs(std::vector<node_time_logger> node_logs) {
}
// add internal logs
for (auto &log_entry : *(_logger.recorded_times)) {
for (auto const &log_entry : *(_logger.recorded_times)) {
output_file << "{\"entry\": " << log_entry.first
<< ", \"time\": " << log_entry.second << "}," << std::endl;
}
// add logs from argument
for (auto &log : node_logs) {
for (auto &log_entry : *(log.recorded_times)) {
for (auto const &log : node_logs) {
for (auto const &log_entry : *(log.recorded_times)) {
output_file << "{\"entry\": " << log_entry.first
<< ", \"time\": " << log_entry.second << "}," << std::endl;
}
@ -495,7 +490,7 @@ std::string Experiment::buildLogs(std::vector<node_time_logger> node_logs) {
void Experiment::setInitialDeadlines() {
timespec current_time;
clock_gettime(CLOCK_MONOTONIC_RAW, &current_time);
uint64_t millis =
uint64_t const millis =
(current_time.tv_sec * 1000UL) + (current_time.tv_nsec / 1000000);
// we need a logger to record the first deadline, which is generated here
// TODO: generate the first deadline in the executor (or node) itself
@ -503,7 +498,7 @@ void Experiment::setInitialDeadlines() {
for (uint32_t chain_index = 0; chain_index < config.chain_lengths.size();
chain_index++) {
size_t executor_idx;
if (config.node_executor_assignments.size() == 0) {
if (config.node_executor_assignments.empty()) {
executor_idx = 0;
} else {
executor_idx = config.node_executor_assignments[chain_index];
@ -525,7 +520,7 @@ void Experiment::setInitialDeadlines() {
}
void Experiment::resetTimers() {
for (auto timer : chain_timer_handles) {
for (auto const &timer : chain_timer_handles) {
std::cout << "resetting timer " << timer.first << std::endl;
if (timer.second == nullptr) {
std::cout << "timer is null" << std::endl;
@ -542,14 +537,14 @@ void Experiment::resetTimers() {
// std::this_thread::sleep_for(std::chrono::seconds(1));
}
void Experiment::writeLogsToFile(std::string file_name,
std::string experiment_name) {
void Experiment::writeLogsToFile(std::string const &file_name,
std::string const &experiment_name) {
std::ofstream output_file;
std::string name = experiment_name;
std::string const name = experiment_name;
// make sure results directory exists. we don't have access to std::filesystem
// here :(
std::string command = "mkdir -p results/" + name;
int result = system(command.c_str());
std::string const command = "mkdir -p results/" + name;
int const result = system(command.c_str());
if (result != 0) {
std::cout << "could not create results directory: " << result << ": "
<< strerror(errno) << std::endl;
@ -563,7 +558,7 @@ void Experiment::writeLogsToFile(std::string file_name,
<< std::endl;
}
int do_other_task(double work_time, uint period,
int do_other_task(double const work_time, uint const period,
std::atomic<bool>& should_do_task) {
if (work_time == 0) {
std::cout << "other task: work time is 0, exiting" << std::endl;
@ -573,7 +568,7 @@ int do_other_task(double work_time, uint period,
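  // pin the calling thread (this background task) to CPU 0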
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(0, &cpuset);
int result = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
int const result = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
if (result != 0) {
std::cout << "other task: could not set affinity: " << result << ": "
<< strerror(errno) << std::endl;
@ -593,7 +588,7 @@ int do_other_task(double work_time, uint period,
do {
timespec current_time;
clock_gettime(CLOCK_MONOTONIC_RAW, &current_time);
uint64_t millis_start = (current_time.tv_sec * (uint64_t)1000) +
uint64_t const millis_start = (current_time.tv_sec * (uint64_t)1000) +
(current_time.tv_nsec / 1000000);
// do work
@ -602,9 +597,9 @@ int do_other_task(double work_time, uint period,
// sleep until next period
clock_gettime(CLOCK_MONOTONIC_RAW, &current_time);
uint64_t millis_end = (current_time.tv_sec * (uint64_t)1000) +
uint64_t const millis_end = (current_time.tv_sec * (uint64_t)1000) +
(current_time.tv_nsec / 1000000);
uint64_t sleep_time = period - (millis_end - millis_start);
uint64_t const sleep_time = period - (millis_end - millis_start);
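    // note: all operands are unsigned, so if the work ran longer than the period
    // this difference wraps to a very large sleep; assumes work finishes within one period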
std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time));
} while (should_do_task);
std::cout << "other task done" << std::endl;

View file

@ -6,7 +6,7 @@
#include "casestudy_tools/primes_workload.hpp"
void DummyLoadCalibration::setCalibration(int calib) {
void DummyLoadCalibration::setCalibration(int const calib) {
calibration.store(calib, std::memory_order_relaxed);
}
@ -20,12 +20,12 @@ void calibrate_dummy_load()
for (int iteration = 0; iteration < MAX_ITERATIONS; ++iteration)
{
// Measure the time taken for the dummy load
auto start_time = std::chrono::high_resolution_clock::now();
auto const start_time = std::chrono::high_resolution_clock::now();
dummy_load(TARGET_DURATION_MS); // intended workload
auto end_time = std::chrono::high_resolution_clock::now();
auto const end_time = std::chrono::high_resolution_clock::now();
// Calculate duration in microseconds
auto duration_us =
auto const duration_us =
std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();
std::cout << "[Calibration] Iteration: " << iteration
@ -51,12 +51,12 @@ void calibrate_dummy_load()
}
}
void dummy_load(int load_ms)
void dummy_load(int const load_ms)
{
const int calib_factor = DummyLoadCalibration::getCalibration();
int const calib_factor = DummyLoadCalibration::getCalibration();
  // the calibration factor corresponds to a 100 ms load, so scale it to load_ms
const int scaled_factor = calib_factor * load_ms / 100;
int const scaled_factor = calib_factor * load_ms / 100;
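  // dummy_var is volatile so the busy-work writes below are not optimized away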
volatile int dummy_var = 0;
constexpr int DUMMY_LOAD_ITER = 100'000;

View file

@ -6,71 +6,68 @@
#include <ctime>
#include <rclcpp/logger.hpp>
std::string CaseStudyNode::get_name() { return name; }
std::string CaseStudyNode::get_name() {
return name;
}
#define BUFFER_LENGTH 0
rclcpp::QoS get_qos()
{
if (BUFFER_LENGTH == 0)
{
rclcpp::QoS get_qos() {
if (BUFFER_LENGTH == 0) {
return rclcpp::QoS(rclcpp::KeepAll());
}
else
{
return rclcpp::QoS(rclcpp::KeepLast(BUFFER_LENGTH));
}
}
void timespec_diff(timespec *start, timespec *end, timespec *result)
{
if ((end->tv_nsec - start->tv_nsec) < 0)
{
void timespec_diff(timespec const* start, timespec const* end, timespec* result) {
if ((end->tv_nsec - start->tv_nsec) < 0) {
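    // the nanosecond part went negative: borrow one second from the seconds field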
result->tv_sec = end->tv_sec - start->tv_sec - 1;
result->tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec;
}
else
{
} else {
result->tv_sec = end->tv_sec - start->tv_sec;
result->tv_nsec = end->tv_nsec - start->tv_nsec;
}
}
PublisherNode::PublisherNode(std::string publish_topic, double runtime,
int period, int chain,
rclcpp::Node::SharedPtr node,
rclcpp::CallbackGroup::SharedPtr callback_group)
: CaseStudyNode(publish_topic, node), runtime_(runtime), period_(period),
chain_(chain)
{
PublisherNode::PublisherNode(
std::string const &publish_topic,
double const runtime,
int const period,
int const chain,
rclcpp::Node::SharedPtr const &node,
rclcpp::CallbackGroup::SharedPtr const &callback_group)
: CaseStudyNode(publish_topic, node)
, runtime_(runtime)
, period_(period)
, chain_(chain) {
logger = create_logger();
pub_ = node->create_publisher<std_msgs::msg::Int32>(publish_topic, get_qos());
auto timer_callback = [this]() -> void
{
auto const timer_callback = [this]() -> void {
std::ostringstream ss;
ss << "{\"operation\": \"start_work\", \"chain\": " << chain_
<< ", \"node\": \"" << get_name() << "\", \"count\": " << count_max_;
std::chrono::nanoseconds time_until_trigger = timer_->time_until_trigger();
std::chrono::microseconds time_until_trigger_us =
std::chrono::duration_cast<std::chrono::microseconds>(
time_until_trigger);
std::chrono::nanoseconds const time_until_trigger = timer_->time_until_trigger();
std::chrono::microseconds const time_until_trigger_us =
std::chrono::duration_cast<std::chrono::microseconds>(time_until_trigger);
ss << ", \"next_release_us\": " << time_until_trigger_us.count() << "}";
log_entry(logger, ss.str());
// nth_prime_silly(runtime_);
double used_runtime = runtime_;
if (use_random_runtime)
{
if (rand() % 100 < runtime_over_chance * 100)
{
if (use_random_runtime) {
if (rand() % 100 < runtime_over_chance * 100) {
used_runtime *= over_runtime_scale;
}
else
{
} else {
used_runtime *= normal_runtime_scale;
}
}
// RCLCPP_INFO(rclcpp::get_logger(get_name()), "running %s with runtime %f",
// get_name().c_str(), used_runtime);
struct timespec start_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
@ -81,14 +78,15 @@ PublisherNode::PublisherNode(std::string publish_topic, double runtime,
pub_->publish(msg);
// TEST: can we take more than one msg from DDS?
// pub_->publish(msg);
ss.str("");
ss << "{\"operation\": \"end_work\", \"chain\": " << chain_
<< ", \"node\": \"" << get_name() << "\", \"count\": " << count_max_ << ", \"target_runtime\": " << runtime_
<< "}";
<< ", \"node\": \"" << get_name() << "\", \"count\": " << count_max_
<< ", \"target_runtime\": " << runtime_ << "}";
log_entry(logger, ss.str());
count_max_--;
if (count_max_ == 0)
{
if (count_max_ == 0) {
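      // all publications from this timer are done; stop the whole experiment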
rclcpp::shutdown();
}
/* struct timespec end_time;
@ -100,29 +98,36 @@ PublisherNode::PublisherNode(std::string publish_topic, double runtime,
RCLCPP_INFO(rclcpp::get_logger(get_name()), "done running %s, took %lu ms",
get_name().c_str(), diff_ms); */
};
if (callback_group != nullptr)
{
if (callback_group != nullptr) {
callback_group_ = callback_group;
}
else
{
callback_group_ =
node->create_callback_group(rclcpp::CallbackGroupType::Reentrant);
}
timer_ = node->create_wall_timer(std::chrono::milliseconds(period),
timer_callback, callback_group_);
} else {
callback_group_ = node->create_callback_group(
rclcpp::CallbackGroupType::Reentrant);
}
WorkerNode::WorkerNode(std::string subscribe_topic, std::string publish_topic,
double runtime, int period, int chain,
rclcpp::Node::SharedPtr node,
rclcpp::CallbackGroup::SharedPtr callback_group)
: CaseStudyNode(publish_topic, node), runtime_(runtime), period_(period),
chain_(chain)
{
timer_ = node->create_wall_timer(
std::chrono::milliseconds(period),
timer_callback,
callback_group_);
}
WorkerNode::WorkerNode(
std::string const &subscribe_topic,
std::string const &publish_topic,
double const runtime,
int const period,
int const chain,
rclcpp::Node::SharedPtr const &node,
rclcpp::CallbackGroup::SharedPtr const &callback_group)
: CaseStudyNode(publish_topic, node)
, runtime_(runtime)
, period_(period)
, chain_(chain) {
logger = create_logger();
auto callback = [this](const std_msgs::msg::Int32::SharedPtr msg) -> void
{
auto const callback = [this](std_msgs::msg::Int32::SharedPtr const msg) -> void {
// prevent unused variable warning
(void)msg;
@ -133,42 +138,42 @@ WorkerNode::WorkerNode(std::string subscribe_topic, std::string publish_topic,
log_entry(logger, ss.str());
// RCLCPP_INFO(this->get_logger(), "I heard: '%s'", msg->data.c_str());
// nth_prime_silly(runtime_);
double used_runtime = runtime_;
if (use_random_runtime)
{
if (rand() % 100 < runtime_over_chance * 100)
{
if (use_random_runtime) {
if (rand() % 100 < runtime_over_chance * 100) {
used_runtime *= over_runtime_scale;
}
else
{
} else {
used_runtime *= normal_runtime_scale;
}
}
dummy_load(used_runtime);
// RCLCPP_INFO(this->get_logger(), "Result: %d", result);
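    // forward the received count to the next node in the chain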
auto new_msg = std_msgs::msg::Int32();
new_msg.data = msg->data;
pub_->publish(new_msg);
ss.str("");
ss << "{\"operation\": \"end_work\", \"chain\": " << chain_
<< ", \"node\": \"" << get_name() << "\", \"count\": " << msg->data
<< "}";
log_entry(logger, ss.str());
};
rclcpp::SubscriptionOptions sub_options;
if (callback_group)
{
if (callback_group) {
callback_group_ = callback_group;
}
else
{
callback_group_ =
node->create_callback_group(rclcpp::CallbackGroupType::Reentrant);
} else {
callback_group_ = node->create_callback_group(
rclcpp::CallbackGroupType::Reentrant);
}
sub_options.callback_group = callback_group_;
sub_ = node->create_subscription<std_msgs::msg::Int32>(
subscribe_topic, get_qos(), callback, sub_options);
subscribe_topic,
get_qos(),
callback,
sub_options);
pub_ = node->create_publisher<std_msgs::msg::Int32>(publish_topic, get_qos());
}

@ -1 +1 @@
Subproject commit a2741afc9a204758ec047cb25c6a85c92b3298a6
Subproject commit 91c2a42d6bc0130cec32447373a0fec0def34a26