basic refactor

This commit is contained in:
Niklas Halle 2025-04-01 16:04:19 +02:00 committed by Niklas Halle
parent a6df3fcc41
commit abf3496c78
21 changed files with 13752 additions and 13232 deletions

74
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,74 @@
{
"files.associations": {
"cctype": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"csignal": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"cwctype": "cpp",
"array": "cpp",
"atomic": "cpp",
"strstream": "cpp",
"bit": "cpp",
"*.tcc": "cpp",
"bitset": "cpp",
"chrono": "cpp",
"compare": "cpp",
"complex": "cpp",
"concepts": "cpp",
"condition_variable": "cpp",
"cstdint": "cpp",
"deque": "cpp",
"list": "cpp",
"map": "cpp",
"set": "cpp",
"unordered_map": "cpp",
"unordered_set": "cpp",
"vector": "cpp",
"exception": "cpp",
"algorithm": "cpp",
"functional": "cpp",
"iterator": "cpp",
"memory": "cpp",
"memory_resource": "cpp",
"numeric": "cpp",
"optional": "cpp",
"random": "cpp",
"ratio": "cpp",
"string": "cpp",
"string_view": "cpp",
"system_error": "cpp",
"tuple": "cpp",
"type_traits": "cpp",
"utility": "cpp",
"fstream": "cpp",
"future": "cpp",
"initializer_list": "cpp",
"iomanip": "cpp",
"iosfwd": "cpp",
"iostream": "cpp",
"istream": "cpp",
"limits": "cpp",
"mutex": "cpp",
"new": "cpp",
"ostream": "cpp",
"ranges": "cpp",
"shared_mutex": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"stop_token": "cpp",
"streambuf": "cpp",
"thread": "cpp",
"cfenv": "cpp",
"cinttypes": "cpp",
"typeindex": "cpp",
"typeinfo": "cpp",
"variant": "cpp"
}
}

125
README.md
View file

@ -1,3 +1,124 @@
Experiment files for the RTIS ros_rt executor. Clone with submodules on, or add the executor to the src folder.
Run `ros2 run casestudy casestudy_example` then run the `analysis/analysis.ipynb` notebook.
# Dynamic Priority Scheduling for ROS-EDF Executor
This repository contains experiments for the ROS-EDF executor with priority-based and deadline-based scheduling capabilities.
## Usage
Clone with submodules enabled:
```
git clone --recurse-submodules https://github.com/user/ROS-Dynamic-Executor-Experiments.git
```
Make sure you have sourced your ROS 2 environment.
Run the example:
```
ros2 run casestudy casestudy_example
```
Then run the analysis notebook:
```
jupyter notebook analysis/analysis.ipynb
```
---
## Dynamically Adjusting Scheduler Approaches
The PriorityMemoryStrategy supports different scheduling policies for ROS callbacks. To extend it with dynamic scheduling that reacts to semantic events, there are several viable approaches:
### Approach A: Custom PriorityExecutableComparator
Replace the existing comparator with a custom implementation:
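A minimal sketch of such a comparator, assuming that returning `true` schedules the first executable ahead of the second (the convention the snippets below rely on); the stand-in `PriorityExecutable` and its `semantic_urgent` flag are hypothetical, not part of the current API:
```cpp
#include <memory>

// Stand-in for the real PriorityExecutable; only the fields this sketch needs.
struct PriorityExecutable
{
  int priority = 0;
  bool semantic_urgent = false;  // hypothetical flag set by a semantic event handler
};

struct SemanticAwareComparator
{
  bool operator()(const std::shared_ptr<PriorityExecutable> & p1,
                  const std::shared_ptr<PriorityExecutable> & p2) const
  {
    // Callbacks flagged by a semantic event are scheduled first.
    if (p1->semantic_urgent != p2->semantic_urgent) {
      return p1->semantic_urgent;
    }
    // Otherwise fall back to the baseline ordering; the orientation must match
    // whatever the existing PriorityExecutableComparator uses.
    return p1->priority < p2->priority;
  }
};
```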
**Pros:**
- Direct control over scheduling logic
- Clean separation of concerns
- Can implement complex scheduling policies
- Doesn't require modifying deadline values
**Cons:**
- Requires understanding and maintaining the comparison logic
- May need to add new fields to PriorityExecutable to track dynamic priorities
- Could become complex if multiple factors affect priority
### Approach B: Dynamic Deadline Adjustment
Keep the EDF comparator but adjust deadlines based on priority logic:
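A minimal sketch of the idea, assuming per-chain deadline deques like the `chain_deadlines_deque` used elsewhere in this repository; `boost_chain_deadline` is a hypothetical helper, not an existing function:
```cpp
#include <cstdint>
#include <deque>

// Pull the chain's next deadline earlier by boost_us microseconds so the
// unchanged EDF comparator selects it sooner. The caller is responsible for
// restoring or recomputing the deadline once the semantic event has passed.
void boost_chain_deadline(std::deque<uint64_t> & chain_deadlines, uint64_t boost_us)
{
  if (chain_deadlines.empty()) {
    return;
  }
  uint64_t & next_deadline = chain_deadlines.front();
  next_deadline = (next_deadline > boost_us) ? (next_deadline - boost_us) : 0;
}
```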
**Pros:**
- Works within the existing EDF framework
- Conceptually simple - just manipulate deadline values
- Doesn't require changing the core sorting mechanism
- Easier to debug (you can log deadline changes)
**Cons:**
- Potentially confusing semantics (using deadlines to represent non-deadline priorities)
- May interfere with actual deadline-based requirements
- Could lead to instability if not carefully managed
### Approach C: Event-Driven Priority Field
Add a dynamic boost factor field to PriorityExecutable:
```cpp
int dynamic_priority_boost = 0;
```
Then modify the comparator to consider this value:
```cpp
if (p1->sched_type == DEADLINE) {
// First compare deadline as usual
// Then use dynamic_priority_boost as a tiebreaker
if (p1_deadline == p2_deadline) {
return p1->dynamic_priority_boost > p2->dynamic_priority_boost;
}
return p1_deadline < p2_deadline;
}
```
**Pros:**
- Clearer semantics than manipulating deadlines
- Keeps deadline scheduling intact for real-time guarantees
- Easy to adjust at runtime
**Cons:**
- Requires modifying both PriorityExecutable and comparison logic
### Approach D: Priority Multiplier System
Implement a system where chains can have priority multipliers applied:
```cpp
float priority_multiplier = 1.0f;
```
And then in the comparator:
```cpp
// For priority-based scheduling (conversion back to int made explicit)
int effective_p1 = static_cast<int>(p1->priority * p1->priority_multiplier);
int effective_p2 = static_cast<int>(p2->priority * p2->priority_multiplier);
return effective_p1 < effective_p2;
```
**Pros:**
- Scales existing priorities rather than replacing them
- Preserves relative importance within chains
- Intuitive model for temporary priority boosts
**Cons:**
- May need to handle overflow/boundary cases (see the clamping sketch after this list)
- Requires careful tuning of multiplier ranges
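To address the overflow/boundary concern above, a minimal sketch that clamps and rounds, reusing the field semantics from the snippet above; `effective_priority` is a hypothetical helper:
```cpp
#include <algorithm>
#include <cmath>

// Clamp the multiplier to a sane range before scaling, and round instead of
// truncating so small boosts are not silently lost.
int effective_priority(int priority, float multiplier)
{
  const float clamped = std::clamp(multiplier, 0.1f, 10.0f);
  return static_cast<int>(std::lround(priority * clamped));
}
```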
## Recommendation
Approach C (Event-Driven Priority Field) offers the best balance of:
1. Clean semantics
2. Minimal interference with existing scheduling logic
3. Clear separation between baseline priorities and dynamic adjustments
4. Straightforward implementation
This approach maintains real-time guarantees while enabling dynamic behaviors based on semantic events.
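As an illustration, a sketch of how a semantic event could drive the Approach C field at runtime; the `PriorityExecutable` stand-in, the `semantic_events` topic, the boost value of 10, and the way the executable handle is obtained are all assumptions rather than the actual PriorityMemoryStrategy API:
```cpp
#include <memory>
#include <utility>

#include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/string.hpp>

// Stand-in for the real PriorityExecutable; only the field this sketch needs.
struct PriorityExecutable
{
  int dynamic_priority_boost = 0;
};

class SemanticEventNode : public rclcpp::Node
{
public:
  explicit SemanticEventNode(std::shared_ptr<PriorityExecutable> boosted_callback)
  : Node("semantic_event_node"), boosted_callback_(std::move(boosted_callback))
  {
    sub_ = create_subscription<std_msgs::msg::String>(
      "semantic_events", 10,
      [this](const std_msgs::msg::String::SharedPtr msg) {
        // Raise the boost while an "obstacle" event is active, clear it otherwise.
        // Note: synchronization with the executor thread is not handled here.
        boosted_callback_->dynamic_priority_boost = (msg->data == "obstacle") ? 10 : 0;
      });
  }

private:
  std::shared_ptr<PriorityExecutable> boosted_callback_;
  rclcpp::Subscription<std_msgs::msg::String>::SharedPtr sub_;
};
```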

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View file

@ -28,4 +28,4 @@ python -m ipykernel install --user --name=venv310 --display-name="Python 3.10 (v
echo "source /workspaces/ROS-Dynamic-Executor-Experiments/venv310/bin/activate" >> /root/.bashrc echo "source /workspaces/ROS-Dynamic-Executor-Experiments/venv310/bin/activate" >> /root/.bashrc
echo "Setup complete! Activate with: source venv310/bin/activate" echo "Setup complete! Activate with: source venv310/bin/activate"
echo "Then start JupyterLab with: jupyter lab --ip=0.0.0.0 --port=8888 --allow-root" echo "Then start JupyterLab with: jupyter lab --ip=0.0.0.0 --port=8888 --allow-root"

View file

@ -1,95 +1,74 @@
cmake_minimum_required(VERSION 3.5) cmake_minimum_required(VERSION 3.8)
project(casestudy) project(casestudy VERSION 0.1.0)
# Default to C99 # Set C++ standards
if(NOT CMAKE_C_STANDARD) set(CMAKE_CXX_STANDARD 17)
set(CMAKE_C_STANDARD 99) set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif() set(CMAKE_CXX_EXTENSIONS OFF)
# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
endif()
# Compiler options
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic) add_compile_options(-Wall -Wextra -Wpedantic)
endif() endif()
# find dependencies # Find dependencies
find_package(ament_cmake REQUIRED) find_package(ament_cmake REQUIRED)
# uncomment the following section in order to fill in find_package(rclcpp REQUIRED)
# further dependencies manually.
# find_package(<dependency> REQUIRED)
find_package(priority_executor REQUIRED) find_package(priority_executor REQUIRED)
find_package(simple_timer REQUIRED) find_package(simple_timer REQUIRED)
find_package(rclcpp REQUIRED)
find_package(jsoncpp REQUIRED) find_package(jsoncpp REQUIRED)
find_package(casestudy_tools REQUIRED) find_package(casestudy_tools REQUIRED)
# add_library(test_nodes src/test_nodes.cpp) # Define experiment targets
# target_include_directories(test_nodes PUBLIC set(EXPERIMENTS
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include>
# )
# ament_target_dependencies(test_nodes
# rclcpp
# simple_timer
# casestudy_tools
# )
list(APPEND experiments
casestudy_2023customfile_singlethread casestudy_2023customfile_singlethread
casestudy_2024ours_latency casestudy_2024ours_latency
casestudy_2024ours_executor2executor casestudy_2024ours_executor2executor
casestudy_example casestudy_example
) )
# Function to create experiment targets
function(new_experiment experiment_name) function(new_experiment experiment_name)
add_executable(${experiment_name} src/${experiment_name}.cpp) add_executable(${experiment_name} src/${experiment_name}.cpp)
target_include_directories(${experiment_name} PUBLIC target_include_directories(${experiment_name} PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include> $<INSTALL_INTERFACE:include>
) )
# Add dependencies with ament
ament_target_dependencies(${experiment_name} ament_target_dependencies(${experiment_name}
rclcpp
priority_executor
simple_timer simple_timer
casestudy_tools casestudy_tools
rclcpp
priority_executor
) )
endfunction() endfunction()
foreach(experiment ${experiments}) # Create all experiment targets
foreach(experiment ${EXPERIMENTS})
new_experiment(${experiment}) new_experiment(${experiment})
endforeach() endforeach()
target_link_libraries(casestudy_2023customfile_singlethread # Special handling for targets with additional dependencies
jsoncpp target_link_libraries(casestudy_2023customfile_singlethread jsoncpp)
# Installation
install(
TARGETS ${EXPERIMENTS}
RUNTIME DESTINATION lib/${PROJECT_NAME}
) )
install(TARGETS
${experiments}
DESTINATION lib/${PROJECT_NAME}
)
install( install(
DIRECTORY include/ DIRECTORY include/
DESTINATION include DESTINATION include
) )
# Testing
if(BUILD_TESTING) if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED) find_package(ament_lint_auto REQUIRED)
# the following line skips the linter which checks for copyrights
# uncomment the line when a copyright and license is not present in all source files
#set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# uncomment the line when this package is not in a git repo
#set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies() ament_lint_auto_find_test_dependencies()
endif() endif()
# Export and package configuration
ament_export_include_directories(include) ament_export_include_directories(include)
ament_package() ament_package()

3
src/casestudy/include/.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
# We don't have content in this dir yet, but want it in git anyways
# So we add a non-empty .gitignore file, explicitly not ignoring itself
!.gitignore

View file

@ -2,19 +2,25 @@
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?> <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3"> <package format="3">
<name>casestudy</name> <name>casestudy</name>
<version>0.0.0</version> <version>0.1.0</version>
<description>TODO: Package description</description> <description>
<maintainer email="vboxuser@todo.todo">vboxuser</maintainer> ROS 2 case study package for evaluating different executor implementations and timing behaviors
<license>TODO: License declaration</license> </description>
<maintainer email="kurt4wilson@gmail.com">kurt</maintainer>
<license>Apache License 2.0</license>
<buildtool_depend>ament_cmake</buildtool_depend> <buildtool_depend>ament_cmake</buildtool_depend>
<test_depend>ament_lint_auto</test_depend> <!-- Build Dependencies -->
<test_depend>ament_lint_common</test_depend> <depend>rclcpp</depend>
<depend>priority_executor</depend> <depend>priority_executor</depend>
<depend>simple_timer</depend> <depend>simple_timer</depend>
<depend>casestudy_tools</depend> <depend>casestudy_tools</depend>
<depend>jsoncpp</depend>
<!-- Test Dependencies -->
<test_depend>ament_lint_auto</test_depend>
<test_depend>ament_lint_common</test_depend>
<export> <export>
<build_type>ament_cmake</build_type> <build_type>ament_cmake</build_type>

View file

@ -2,193 +2,163 @@
#include "priority_executor/priority_executor.hpp" #include "priority_executor/priority_executor.hpp"
#include "priority_executor/priority_memory_strategy.hpp" #include "priority_executor/priority_memory_strategy.hpp"
#include "casestudy_tools/test_nodes.hpp" #include "casestudy_tools/test_nodes.hpp"
#include <string>
#include <fstream>
#include <unistd.h>
#include "std_msgs/msg/string.hpp" #include "std_msgs/msg/string.hpp"
#include "casestudy_tools/primes_workload.hpp" #include "casestudy_tools/primes_workload.hpp"
#include <sys/prctl.h>
#include "casestudy_tools/primes_workload.hpp"
#include "casestudy_tools/experiment.hpp" #include "casestudy_tools/experiment.hpp"
#include "jsoncpp/json/reader.h" #include "jsoncpp/json/reader.h"
#include <string>
#include <fstream>
#include <unistd.h>
#include <sys/prctl.h>
// bool should_do_task = true;
std::atomic<bool> should_do_task(true); std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, int idx) void run_one_executor(
{ std::function<void(std::atomic<bool>&)> const& exec_fun,
(void)idx; int const idx) {
// set this process to highest priority (void)idx;
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1; // set this process to highest priority
int result = sched_setscheduler(0, SCHED_FIFO, &param); struct sched_param param;
if (result != 0) param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
{ int const result = sched_setscheduler(0, SCHED_FIFO, &param);
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl; if (result != 0) {
} std::cout << "ros_experiment: sched_setscheduler failed: " << result
<< ": " << strerror(errno) << std::endl;
// set process name
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
}
int ros_experiment(int argc, char **arg, std::string file_name, ExperimentConfig config)
{
rclcpp::init(argc, arg);
// we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node");
node->declare_parameter("executor_type", "edf");
std::string executor_type;
node->get_parameter("executor_type", executor_type);
std::cout << "using executor type: " << executor_type << std::endl;
// TODO: move experiment configuration to main
config.executor_type = executor_type;
config.nodes.push_back(node);
Experiment experiment(config);
// std::string result_log = experiment.run(should_do_task);
std::vector<std::function<void(std::atomic<bool> &)>> exec_funs = experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs)
{
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
}
for (auto &t : exec_threads)
{
t.join();
}
experiment.writeLogsToFile(file_name, "2023customfile_singlethread");
return 0;
}
int main(int argc, char *argv[])
{
std::ifstream input_file("taskset.json"); // Replace with actual filename
if (!input_file.is_open())
{
std::cerr << "Error opening file!" << std::endl;
return 1;
}
Json::Value root;
Json::Reader reader;
bool parsingSuccessful = reader.parse(input_file, root);
if (!parsingSuccessful)
{
std::cerr << "Error parsing file!" << std::endl;
return 1;
}
// set ourselves to the second highest priority
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
int result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0)
{
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl;
}
// calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
// config.chain_lengths = {4, 3, 4};
for (uint i = 0; i < root["chain_lengths"].size(); i++)
{
config.chain_lengths.push_back(root["chain_lengths"][i].asUInt());
}
// config.node_ids = {{0, 1, 2, 3}, {4, 5, 6}, {7, 8, 9, 10}};
for (uint i = 0; i < root["node_ids"].size(); i++)
{
std::vector<uint32_t> node_ids_row;
for (uint j = 0; j < root["node_ids"][i].size(); j++)
{
node_ids_row.push_back(root["node_ids"][i][j].asUInt());
} }
config.node_ids.push_back(node_ids_row);
}
// config.node_priorities = {3, 2, 1, 0, 6, 5, 4, 10, 9, 8, 7}; // PICAS-style prioritiesA
for (uint i = 0; i < root["node_priorities"].size(); i++)
{
config.node_priorities.push_back(root["node_priorities"][i].asUInt());
}
// disabled for custom // set process name
config.num_groups = 0; prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
// config.group_memberships = {0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 2}; exec_fun(should_do_task);
}
// config.chain_timer_control = {0, 1, 2}; int ros_experiment(
for (uint i = 0; i < root["chain_timer_control"].size(); i++) int const argc,
{ char** const argv,
config.chain_timer_control.push_back(root["chain_timer_control"][i].asInt()); std::string const& file_name,
} ExperimentConfig config) {
// config.node_runtimes = {5, 5, 5, 5, // chain 1 rclcpp::init(argc, argv);
// 10, 20, 5, // chain 2
// 10, 10, 15, 20}; // chain 3 // we have to create a node to process any passed parameters
for (uint i = 0; i < root["node_runtimes"].size(); i++) auto const node = rclcpp::Node::make_shared("experiment_node");
{ node->declare_parameter("executor_type", "edf");
config.node_runtimes.push_back(root["node_runtimes"][i].asDouble());
}
// config.chain_periods = {50, 100, 110};
for (uint i = 0; i < root["chain_periods"].size(); i++)
{
config.chain_periods.push_back(root["chain_periods"][i].asInt());
}
// config.node_executor_assignments = {}; // empty - multicore mode
for (uint i = 0; i < root["node_executor_assignments"].size(); i++)
{
config.node_executor_assignments.push_back(root["node_executor_assignments"][i].asUInt() - 1);
}
// single threaded mode - assign executors to cpu cores
for (uint i = 0; i < root["executor_to_cpu_core"].size(); i++)
{
config.executor_to_cpu_assignments.push_back(root["executor_to_cpu_core"][i].asUInt() - 1);
}
config.parallel_mode = false; std::string executor_type;
config.cores = 4; node->get_parameter("executor_type", executor_type);
std::cout << "using executor type: " << executor_type << std::endl;
// sanity_check_config(config); config.executor_type = executor_type;
config.nodes.push_back(node);
// from 35/65 to 100/0 in increments of 5 Experiment experiment(config);
// for (int i = 0; i <= (100 - 35) / 5; i++) auto const exec_funs = experiment.getRunFunctions();
// { std::cout << "got " << exec_funs.size() << " executors" << std::endl;
// for now, run with 100% ros
int i = (100 - 35) / 5; std::vector<std::thread> exec_threads;
int ros_budget = 35 + i * 5; int i = 0;
int other_task_budget = 65 - i * 5; experiment.resetTimers();
std::cout << "ROS budget: " << ros_budget << " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread for (auto const& exec_fun : exec_funs) {
should_do_task.store(true); exec_threads.emplace_back(run_one_executor, exec_fun, i++);
double other_task_time = 80 * other_task_budget / 100.0; }
std::thread other_task(do_other_task, other_task_time, 80, std::ref(should_do_task));
// do ros task for (auto& t : exec_threads) {
t.join();
}
std::string file_name = "ros_" + std::to_string(ros_budget); experiment.writeLogsToFile(file_name, "2023customfile_singlethread");
// ros_experiment(argc, argv, file_name); return 0;
std::thread ros_task(ros_experiment, argc, argv, file_name, config); }
std::cout << "tasks started" << std::endl;
ros_task.join();
other_task.join();
std::cout << "tasks joined" << std::endl;
// // run once int main(int argc, char* argv[]) {
// break; std::ifstream input_file("taskset.json");
// }
if (!input_file.is_open()) {
std::cerr << "Error opening file!" << std::endl;
return 1;
}
Json::Value root;
Json::Reader reader;
bool const parsingSuccessful = reader.parse(input_file, root);
if (!parsingSuccessful) {
std::cerr << "Error parsing file!" << std::endl;
return 1;
}
// set ourselves to the second highest priority
struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
int const result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result
<< ": " << strerror(errno) << std::endl;
}
// calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
for (uint i = 0; i < root["chain_lengths"].size(); i++) {
config.chain_lengths.push_back(root["chain_lengths"][i].asUInt());
}
for (uint i = 0; i < root["node_ids"].size(); i++) {
std::vector<uint32_t> node_ids_row;
for (uint j = 0; j < root["node_ids"][i].size(); j++) {
node_ids_row.push_back(root["node_ids"][i][j].asUInt());
}
config.node_ids.push_back(std::move(node_ids_row));
}
for (uint i = 0; i < root["node_priorities"].size(); i++) {
config.node_priorities.push_back(root["node_priorities"][i].asUInt());
}
config.num_groups = 0;
for (uint i = 0; i < root["chain_timer_control"].size(); i++) {
config.chain_timer_control.push_back(root["chain_timer_control"][i].asInt());
}
for (uint i = 0; i < root["node_runtimes"].size(); i++) {
config.node_runtimes.push_back(root["node_runtimes"][i].asDouble());
}
for (uint i = 0; i < root["chain_periods"].size(); i++) {
config.chain_periods.push_back(root["chain_periods"][i].asInt());
}
for (uint i = 0; i < root["node_executor_assignments"].size(); i++) {
config.node_executor_assignments.push_back(root["node_executor_assignments"][i].asUInt() - 1);
}
for (uint i = 0; i < root["executor_to_cpu_core"].size(); i++) {
config.executor_to_cpu_assignments.push_back(root["executor_to_cpu_core"][i].asUInt() - 1);
}
config.parallel_mode = false;
config.cores = 4;
int const i = (100 - 35) / 5;
int const ros_budget = 35 + i * 5;
int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
should_do_task.store(true);
double const other_task_time = 80 * other_task_budget / 100.0;
std::thread other_task(do_other_task, other_task_time, 80, std::ref(should_do_task));
std::string const file_name = "ros_" + std::to_string(ros_budget);
std::thread ros_task(ros_experiment, argc, argv, file_name, config);
std::cout << "tasks started" << std::endl;
ros_task.join();
other_task.join();
std::cout << "tasks joined" << std::endl;
return 0;
} }

View file

@ -7,190 +7,191 @@
// bool should_do_task = true; // bool should_do_task = true;
std::atomic<bool> should_do_task(true); std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, void run_one_executor(
int idx) { std::function<void(std::atomic<bool>&)> const& exec_fun,
(void)idx; int const idx) {
// set this process to highest priority (void)idx;
// struct sched_param param; // set this process to highest priority
// param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1; // struct sched_param param;
// int result = sched_setscheduler(0, SCHED_FIFO, &param); // param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
// if (result != 0) // int result = sched_setscheduler(0, SCHED_FIFO, &param);
// { // if (result != 0)
// std::cout << "ros_experiment: sched_setscheduler failed: " << result << // {
// ": " << strerror(errno) << std::endl; // std::cout << "ros_experiment: sched_setscheduler failed: " << result <<
// } // ": " << strerror(errno) << std::endl;
sched_attr rt_attr; // }
rt_attr.size = sizeof(rt_attr); sched_attr rt_attr;
rt_attr.sched_flags = 0; rt_attr.size = sizeof(rt_attr);
rt_attr.sched_nice = 0; rt_attr.sched_flags = 0;
rt_attr.sched_priority = 99; rt_attr.sched_nice = 0;
rt_attr.sched_policy = SCHED_FIFO; rt_attr.sched_priority = 99;
rt_attr.sched_runtime = 0; rt_attr.sched_policy = SCHED_FIFO;
rt_attr.sched_period = 0; rt_attr.sched_runtime = 0;
int result = sched_setattr(0, &rt_attr, 0); rt_attr.sched_period = 0;
if (result != 0) {
std::cout << "executor task: could not set scheduler: " << result << ": " int const result = sched_setattr(0, &rt_attr, 0);
<< strerror(errno) << std::endl; if (result != 0) {
} std::cout << "executor task: could not set scheduler: " << result << ": "
<< strerror(errno) << std::endl;
}
// set process name // set process name
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0); prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
exec_fun(should_do_task);
} }
int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name, int ros_experiment(
ExperimentConfig config) { rclcpp::Node::SharedPtr const node,
std::string const& file_name,
ExperimentConfig config) {
std::string executor_type; std::string executor_type;
node->get_parameter("executor_type", executor_type); node->get_parameter("executor_type", executor_type);
int num_subscribers; int num_subscribers;
node->get_parameter("num_subscribers", num_subscribers); node->get_parameter("num_subscribers", num_subscribers);
int num_chains; int num_chains;
node->get_parameter("num_chains", num_chains); node->get_parameter("num_chains", num_chains);
std::cout << "using executor type: " << executor_type << std::endl; std::cout << "using executor type: " << executor_type << std::endl;
// TODO: move experiment configuration to main // TODO: move experiment configuration to main
config.executor_type = executor_type; config.executor_type = executor_type;
config.nodes.push_back(node); config.nodes.push_back(node);
Experiment experiment(config); Experiment experiment(config);
// std::string result_log = experiment.run(should_do_task); // std::string result_log = experiment.run(should_do_task);
std::vector<std::function<void(std::atomic<bool> &)>> exec_funs = auto const exec_funs = experiment.getRunFunctions();
experiment.getRunFunctions(); std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::cout << "got " << exec_funs.size() << " executors" << std::endl; std::vector<std::thread> exec_threads;
std::vector<std::thread> exec_threads; int i = 0;
int i = 0; experiment.resetTimers();
experiment.resetTimers();
for (auto exec_fun : exec_funs) { for (auto const& exec_fun : exec_funs) {
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i)); exec_threads.emplace_back(run_one_executor, exec_fun, i++);
i++; }
}
for (auto &t : exec_threads) { for (auto& t : exec_threads) {
t.join(); t.join();
} }
std::string outputname = "2024ours_executor2executor"; std::string outputname = "2024ours_executor2executor_" +
outputname += std::to_string(num_subscribers) + "_" +
"_" + std::to_string(num_subscribers) + "_" + std::to_string(num_chains); std::to_string(num_chains);
experiment.writeLogsToFile(file_name, outputname); experiment.writeLogsToFile(file_name, outputname);
return 0; return 0;
} }
int main(int argc, char *argv[]) { int main(int argc, char* argv[]) {
rclcpp::init(argc, argv); rclcpp::init(argc, argv);
// we have to create a node to process any passed parameters // we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node"); auto const node = rclcpp::Node::make_shared("experiment_node");
// auto node = rclcpp::Node::make_shared("experiment_node", // auto node = rclcpp::Node::make_shared("experiment_node",
// rclcpp::NodeOptions().use_intra_process_comms(true)); // rclcpp::NodeOptions().use_intra_process_comms(true));
node->declare_parameter("executor_type", "edf"); node->declare_parameter("executor_type", "edf");
node->declare_parameter("num_subscribers", 3); node->declare_parameter("num_subscribers", 3);
node->declare_parameter("num_chains", 1); node->declare_parameter("num_chains", 1);
node->declare_parameter("runtime", 0); node->declare_parameter("runtime", 0);
int num_s; int num_s;
node->get_parameter("num_subscribers", num_s); node->get_parameter("num_subscribers", num_s);
int runtime; int runtime;
node->get_parameter("runtime", runtime); node->get_parameter("runtime", runtime);
int num_chains; int num_chains;
node->get_parameter("num_chains", num_chains); node->get_parameter("num_chains", num_chains);
// set ourselves to the second highest priority // set ourselves to the second highest priority
// struct sched_param param; // struct sched_param param;
// param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2; // param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
// int result = sched_setscheduler(0, SCHED_FIFO, &param); // int result = sched_setscheduler(0, SCHED_FIFO, &param);
sched_attr rt_attr; sched_attr rt_attr;
rt_attr.size = sizeof(rt_attr); rt_attr.size = sizeof(rt_attr);
rt_attr.sched_policy = SCHED_FIFO; rt_attr.sched_policy = SCHED_FIFO;
// rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2; // rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
rt_attr.sched_priority = 99; rt_attr.sched_priority = 99;
int result = sched_setattr(0, &rt_attr, 0); int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) { if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": "
<< strerror(errno) << std::endl; << strerror(errno) << std::endl;
}
// calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
// runtimes and timers subject to change
config.chain_lengths = {};
config.node_ids = {};
config.chain_timer_control = {};
config.chain_periods = {};
config.node_runtimes = {};
config.node_executor_assignments = {};
int node_id = 0;
for (int c = 0; c < num_chains; c++) {
config.node_ids.push_back({});
for (int i = 0; i < num_s + 1; i++) {
config.node_ids[c].push_back(node_id);
if (i == 0) {
// first node is the publisher, and goes in the first executor
config.node_executor_assignments.push_back(0);
} else {
// all other nodes go in the second executor
config.node_executor_assignments.push_back(1);
}
node_id++;
} }
for (int i = 0; i < num_s + 1; i++) {
config.node_priorities.push_back(i); // calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
// runtimes and timers subject to change
config.chain_lengths = {};
config.node_ids = {};
config.chain_timer_control = {};
config.chain_periods = {};
config.node_runtimes = {};
config.node_executor_assignments = {};
int node_id = 0;
for (int c = 0; c < num_chains; c++) {
config.node_ids.push_back({});
for (int i = 0; i < num_s + 1; i++) {
config.node_ids[c].push_back(node_id);
if (i == 0) {
// first node is the publisher, and goes in the first executor
config.node_executor_assignments.push_back(0);
} else {
// all other nodes go in the second executor
config.node_executor_assignments.push_back(1);
}
node_id++;
}
for (int i = 0; i < num_s + 1; i++) {
config.node_priorities.push_back(i);
config.node_runtimes.push_back(runtime);
}
config.chain_lengths.push_back(num_s+1);
config.chain_periods.push_back(10);
config.chain_timer_control.push_back(c);
} }
config.chain_lengths.push_back(num_s+1);
config.chain_periods.push_back(10); config.executor_to_cpu_assignments = {0, 1};
for (int i = 0; i < num_s + 1; i++) {
config.node_runtimes.push_back(runtime); std::cout << "node ids: " << std::endl;
for (size_t i = 0; i < config.node_ids.size(); i++) {
for (size_t j = 0; j < config.node_ids[i].size(); j++) {
std::cout << config.node_ids[i][j] << " ";
}
std::cout << std::endl;
} }
config.chain_timer_control.push_back(c);
}
config.executor_to_cpu_assignments = {0, 1};
std::cout << "node ids: " << std::endl;
for (size_t i = 0; i < config.node_ids.size(); i++) {
for (size_t j = 0; j < config.node_ids[i].size(); j++) {
std::cout << config.node_ids[i][j] << " ";
}
std::cout << std::endl;
}
config.num_groups = 0; config.num_groups = 0;
config.group_memberships = {}; // no groups config.group_memberships = {}; // no groups
config.parallel_mode = false; config.parallel_mode = false;
config.cores = 1; config.cores = 1;
config.special_cases.push_back("2024ours_latency"); config.special_cases.push_back("2024ours_latency");
// sanity_check_config(config); // sanity_check_config(config);
// from 35/65 to 100/0 in increments of 5 // from 35/65 to 100/0 in increments of 5
// for (int i = 0; i <= (100 - 35) / 5; i++) // for (int i = 0; i <= (100 - 35) / 5; i++)
// { // {
// for now, run with 100% ros // for now, run with 100% ros
int i = (100 - 35) / 5; int const i = (100 - 35) / 5;
int ros_budget = 35 + i * 5; int const ros_budget = 35 + i * 5;
int other_task_budget = 65 - i * 5; int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl; << " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread // do other task in a separate thread
should_do_task.store(true); should_do_task.store(true);
// do ros task // do ros task
std::string file_name = "ros_" + std::to_string(ros_budget); std::string const file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name); // ros_experiment(argc, argv, file_name);
std::thread ros_task(ros_experiment, node, file_name, config); std::thread ros_task(ros_experiment, node, file_name, config);
std::cout << "tasks started" << std::endl; std::cout << "tasks started" << std::endl;
ros_task.join(); ros_task.join();
std::cout << "tasks joined" << std::endl; std::cout << "tasks joined" << std::endl;
// // run once // // run once
// break; // break;
// } // }
return 0;
} }

View file

@ -7,182 +7,179 @@
// bool should_do_task = true; // bool should_do_task = true;
std::atomic<bool> should_do_task(true); std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool>&)> const& exec_fun,
int const idx) {
(void)idx;
// set this process to highest priority
// struct sched_param param;
// param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
// int result = sched_setscheduler(0, SCHED_FIFO, &param);
// if (result != 0)
// {
// std::cout << "ros_experiment: sched_setscheduler failed: " << result <<
// ": " << strerror(errno) << std::endl;
// }
sched_attr rt_attr;
rt_attr.size = sizeof(rt_attr);
rt_attr.sched_flags = 0;
rt_attr.sched_nice = 0;
rt_attr.sched_priority = 99;
rt_attr.sched_policy = SCHED_FIFO;
rt_attr.sched_runtime = 0;
rt_attr.sched_period = 0;
int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "executor task: could not set scheduler: " << result << ": "
<< strerror(errno) << std::endl;
}
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, // set process name
int idx) { prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
(void)idx;
// set this process to highest priority
// struct sched_param param;
// param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
// int result = sched_setscheduler(0, SCHED_FIFO, &param);
// if (result != 0)
// {
// std::cout << "ros_experiment: sched_setscheduler failed: " << result <<
// ": " << strerror(errno) << std::endl;
// }
sched_attr rt_attr;
rt_attr.size = sizeof(rt_attr);
rt_attr.sched_flags = 0;
rt_attr.sched_nice = 0;
rt_attr.sched_priority = 99;
rt_attr.sched_policy = SCHED_FIFO;
rt_attr.sched_runtime = 0;
rt_attr.sched_period = 0;
int result = sched_setattr(0, &rt_attr, 0);
if (result != 0) {
std::cout << "executor task: could not set scheduler: " << result << ": "
<< strerror(errno) << std::endl;
}
// set process name exec_fun(should_do_task);
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
} }
int ros_experiment(rclcpp::Node::SharedPtr node, std::string file_name, int ros_experiment(rclcpp::Node::SharedPtr const& node, std::string const& file_name,
ExperimentConfig config) { ExperimentConfig config) {
std::string executor_type;
node->get_parameter("executor_type", executor_type);
int num_subscribers;
node->get_parameter("num_subscribers", num_subscribers);
int num_chains;
node->get_parameter("num_chains", num_chains);
std::cout << "using executor type: " << executor_type << std::endl;
std::string executor_type; // TODO: move experiment configuration to main
node->get_parameter("executor_type", executor_type); config.executor_type = executor_type;
int num_subscribers; config.nodes.push_back(node);
node->get_parameter("num_subscribers", num_subscribers);
int num_chains;
node->get_parameter("num_chains", num_chains);
std::cout << "using executor type: " << executor_type << std::endl;
// TODO: move experiment configuration to main Experiment experiment(config);
config.executor_type = executor_type; // std::string result_log = experiment.run(should_do_task);
config.nodes.push_back(node); std::vector<std::function<void(std::atomic<bool>&)>> exec_funs =
experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto const& exec_fun : exec_funs) {
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
}
Experiment experiment(config); for (auto& t : exec_threads) {
// std::string result_log = experiment.run(should_do_task); t.join();
std::vector<std::function<void(std::atomic<bool> &)>> exec_funs = }
experiment.getRunFunctions();
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads;
int i = 0;
experiment.resetTimers();
for (auto exec_fun : exec_funs) {
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
}
for (auto &t : exec_threads) { std::string const outputname = "2024ours_latency_" +
t.join(); std::to_string(num_subscribers) + "_" + std::to_string(num_chains);
} experiment.writeLogsToFile(file_name, outputname);
std::string outputname = "2024ours_latency"; return 0;
outputname +=
"_" + std::to_string(num_subscribers) + "_" + std::to_string(num_chains);
experiment.writeLogsToFile(file_name, outputname);
return 0;
} }
int main(int argc, char *argv[]) { int main(int argc, char* argv[]) {
rclcpp::init(argc, argv); rclcpp::init(argc, argv);
// we have to create a node to process any passed parameters // we have to create a node to process any passed parameters
auto node = rclcpp::Node::make_shared("experiment_node"); auto const node = rclcpp::Node::make_shared("experiment_node");
// auto node = rclcpp::Node::make_shared("experiment_node", // auto node = rclcpp::Node::make_shared("experiment_node",
// rclcpp::NodeOptions().use_intra_process_comms(true)); // rclcpp::NodeOptions().use_intra_process_comms(true));
node->declare_parameter("executor_type", "edf"); node->declare_parameter("executor_type", "edf");
node->declare_parameter("num_subscribers", 3); node->declare_parameter("num_subscribers", 3);
node->declare_parameter("num_chains", 1); node->declare_parameter("num_chains", 1);
node->declare_parameter("runtime", 0); node->declare_parameter("runtime", 0);
int num_s; int num_s;
node->get_parameter("num_subscribers", num_s); node->get_parameter("num_subscribers", num_s);
int runtime; int runtime;
node->get_parameter("runtime", runtime); node->get_parameter("runtime", runtime);
int num_chains; int num_chains;
node->get_parameter("num_chains", num_chains); node->get_parameter("num_chains", num_chains);
// set ourselves to the second highest priority // set ourselves to the second highest priority
// struct sched_param param; // struct sched_param param;
// param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2; // param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
// int result = sched_setscheduler(0, SCHED_FIFO, &param); // int result = sched_setscheduler(0, SCHED_FIFO, &param);
sched_attr rt_attr; sched_attr rt_attr;
rt_attr.size = sizeof(rt_attr); rt_attr.size = sizeof(rt_attr);
rt_attr.sched_policy = SCHED_FIFO; rt_attr.sched_policy = SCHED_FIFO;
// rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2; // rt_attr.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
rt_attr.sched_priority = 99; rt_attr.sched_priority = 99;
int result = sched_setattr(0, &rt_attr, 0); int const result = sched_setattr(0, &rt_attr, 0);
if (result != 0) { if (result != 0) {
std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": " std::cout << "ros_experiment: sched_setscheduler failed: " << result << ": "
<< strerror(errno) << std::endl; << strerror(errno) << std::endl;
}
// calibrate the dummy load for the current system
calibrate_dummy_load();
ExperimentConfig config;
// runtimes and timers subject to change
config.chain_lengths = {};
config.node_ids = {};
config.chain_timer_control = {};
config.chain_periods = {};
config.node_runtimes = {};
int node_id = 0;
for (int c = 0; c < num_chains; c++) {
config.node_ids.push_back({});
for (int i = 0; i < num_s + 1; i++) {
config.node_ids[c].push_back(node_id);
node_id++;
} }
for (int i = 0; i < num_s + 1; i++) { // calibrate the dummy load for the current system
config.node_priorities.push_back(i); calibrate_dummy_load();
ExperimentConfig config;
// runtimes and timers subject to change
config.chain_lengths = {};
config.node_ids = {};
config.chain_timer_control = {};
config.chain_periods = {};
config.node_runtimes = {};
int node_id = 0;
for (int c = 0; c < num_chains; c++) {
config.node_ids.push_back({});
for (int i = 0; i < num_s + 1; i++) {
config.node_ids[c].push_back(node_id);
node_id++;
}
for (int i = 0; i < num_s + 1; i++) {
config.node_priorities.push_back(i);
}
config.chain_lengths.push_back(num_s + 1);
config.chain_periods.push_back(10);
for (int i = 0; i < num_s + 1; i++) {
config.node_runtimes.push_back(runtime);
}
config.chain_timer_control.push_back(c);
} }
config.chain_lengths.push_back(num_s+1);
config.chain_periods.push_back(10); std::cout << "node ids: " << std::endl;
for (int i = 0; i < num_s + 1; i++) { for (size_t i = 0; i < config.node_ids.size(); i++) {
config.node_runtimes.push_back(runtime); for (size_t j = 0; j < config.node_ids[i].size(); j++) {
std::cout << config.node_ids[i][j] << " ";
}
std::cout << std::endl;
} }
config.chain_timer_control.push_back(c);
}
std::cout << "node ids: " << std::endl; config.num_groups = 0;
for (size_t i = 0; i < config.node_ids.size(); i++) { config.group_memberships = {}; // no groups
for (size_t j = 0; j < config.node_ids[i].size(); j++) {
std::cout << config.node_ids[i][j] << " ";
}
std::cout << std::endl;
}
config.node_executor_assignments = {};
// config.node_executor_assignments = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
config.parallel_mode = false;
config.cores = 1;
config.special_cases.push_back("2024ours_latency");
config.num_groups = 0; // sanity_check_config(config);
config.group_memberships = {}; // no groups
config.node_executor_assignments = {}; // from 35/65 to 100/0 in increments of 5
// config.node_executor_assignments = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // for (int i = 0; i <= (100 - 35) / 5; i++)
config.parallel_mode = false; // {
config.cores = 1; // for now, run with 100% ros
config.special_cases.push_back("2024ours_latency"); int const i = (100 - 35) / 5;
int const ros_budget = 35 + i * 5;
int const other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread
should_do_task.store(true);
// sanity_check_config(config); // do ros task
std::string const file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name);
std::thread ros_task(ros_experiment, node, file_name, config);
std::cout << "tasks started" << std::endl;
ros_task.join();
std::cout << "tasks joined" << std::endl;
// from 35/65 to 100/0 in increments of 5 // // run once
// for (int i = 0; i <= (100 - 35) / 5; i++) // break;
// { // }
// for now, run with 100% ros return 0;
int i = (100 - 35) / 5;
int ros_budget = 35 + i * 5;
int other_task_budget = 65 - i * 5;
std::cout << "ROS budget: " << ros_budget
<< " Other task budget: " << other_task_budget << std::endl;
// do other task in a separate thread
should_do_task.store(true);
// do ros task
std::string file_name = "ros_" + std::to_string(ros_budget);
// ros_experiment(argc, argv, file_name);
std::thread ros_task(ros_experiment, node, file_name, config);
std::cout << "tasks started" << std::endl;
ros_task.join();
std::cout << "tasks joined" << std::endl;
// // run once
// break;
// }
} }

View file

@ -3,13 +3,10 @@
#include <string> #include <string>
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <unistd.h> #include <unistd.h>
#include <sys/prctl.h> #include <sys/prctl.h>
#include <rclcpp/rclcpp.hpp> #include <rclcpp/rclcpp.hpp>
#include <std_msgs/msg/string.hpp> #include <std_msgs/msg/string.hpp>
#include "casestudy_tools/test_nodes.hpp" #include "casestudy_tools/test_nodes.hpp"
#include "casestudy_tools/experiment.hpp" #include "casestudy_tools/experiment.hpp"
#include "casestudy_tools/primes_workload.hpp" #include "casestudy_tools/primes_workload.hpp"
@ -19,116 +16,119 @@
// bool should_do_task = true; // bool should_do_task = true;
std::atomic<bool> should_do_task(true); std::atomic<bool> should_do_task(true);
void run_one_executor(std::function<void(std::atomic<bool> &)> exec_fun, int idx) void run_one_executor(
{ std::function<void(std::atomic<bool>&)> const& exec_fun,
(void)idx; int const idx) {
// cpu_set_t cpuset; (void)idx;
// CPU_ZERO(&cpuset); // cpu_set_t cpuset;
// CPU_SET(0, &cpuset); // CPU_ZERO(&cpuset);
// CPU_SET(1, &cpuset); // CPU_SET(0, &cpuset);
// CPU_SET(2, &cpuset); // CPU_SET(1, &cpuset);
// CPU_SET(3, &cpuset); // CPU_SET(2, &cpuset);
// CPU_SET(3, &cpuset);
// int result = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); // int result = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
// if (result != 0) // if (result != 0)
// { // {
// std::cout << "ros_experiment: sched_setaffinity failed: " << result << ": " << strerror(errno) << std::endl; // std::cout << "ros_experiment: sched_setaffinity failed: " << result << ": " << strerror(errno) << std::endl;
// } // }
// set this process to second highest priority // set this process to second highest priority
struct sched_param param; struct sched_param param;
param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2; param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 2;
int result = sched_setscheduler(0, SCHED_FIFO, &param); int const result = sched_setscheduler(0, SCHED_FIFO, &param);
if (result != 0)
{ if (result != 0) {
std::cerr << "ros_experiment: sched_setscheduler failed: " << result << ": " << strerror(errno) << std::endl; std::cerr << "ros_experiment: sched_setscheduler failed: " << result
std::cerr << "This operation requires root privileges. Please run the program with sufficient permissions." << std::endl; << ": " << strerror(errno) << std::endl;
exit(EXIT_FAILURE); std::cerr << "This operation requires root privileges. Please run the program with sufficient permissions."
} << std::endl;
// Set the process name to "ros_experiment" for easier identification during debugging or system monitoring. exit(EXIT_FAILURE);
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0); }
exec_fun(should_do_task); // Set the process name to "ros_experiment" for easier identification during debugging or system monitoring.
prctl(PR_SET_NAME, "ros_experiment", 0, 0, 0);
exec_fun(should_do_task);
} }
int ros_experiment(int argc, char **arg, std::string file_name, ExperimentConfig config) int ros_experiment(
{ int const argc,
char** const argv,
std::string const& file_name,
ExperimentConfig config) {
rclcpp::init(argc, argv);
// we have to create a node to process any passed parameters
auto const node = rclcpp::Node::make_shared("experiment_parameters");
node->declare_parameter("executor_type", "edf");
rclcpp::init(argc, arg); std::string executor_type;
// we have to create a node to process any passed parameters node->get_parameter("executor_type", executor_type);
auto node = rclcpp::Node::make_shared("experiment_parameters"); std::cout << "using executor type: " << executor_type << std::endl;
node->declare_parameter("executor_type", "edf");
std::string executor_type; config.executor_type = executor_type;
node->get_parameter("executor_type", executor_type); // Enable parallel mode for the experiment configuration.
std::cout << "using executor type: " << executor_type << std::endl; // When set to true, this allows multiple executors to run concurrently,
// leveraging multi-threading to improve performance on multi-core systems.
config.parallel_mode = true;
config.nodes.push_back(node);
// TODO: move experiment configuration to main Experiment experiment(config);
config.executor_type = executor_type; // std::string result_log = experiment.run(should_do_task);
// Enable parallel mode for the experiment configuration. auto const exec_funs = experiment.getRunFunctions();
// When set to true, this allows multiple executors to run concurrently, std::cout << "got " << exec_funs.size() << " executors" << std::endl;
// leveraging multi-threading to improve performance on multi-core systems.
config.parallel_mode = true; std::vector<std::thread> exec_threads;
config.nodes.push_back(node); int i = 0;
experiment.resetTimers();
for (auto const& exec_fun : exec_funs) {
exec_threads.emplace_back(run_one_executor, exec_fun, i++);
}
Experiment experiment(config); for (auto& t : exec_threads) {
// std::string result_log = experiment.run(should_do_task); t.join();
std::vector<std::function<void(std::atomic<bool> &)> > exec_funs = experiment.getRunFunctions(); }
std::cout << "got " << exec_funs.size() << " executors" << std::endl;
std::vector<std::thread> exec_threads; std::string const outputname = "casestudy_example";
int i = 0; experiment.writeLogsToFile(file_name, outputname);
experiment.resetTimers();
for (auto exec_fun : exec_funs)
{
exec_threads.push_back(std::thread(run_one_executor, exec_fun, i));
i++;
}
for (auto &t : exec_threads) return 0;
{
t.join();
}
std::string outputname = "casestudy_example";
experiment.writeLogsToFile(file_name, outputname);
return 0;
} }
int main(int argc, char *argv[]) int main(int argc, char* argv[]) {
{ // calibrate the dummy load for the current system
// calibrate the dummy load for the current system calibrate_dummy_load();
calibrate_dummy_load();
ExperimentConfig config; ExperimentConfig config;
config.chain_lengths = {2, 2}; config.chain_lengths = {2, 2};
config.node_ids = {{0, 1}, {2, 3}}; config.node_ids = {{0, 1}, {2, 3}};
config.node_priorities = {1, 0, 3, 2}; config.node_priorities = {1, 0, 3, 2};
config.chain_timer_control = {0, 1}; config.chain_timer_control = {0, 1};
config.node_runtimes = {10, 10, 10, 10}; config.node_runtimes = {10, 10, 10, 10};
// node 0 has a period of 80, and is the only timer // node 0 has a period of 80, and is the only timer
config.chain_periods = {100, 100}; config.chain_periods = {100, 100};
config.node_executor_assignments = {}; config.node_executor_assignments = {};
config.parallel_mode = true; config.parallel_mode = true;
config.cores = 2; config.cores = 2;
sanity_check_config(config); sanity_check_config(config);
std::thread ros_task([&]() { std::thread ros_task([&]() {
try try {
{ ros_experiment(argc, argv, "cs_example", config);
ros_experiment(argc, argv, "cs_example", config); } catch (std::exception const& e) {
} std::cerr << "Exception in ros_experiment: " << e.what() << std::endl;
catch (const std::exception &e) } catch (...) {
{ std::cerr << "Unknown exception in ros_experiment." << std::endl;
std::cerr << "Exception in ros_experiment: " << e.what() << std::endl; }
} });
catch (...)
{ std::cout << "tasks started" << std::endl;
std::cerr << "Unknown exception in ros_experiment." << std::endl; ros_task.join();
} std::cout << "tasks joined" << std::endl;
});
std::cout << "tasks started" << std::endl; return 0;
ros_task.join();
std::cout << "tasks joined" << std::endl;
} }

View file

@ -1,38 +1,26 @@
cmake_minimum_required(VERSION 3.5) cmake_minimum_required(VERSION 3.8)
project(casestudy_tools) project(casestudy_tools VERSION 0.1.0)
# Default to C99 # Set C++ standards
if(NOT CMAKE_C_STANDARD) set(CMAKE_CXX_STANDARD 17)
set(CMAKE_C_STANDARD 99) set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif() set(CMAKE_CXX_EXTENSIONS OFF)
# Default to C++14
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
endif()
# Compiler options
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic) add_compile_options(-Wall -Wextra -Wpedantic)
endif() endif()
# find dependencies # Find dependencies
find_package(ament_cmake REQUIRED) find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED) find_package(rclcpp REQUIRED)
find_package(priority_executor REQUIRED) find_package(priority_executor REQUIRED)
find_package(simple_timer REQUIRED) find_package(simple_timer REQUIRED)
if(BUILD_TESTING) # Library targets
find_package(ament_lint_auto REQUIRED) add_library(primes_workload
# the following line skips the linter which checks for copyrights src/primes_workload.cpp
# uncomment the line when a copyright and license is not present in all source files )
#set(ament_cmake_copyright_FOUND TRUE)
# the following line skips cpplint (only works in a git repo)
# uncomment the line when this package is not in a git repo
#set(ament_cmake_cpplint_FOUND TRUE)
ament_lint_auto_find_test_dependencies()
endif()
add_library(primes_workload src/primes_workload.cpp)
target_include_directories(primes_workload PUBLIC target_include_directories(primes_workload PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include> $<INSTALL_INTERFACE:include>
@ -41,7 +29,6 @@ ament_target_dependencies(primes_workload
simple_timer simple_timer
rclcpp rclcpp
priority_executor priority_executor
) )
add_library(test_nodes src/test_nodes.cpp) add_library(test_nodes src/test_nodes.cpp)
@ -54,8 +41,9 @@ ament_target_dependencies(test_nodes
rclcpp rclcpp
simple_timer simple_timer
) )
add_library(experiment_host
add_library(experiment_host src/experiment.cpp) src/experiment.cpp
)
target_include_directories(experiment_host PUBLIC target_include_directories(experiment_host PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:include> $<INSTALL_INTERFACE:include>
@ -70,7 +58,13 @@ ament_target_dependencies(experiment_host
priority_executor priority_executor
) )
# Testing
if(BUILD_TESTING)
find_package(ament_lint_auto REQUIRED)
ament_lint_auto_find_test_dependencies()
endif()
# Installation
install( install(
DIRECTORY include/ DIRECTORY include/
DESTINATION include DESTINATION include
@ -85,6 +79,7 @@ install(
INCLUDES DESTINATION include INCLUDES DESTINATION include
) )
# Export and package configuration
ament_export_include_directories(include) ament_export_include_directories(include)
# ament_export_libraries(primes_workload test_nodes experiment_host) # ament_export_libraries(primes_workload test_nodes experiment_host)
ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET)

View file

@@ -8,9 +8,7 @@
#include "casestudy_tools/test_nodes.hpp"
#include "simple_timer/rt-sched.hpp"

struct ExperimentConfig {
  std::vector<uint32_t> chain_lengths;
  // TODO: rename to callback_ids
  std::vector<std::vector<uint32_t>> node_ids;
@@ -38,103 +36,101 @@ struct ExperimentConfig
  bool parallel_mode = false;
  size_t cores = 2;

  // nodes to host the callbacks. In parallel (one executor using multiple threads) mode,
  // there is one node and one executor. The node is provided by the experiment provider.
  // In singlethread (each executor uses one thread), there is one node per executor.
  // Additional nodes may be created by the experiment host.
  std::vector<rclcpp::Node::SharedPtr> nodes;

  std::vector<std::string> special_cases;
};

struct experiment_executor {
  std::shared_ptr<priority_executor::TimedExecutor> executor;
  std::shared_ptr<priority_executor::PriorityMemoryStrategy<>> strat;
  std::shared_ptr<rclcpp::Executor> default_executor;
};

class Experiment {
public:
  explicit Experiment(ExperimentConfig config);

  /**
   * @brief Run the experiment in the current thread
   * @param should_do_task a flag that can be set to false to stop the experiment.
   * The flag will be set to false when the experiment is complete.
   * @return std::string containing experiment results
   */
  std::string run(std::atomic<bool>& should_do_task);

  /**
   * @brief Get the functions required to run the experiment
   * @return vector of functions. Each function will run a different executor.
   * It's up to the caller to run these functions in different threads.
   * The caller may set the thread properties (e.g. priority) before running the function.
   * Each function accepts a flag that can be set to false to stop the experiment.
   * The flag will be set to false when the experiment is complete.
   */
  std::vector<std::function<void(std::atomic<bool>&)>> getRunFunctions();

  static size_t numExecutorsRequired(ExperimentConfig const &config);

  /**
   * @brief Get the executor (and strategy) for a given executor index
   * @param executor_idx The index of the executor (defaults to 0)
   * @return experiment_executor
   */
  experiment_executor getExecutor(int executor_idx = 0);

  node_time_logger getInternalLogger();

  /**
   * @brief Collect logs from the executors and optionally add logs from passed in loggers
   * @param loggers Optional array of loggers to add to the logs
   * @return std::string containing the combined logs
   */
  std::string buildLogs(std::vector<node_time_logger> loggers = {});

  void resetTimers();
  void writeLogsToFile(std::string const &filename, std::string const &experiment_name);

private:
  ExperimentConfig config;
  std::vector<experiment_executor> executors;

  // Chain-indexed vector of deques of deadlines
  std::vector<std::deque<uint64_t>*> chain_deadlines_deque;
  // Maps node_id to node
  std::map<uint32_t, std::shared_ptr<CaseStudyNode>> nodes;
  // Maps chain_id to timer
  std::map<uint32_t, std::shared_ptr<rclcpp::TimerBase>> chain_timer_handles;

  /**
   * @brief Create the executors using the type and amount set in the config
   */
  void createExecutors();

  /**
   * @brief Create a single executor using the type set in the config
   * @param executor_num The executor number to create
   * @return experiment_executor
   */
  experiment_executor createSingleExecutor(uint executor_num = 0);

  /**
   * @brief Get the executor for a given node id
   * @param node_id If node_executor_assignments is empty, this parameter is ignored
   * and the first executor is returned
   * @return experiment_executor
   */
  experiment_executor getExecutorForNode(int node_id = 0);

  void createNodesAndAssignProperties();
  void setInitialDeadlines();

  node_time_logger _logger;
};

int do_other_task(double work_time, uint period, std::atomic<bool>& should_do_task);

void sanity_check_config(ExperimentConfig const &config);

#endif // RTIS_EXPERIMENT_HOST
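
For orientation, here is a minimal sketch of how a case-study binary might drive the interface declared above. It uses only members shown in this header; the include path, the node/executor ownership, and every concrete value (chain layout, node assignment, output file name) are illustrative assumptions rather than settings taken from the actual experiments.

```
#include <atomic>
#include <memory>
#include <thread>
#include <vector>

#include "casestudy_tools/experiment.hpp"

int main(int argc, char **argv) {
  rclcpp::init(argc, argv);

  // Assumed values: one chain of two callbacks, both hosted on node 0.
  ExperimentConfig config;
  config.chain_lengths = {2};
  config.node_ids = {{0, 0}};
  config.parallel_mode = false;
  config.nodes.push_back(std::make_shared<rclcpp::Node>("experiment_node"));
  sanity_check_config(config);

  Experiment experiment(config);

  // One run function per executor; the caller owns the threads and may set
  // their scheduling properties before starting them.
  std::atomic<bool> should_do_task{true};
  auto run_functions = experiment.getRunFunctions();
  std::vector<std::thread> threads;
  for (auto &run : run_functions) {
    threads.emplace_back([&run, &should_do_task]() { run(should_do_task); });
  }
  for (auto &thread : threads) {
    thread.join();
  }

  experiment.writeLogsToFile("sketch_results.json", "sketch_run");
  rclcpp::shutdown();
  return 0;
}
```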
@@ -22,7 +22,7 @@ namespace DummyLoadCalibration
{
  inline std::atomic<int> calibration{1};

  inline void setCalibration(int const calib);
  inline int getCalibration();
}

@@ -39,6 +39,6 @@ void calibrate_dummy_load();
 * This function performs a busy wait for a specified duration.
 * It is used to calibrate the dummy load for the current system.
 */
void dummy_load(int const load_ms);

#endif
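
The calibration/load pair declared here is the simulated work used by the case-study callbacks. A minimal usage sketch, assuming the header installs as `casestudy_tools/primes_workload.hpp` and that calibration runs once before any load is requested:

```
#include "casestudy_tools/primes_workload.hpp"

int main() {
  // Measure how much spinning this machine needs for the 100 ms reference
  // load; the result is stored via DummyLoadCalibration::setCalibration().
  calibrate_dummy_load();

  // The stored factor corresponds to a 100 ms load, so this busy-waits for
  // roughly 50 ms of CPU time on the calibrated machine.
  dummy_load(50);
  return 0;
}
```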
@@ -1,61 +1,66 @@
#ifndef RTIS_TEST_NODES
#define RTIS_TEST_NODES

#include <string>

#include "rclcpp/rclcpp.hpp"
#include "std_msgs/msg/string.hpp"
#include "std_msgs/msg/int32.hpp"

#include "simple_timer/rt-sched.hpp"

class CaseStudyNode {
public:
  node_time_logger logger;

  // inherit constructor
  CaseStudyNode(std::string const &publish_topic, rclcpp::Node::SharedPtr const &node) {
    this->name = publish_topic;
    this->node = node;
    // seed random number generator
    srand(time(nullptr));
  }

  bool use_random_runtime = false;
  double normal_runtime_scale = 0.8;
  double over_runtime_scale = 1.2;
  double runtime_over_chance = 0.1;

  std::string get_name();

  rclcpp::CallbackGroup::SharedPtr callback_group_;

private:
  rclcpp::Node::SharedPtr node;
  std::string name;
};

#define COUNT_MAX 500

class PublisherNode : public CaseStudyNode {
public:
  PublisherNode(std::string const &publish_topic, double runtime, int period, int chain,
                rclcpp::Node::SharedPtr const &node,
                rclcpp::CallbackGroup::SharedPtr const &callback_group = nullptr);

  rclcpp::TimerBase::SharedPtr timer_;
  int count_max_ = COUNT_MAX;

private:
  rclcpp::Publisher<std_msgs::msg::Int32>::SharedPtr pub_;
  double runtime_;
  int period_;
  int chain_;
};

class WorkerNode : public CaseStudyNode {
public:
  WorkerNode(std::string const &subscribe_topic, std::string const &publish_topic,
             double runtime, int period, int chain,
             rclcpp::Node::SharedPtr const &node,
             rclcpp::CallbackGroup::SharedPtr const &callback_group = nullptr);

  rclcpp::Subscription<std_msgs::msg::Int32>::SharedPtr sub_;
  int count_max_ = COUNT_MAX;

private:
  double runtime_;
  int period_;
  int chain_;
  rclcpp::Publisher<std_msgs::msg::Int32>::SharedPtr pub_;
};

#endif // RTIS_TEST_NODES
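
These two classes are the building blocks of a processing chain: a PublisherNode releases work from a wall timer and publishes a counter, and WorkerNodes do their work whenever a message arrives and forward it. Below is a minimal wiring sketch for one two-callback chain using only the constructors declared above; the topic names, runtimes, and periods are illustrative values, not the ones used in the experiments.

```
#include <memory>
#include <vector>

#include "casestudy_tools/test_nodes.hpp"

// Builds chain 0: a timer-driven publisher (~10 ms of work every 100 ms)
// feeding a worker (~20 ms of work per message) over an intermediate topic.
void build_example_chain(rclcpp::Node::SharedPtr const &node,
                         std::vector<std::shared_ptr<CaseStudyNode>> &out) {
  out.push_back(std::make_shared<PublisherNode>(
      "chain_0_step_0",  // publish topic
      10.0,              // runtime in ms, handed to dummy_load()
      100,               // timer period in ms
      0,                 // chain id
      node));
  out.push_back(std::make_shared<WorkerNode>(
      "chain_0_step_0",  // subscribe to the publisher's output
      "chain_0_step_1",  // publish topic for the next link
      20.0,              // runtime in ms
      100,               // period in ms (stored, but no timer is created)
      0,                 // chain id
      node));
}
```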
@@ -2,10 +2,12 @@
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
  <name>casestudy_tools</name>
  <version>0.1.0</version>
  <description>
    ROS 2 package providing utility tools and common functionality for dynamic executor case studies
  </description>
  <maintainer email="kurt4wilson@gmail.com">kurt</maintainer>
  <license>Apache License 2.0</license>

  <buildtool_depend>ament_cmake</buildtool_depend>
File diff suppressed because it is too large.
@@ -6,7 +6,7 @@
#include "casestudy_tools/primes_workload.hpp"

void DummyLoadCalibration::setCalibration(int const calib) {
  calibration.store(calib, std::memory_order_relaxed);
}

@@ -20,12 +20,12 @@ void calibrate_dummy_load()
  for (int iteration = 0; iteration < MAX_ITERATIONS; ++iteration)
  {
    // Measure the time taken for the dummy load
    auto const start_time = std::chrono::high_resolution_clock::now();
    dummy_load(TARGET_DURATION_MS); // intended workload
    auto const end_time = std::chrono::high_resolution_clock::now();

    // Calculate duration in microseconds
    auto const duration_us =
        std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time).count();

    std::cout << "[Calibration] Iteration: " << iteration
@@ -51,12 +51,12 @@ void calibrate_dummy_load()
  }
}

void dummy_load(int const load_ms)
{
  int const calib_factor = DummyLoadCalibration::getCalibration();
  // calibration factor corresponds to 100ms load, scale it
  int const scaled_factor = calib_factor * load_ms / 100;
  volatile int dummy_var = 0;
  constexpr int DUMMY_LOAD_ITER = 100'000;
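
For reference, the scaling above is plain integer arithmetic: the stored calibration factor represents a 100 ms load and is scaled linearly to the requested duration. A small worked example with an assumed calibration value:

```
#include <cassert>

int main() {
  int const calib_factor = 4000;                           // assumed: measured for ~100 ms
  int const load_ms = 25;                                  // requested load
  int const scaled_factor = calib_factor * load_ms / 100;  // multiply first, then divide
  assert(scaled_factor == 1000);                           // a quarter of the 100 ms budget
  return 0;
}
```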
@@ -6,169 +6,174 @@
#include <ctime>
#include <rclcpp/logger.hpp>

std::string CaseStudyNode::get_name() {
  return name;
}

#define BUFFER_LENGTH 0
rclcpp::QoS get_qos() {
  if (BUFFER_LENGTH == 0) {
    return rclcpp::QoS(rclcpp::KeepAll());
  }
  return rclcpp::QoS(rclcpp::KeepLast(BUFFER_LENGTH));
}

void timespec_diff(timespec const* start, timespec const* end, timespec* result) {
  if ((end->tv_nsec - start->tv_nsec) < 0) {
    result->tv_sec = end->tv_sec - start->tv_sec - 1;
    result->tv_nsec = 1000000000 + end->tv_nsec - start->tv_nsec;
  } else {
    result->tv_sec = end->tv_sec - start->tv_sec;
    result->tv_nsec = end->tv_nsec - start->tv_nsec;
  }
}

PublisherNode::PublisherNode(
    std::string const &publish_topic,
    double const runtime,
    int const period,
    int const chain,
    rclcpp::Node::SharedPtr const &node,
    rclcpp::CallbackGroup::SharedPtr const &callback_group)
    : CaseStudyNode(publish_topic, node)
    , runtime_(runtime)
    , period_(period)
    , chain_(chain) {
  logger = create_logger();
  pub_ = node->create_publisher<std_msgs::msg::Int32>(publish_topic, get_qos());

  auto const timer_callback = [this]() -> void {
    std::ostringstream ss;
    ss << "{\"operation\": \"start_work\", \"chain\": " << chain_
       << ", \"node\": \"" << get_name() << "\", \"count\": " << count_max_;
    std::chrono::nanoseconds const time_until_trigger = timer_->time_until_trigger();
    std::chrono::microseconds const time_until_trigger_us =
        std::chrono::duration_cast<std::chrono::microseconds>(time_until_trigger);
    ss << ", \"next_release_us\": " << time_until_trigger_us.count() << "}";
    log_entry(logger, ss.str());

    double used_runtime = runtime_;
    if (use_random_runtime) {
      if (rand() % 100 < runtime_over_chance * 100) {
        used_runtime *= over_runtime_scale;
      } else {
        used_runtime *= normal_runtime_scale;
      }
    }

    // RCLCPP_INFO(rclcpp::get_logger(get_name()), "running %s with runtime %f",
    //             get_name().c_str(), used_runtime);

    struct timespec start_time;
    clock_gettime(CLOCK_MONOTONIC, &start_time);

    dummy_load(used_runtime);

    std_msgs::msg::Int32 msg;
    msg.data = count_max_;
    // RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", msg.data.c_str());
    pub_->publish(msg);
    // TEST: can we take more than one msg from DDS?
    // pub_->publish(msg);

    ss.str("");
    ss << "{\"operation\": \"end_work\", \"chain\": " << chain_
       << ", \"node\": \"" << get_name() << "\", \"count\": " << count_max_
       << ", \"target_runtime\": " << runtime_ << "}";
    log_entry(logger, ss.str());

    count_max_--;
    if (count_max_ == 0) {
      rclcpp::shutdown();
    }

    /* struct timespec end_time;
    clock_gettime(CLOCK_MONOTONIC, &end_time);
    struct timespec diff;
    timespec_diff(&start_time, &end_time, &diff);
    size_t diff_ns = diff.tv_sec * 1000000000 + diff.tv_nsec;
    size_t diff_ms = diff_ns / 1000000;
    RCLCPP_INFO(rclcpp::get_logger(get_name()), "done running %s, took %lu ms",
                get_name().c_str(), diff_ms); */
  };

  if (callback_group != nullptr) {
    callback_group_ = callback_group;
  } else {
    callback_group_ = node->create_callback_group(
        rclcpp::CallbackGroupType::Reentrant);
  }

  timer_ = node->create_wall_timer(
      std::chrono::milliseconds(period),
      timer_callback,
      callback_group_);
}

WorkerNode::WorkerNode(
    std::string const &subscribe_topic,
    std::string const &publish_topic,
    double const runtime,
    int const period,
    int const chain,
    rclcpp::Node::SharedPtr const &node,
    rclcpp::CallbackGroup::SharedPtr const &callback_group)
    : CaseStudyNode(publish_topic, node)
    , runtime_(runtime)
    , period_(period)
    , chain_(chain) {
  logger = create_logger();

  auto const callback = [this](std_msgs::msg::Int32::SharedPtr const msg) -> void {
    // prevent unused variable warning
    (void)msg;

    std::ostringstream ss;
    ss << "{\"operation\": \"start_work\", \"chain\": " << chain_
       << ", \"node\": \"" << get_name() << "\", \"count\": " << msg->data
       << "}";
    log_entry(logger, ss.str());

    // RCLCPP_INFO(this->get_logger(), "I heard: '%s'", msg->data.c_str());
    // nth_prime_silly(runtime_);

    double used_runtime = runtime_;
    if (use_random_runtime) {
      if (rand() % 100 < runtime_over_chance * 100) {
        used_runtime *= over_runtime_scale;
      } else {
        used_runtime *= normal_runtime_scale;
      }
    }

    dummy_load(used_runtime);
    // RCLCPP_INFO(this->get_logger(), "Result: %d", result);

    auto new_msg = std_msgs::msg::Int32();
    new_msg.data = msg->data;
    pub_->publish(new_msg);

    ss.str("");
    ss << "{\"operation\": \"end_work\", \"chain\": " << chain_
       << ", \"node\": \"" << get_name() << "\", \"count\": " << msg->data
       << "}";
    log_entry(logger, ss.str());
  };

  rclcpp::SubscriptionOptions sub_options;
  if (callback_group) {
    callback_group_ = callback_group;
  } else {
    callback_group_ = node->create_callback_group(
        rclcpp::CallbackGroupType::Reentrant);
  }
  sub_options.callback_group = callback_group_;

  sub_ = node->create_subscription<std_msgs::msg::Int32>(
      subscribe_topic,
      get_qos(),
      callback,
      sub_options);

  pub_ = node->create_publisher<std_msgs::msg::Int32>(publish_topic, get_qos());
}
@@ -1 +1 @@
-Subproject commit a2741afc9a204758ec047cb25c6a85c92b3298a6
+Subproject commit 91c2a42d6bc0130cec32447373a0fec0def34a26