fix race conditions in guard condition handling (#325)

Authored by gerkey on 2017-04-27 18:47:13 -07:00; committed by GitHub
parent 675ad04c76
commit 100417a98d
2 changed files with 14 additions and 11 deletions

@@ -70,6 +70,12 @@ bool
 ClientBase::wait_for_service_nanoseconds(std::chrono::nanoseconds timeout)
 {
   auto start = std::chrono::steady_clock::now();
+  // make an event to reuse, rather than create a new one each time
+  auto node_ptr = node_graph_.lock();
+  if (!node_ptr) {
+    throw InvalidNodeError();
+  }
+  auto event = node_ptr->get_graph_event();
   // check to see if the server is ready immediately
   if (this->service_is_ready()) {
     return true;
@@ -78,12 +84,6 @@ ClientBase::wait_for_service_nanoseconds(std::chrono::nanoseconds timeout)
     // check was non-blocking, return immediately
     return false;
   }
-  // make an event to reuse, rather than create a new one each time
-  auto node_ptr = node_graph_.lock();
-  if (!node_ptr) {
-    throw InvalidNodeError();
-  }
-  auto event = node_ptr->get_graph_event();
   // update the time even on the first loop to account for time spent in the first call
   // to this->server_is_ready()
   std::chrono::nanoseconds time_to_wait = timeout - (std::chrono::steady_clock::now() - start);
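The client-side change is a reordering: the graph event is now obtained before the initial service_is_ready() check and the zero-timeout early return, rather than after them, presumably so that a graph change arriving between the check and the wait still lands on an event the client already holds. The sketch below is not rclcpp code; ToyEvent, wait_for_ready, and the callables passed into it are made-up stand-ins for get_graph_event() and service_is_ready(), used only to show why the subscription has to precede the snapshot.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>

// Stand-in for the graph event: set by the notifier, waited on by the client.
class ToyEvent
{
public:
  void set()
  {
    std::lock_guard<std::mutex> lock(mutex_);
    set_ = true;
    cv_.notify_all();
  }

  bool wait_for(std::chrono::nanoseconds timeout)
  {
    std::unique_lock<std::mutex> lock(mutex_);
    return cv_.wait_for(lock, timeout, [this] {return set_;});
  }

private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool set_ = false;
};

// Shape of the patched wait: register for change notifications first
// (analogue of get_graph_event()), then take the readiness snapshot
// (analogue of service_is_ready()), then wait on the event.
bool wait_for_ready(
  const std::function<ToyEvent &()> & get_event,
  const std::function<bool()> & is_ready,
  std::chrono::nanoseconds timeout)
{
  ToyEvent & event = get_event();  // subscribe before checking
  if (is_ready()) {
    return true;
  }
  // A readiness change that slipped in between the two calls above has
  // already set `event`, so this wait returns promptly instead of burning
  // the whole timeout.
  event.wait_for(timeout);
  return is_ready();
}

int main()
{
  ToyEvent event;
  std::atomic<bool> ready{false};

  std::thread notifier([&] {
      ready = true;   // the "service" appears...
      event.set();    // ...and the change notification fires
    });

  bool ok = wait_for_ready(
    [&]() -> ToyEvent & {return event;},
    [&] {return ready.load();},
    std::chrono::seconds(1));
  notifier.join();
  std::printf("service became ready: %s\n", ok ? "yes" : "no");
  return 0;
}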

@@ -205,13 +205,13 @@ NodeGraph::get_graph_event()
 {
   auto event = rclcpp::event::Event::make_shared();
   std::lock_guard<std::mutex> graph_changed_lock(graph_mutex_);
+  graph_events_.push_back(event);
+  graph_users_count_++;
   // on first call, add node to graph_listener_
   if (should_add_to_graph_listener_.exchange(false)) {
     graph_listener_->add_node(this);
     graph_listener_->start_if_not_started();
   }
-  graph_events_.push_back(event);
-  graph_users_count_++;
   return event;
 }
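In get_graph_event() the fix is again an ordering change under the same lock: the new event is pushed into graph_events_ and counted before the node is handed to the graph listener, so a notification delivered as soon as the node is added already has a registered event to mark. A schematic of that "register the subscriber before enabling the notifier" pattern, with made-up names (Registry, Subscriber) rather than the real rclcpp types:

#include <memory>
#include <mutex>
#include <vector>

struct Subscriber { /* placeholder for the event object */ };

class Registry
{
public:
  std::shared_ptr<Subscriber> add_subscriber()
  {
    auto sub = std::make_shared<Subscriber>();
    std::lock_guard<std::mutex> lock(mutex_);
    subscribers_.push_back(sub);   // 1) make the subscriber visible first
    if (!listening_) {
      listening_ = true;           // 2) only then turn the notifier on
      // (the real code adds the node to graph_listener_ at this point)
    }
    return sub;
  }

private:
  std::mutex mutex_;
  std::vector<std::shared_ptr<Subscriber>> subscribers_;
  bool listening_ = false;
};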
@@ -238,10 +238,13 @@ NodeGraph::wait_for_graph_change(
       throw EventNotRegisteredError();
     }
   }
-  std::unique_lock<std::mutex> graph_lock(graph_mutex_);
-  graph_cv_.wait_for(graph_lock, timeout, [&event]() {
+  auto pred = [&event]() {
     return event->check() || !rclcpp::utilities::ok();
-  });
+  };
+  std::unique_lock<std::mutex> graph_lock(graph_mutex_);
+  if (!pred()) {
+    graph_cv_.wait_for(graph_lock, timeout, pred);
+  }
 }

 size_t
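The last hunk names the predicate and evaluates it once under the lock before blocking on graph_cv_, so a condition that already holds (the event is set, or rclcpp is shutting down) is observed without entering the wait. The fragment below only mirrors that generic shape with placeholder names (m, cv, done); it is not the rclcpp implementation, and nothing here ever sets done.

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool done = false;   // placeholder for event->check() || !rclcpp::utilities::ok()

void wait_for_change(std::chrono::nanoseconds timeout)
{
  auto pred = [] {return done;};         // name the predicate once
  std::unique_lock<std::mutex> lock(m);  // take the lock, then...
  if (!pred()) {                         // ...check before blocking, exactly
    cv.wait_for(lock, timeout, pred);    // as the patched code does
  }
}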