diff --git a/src/core/ddsc/src/dds_init.c b/src/core/ddsc/src/dds_init.c
index aa9662a..b23c38d 100644
--- a/src/core/ddsc/src/dds_init.c
+++ b/src/core/ddsc/src/dds_init.c
@@ -105,19 +105,22 @@ dds_init(void)
 
   ut_avlInit(&dds_domaintree_def, &dds_global.m_domains);
 
-  /* Start monitoring the liveliness of all threads and renewing the
-     service lease if everything seems well. */
-
-  gv.servicelease = nn_servicelease_new(0, 0);
-  if (gv.servicelease == NULL)
+  /* Start monitoring the liveliness of all threads. */
+  if (!config.liveliness_monitoring)
+    gv.servicelease = NULL;
+  else
   {
-    ret = DDS_ERRNO(DDS_RETCODE_OUT_OF_RESOURCES, "Failed to create a servicelease.");
-    goto fail_servicelease_new;
-  }
-  if (nn_servicelease_start_renewing(gv.servicelease) < 0)
-  {
-    ret = DDS_ERRNO(DDS_RETCODE_ERROR, "Failed to start the servicelease.");
-    goto fail_servicelease_start;
+    gv.servicelease = nn_servicelease_new(0, 0);
+    if (gv.servicelease == NULL)
+    {
+      ret = DDS_ERRNO(DDS_RETCODE_OUT_OF_RESOURCES, "Failed to create a servicelease.");
+      goto fail_servicelease_new;
+    }
+    if (nn_servicelease_start_renewing(gv.servicelease) < 0)
+    {
+      ret = DDS_ERRNO(DDS_RETCODE_ERROR, "Failed to start the servicelease.");
+      goto fail_servicelease_start;
+    }
   }
 
   if (rtps_init() < 0)
@@ -159,7 +162,8 @@ skip:
 
 fail_rtps_init:
 fail_servicelease_start:
-  nn_servicelease_free (gv.servicelease);
+  if (gv.servicelease)
+    nn_servicelease_free (gv.servicelease);
   gv.servicelease = NULL;
 fail_servicelease_new:
   thread_states_fini();
@@ -195,7 +199,8 @@ extern void dds_fini (void)
 
   dds__builtin_fini();
   rtps_term ();
-  nn_servicelease_free (gv.servicelease);
+  if (gv.servicelease)
+    nn_servicelease_free (gv.servicelease);
   gv.servicelease = NULL;
   downgrade_main_thread ();
   thread_states_fini ();
diff --git a/src/core/ddsi/include/ddsi/q_config.h b/src/core/ddsi/include/ddsi/q_config.h
index b23e812..12d7835 100644
--- a/src/core/ddsi/include/ddsi/q_config.h
+++ b/src/core/ddsi/include/ddsi/q_config.h
@@ -256,6 +256,7 @@ struct config
   int squash_participants;
   int startup_mode_full;
   int forward_all_messages;
+  int liveliness_monitoring;
   int noprogress_log_stacktraces;
   int prioritize_retransmit;
   int xpack_send_async;
diff --git a/src/core/ddsi/src/q_config.c b/src/core/ddsi/src/q_config.c
index 44ae570..58a7e2e 100644
--- a/src/core/ddsi/src/q_config.c
+++ b/src/core/ddsi/src/q_config.c
@@ -491,6 +491,12 @@ static const struct cfgelem heartbeat_interval_attrs[] = {
   END_MARKER
 };
 
+static const struct cfgelem liveliness_monitoring_attrs[] = {
+  { LEAF("StackTraces"), 1, "true", ABSOFF(noprogress_log_stacktraces), 0, uf_boolean, 0, pf_boolean,
+    "This element controls whether or not to write stack traces to the DDSI2 trace when a thread fails to make progress (on select platforms only)." },
+  END_MARKER
+};
+
 static const struct cfgelem unsupp_cfgelems[] = {
 { MOVED("MaxMessageSize", "General/MaxMessageSize") },
 { MOVED("FragmentSize", "General/FragmentSize") },
@@ -585,8 +591,8 @@ static const struct cfgelem unsupp_cfgelems[] = {
 "This setting controls the maximum (CDR) serialised size of samples that DDSI2E will forward in either direction. Samples larger than this are discarded with a warning." },
 { LEAF("WriteBatch"), 1, "false", ABSOFF(whc_batch), 0, uf_boolean, 0, pf_boolean,
 "This element enables the batching of write operations. By default each write operation writes through the write cache and out onto the transport. Enabling write batching causes multiple small write operations to be aggregated within the write cache into a single larger write. This gives greater throughput at the expense of latency. Currently there is no mechanism for the write cache to automatically flush itself, so that if write batching is enabled, the application may have to use the dds_write_flush function to ensure that all samples are written." },
-{ LEAF("LogStackTraces"), 1, "true", ABSOFF(noprogress_log_stacktraces), 0, uf_boolean, 0, pf_boolean,
-"This element controls whether or not to write stack traces to the DDSI2 trace when a thread fails to make progress (on select platforms only)." },
+{ LEAF_W_ATTRS("LivelinessMonitoring", liveliness_monitoring_attrs), 1, "false", ABSOFF(liveliness_monitoring), 0, uf_boolean, 0, pf_boolean,
+"This element controls whether or not the implementation should internally monitor its own liveliness. If liveliness monitoring is enabled, stack traces can be dumped automatically when some thread appears to have stopped making progress." },
 { LEAF("MonitorPort"), 1, "-1", ABSOFF(monitor_port), 0, uf_int, 0, pf_int,
 "This element allows configuring a service that dumps a text description of part of the internal state to TCP clients. By default (-1), this is disabled; specifying 0 means a kernel-allocated port is used; a positive number is used as the TCP port number." },
 { LEAF("AssumeMulticastCapable"), 1, "", ABSOFF(assumeMulticastCapable), 0, uf_string, ff_free, pf_string,
diff --git a/src/core/ddsi/src/q_thread.c b/src/core/ddsi/src/q_thread.c
index 21b617b..c9e34dc 100644
--- a/src/core/ddsi/src/q_thread.c
+++ b/src/core/ddsi/src/q_thread.c
@@ -295,7 +295,7 @@ static void reap_thread_state (_Inout_ struct thread_state1 *ts1, _In_ int sync_
 {
   os_mutexLock (&thread_states.lock);
   ts1->state = THREAD_STATE_ZERO;
-  if (sync_with_servicelease)
+  if (sync_with_servicelease && gv.servicelease)
     nn_servicelease_statechange_barrier (gv.servicelease);
   if (ts1->name != main_thread_name)
     os_free (ts1->name);