diff --git a/src/core/ddsi/include/dds/ddsi/q_config.h b/src/core/ddsi/include/dds/ddsi/q_config.h
index d7cd25d..a83fbf8 100644
--- a/src/core/ddsi/include/dds/ddsi/q_config.h
+++ b/src/core/ddsi/include/dds/ddsi/q_config.h
@@ -330,7 +330,6 @@ struct config
   uint32_t rmsg_chunk_size; /**<< size of a chunk in the receive buffer */
   uint32_t rbuf_size; /* << size of a single receiver buffer */
   enum besmode besmode;
-  int conservative_builtin_reader_startup;
   int meas_hb_to_ack_latency;
   int unicast_response_to_spdp_messages;
   int synchronous_delivery_priority_threshold;
diff --git a/src/core/ddsi/include/dds/ddsi/q_entity.h b/src/core/ddsi/include/dds/ddsi/q_entity.h
index 4c0c4bd..603b88f 100644
--- a/src/core/ddsi/include/dds/ddsi/q_entity.h
+++ b/src/core/ddsi/include/dds/ddsi/q_entity.h
@@ -123,7 +123,6 @@ struct pwr_rd_match {
   union {
     struct {
       seqno_t end_of_tl_seq; /* when seq >= end_of_tl_seq, it's in sync, =0 when not tl */
-      seqno_t end_of_out_of_sync_seq; /* when seq >= end_of_tl_seq, it's in sync, =0 when not tl */
       struct nn_reorder *reorder; /* can be done (mostly) per proxy writer, but that is harder; only when state=OUT_OF_SYNC */
     } not_in_sync;
   } u;
@@ -574,6 +573,9 @@ uint64_t writer_instance_id (const struct nn_guid *guid);
    rebuild them all (which only makes sense after previously having emptied them all). */
 void rebuild_or_clear_writer_addrsets(int rebuild);
+
+void local_reader_ary_setfastpath_ok (struct local_reader_ary *x, bool fastpath_ok);
+
 #if defined (__cplusplus)
 }
 #endif
diff --git a/src/core/ddsi/src/q_config.c b/src/core/ddsi/src/q_config.c
index 382d2c1..98f030b 100644
--- a/src/core/ddsi/src/q_config.c
+++ b/src/core/ddsi/src/q_config.c
@@ -509,9 +509,9 @@ static const struct cfgelem unsupp_cfgelems[] = {
   { MOVED("FragmentSize", "CycloneDDS/General/FragmentSize") },
   { LEAF("DeliveryQueueMaxSamples"), 1, "256", ABSOFF(delivery_queue_maxsamples), 0, uf_uint, 0, pf_uint,
     BLURB("This element controls the maximum size of a delivery queue, expressed in samples. Once a delivery queue is full, incoming samples destined for that queue are dropped until space becomes available again.") },
") }, - { LEAF("PrimaryReorderMaxSamples"), 1, "64", ABSOFF(primary_reorder_maxsamples), 0, uf_uint, 0, pf_uint, + { LEAF("PrimaryReorderMaxSamples"), 1, "128", ABSOFF(primary_reorder_maxsamples), 0, uf_uint, 0, pf_uint, BLURB("This element sets the maximum size in samples of a primary re-order administration. Each proxy writer has one primary re-order administration to buffer the packet flow in case some packets arrive out of order. Old samples are forwarded to secondary re-order administrations associated with readers in need of historical data.
") }, - { LEAF("SecondaryReorderMaxSamples"), 1, "16", ABSOFF(secondary_reorder_maxsamples), 0, uf_uint, 0, pf_uint, + { LEAF("SecondaryReorderMaxSamples"), 1, "128", ABSOFF(secondary_reorder_maxsamples), 0, uf_uint, 0, pf_uint, BLURB("This element sets the maximum size in samples of a secondary re-order administration. The secondary re-order administration is per reader in need of historical data.
") }, { LEAF("DefragUnreliableMaxSamples"), 1, "4", ABSOFF(defrag_unreliable_maxsamples), 0, uf_uint, 0, pf_uint, BLURB("This element sets the maximum number of samples that can be defragmented simultaneously for a best-effort writers.
") }, @@ -523,9 +523,6 @@ static const struct cfgelem unsupp_cfgelems[] = {The default is writers, as this is thought to be compliant and reasonably efficient. Minimal may or may not be compliant but is most efficient, and full is inefficient but certain to be compliant. See also Internal/ConservativeBuiltinReaderStartup.
") }, - { LEAF("ConservativeBuiltinReaderStartup"), 1, "false", ABSOFF(conservative_builtin_reader_startup), 0, uf_boolean, 0, pf_boolean, - BLURB("This element forces all DDSI2E built-in discovery-related readers to request all historical data, instead of just one for each \"topic\". There is no indication that any of the current DDSI implementations requires changing of this setting, but it is conceivable that an implementation might track which participants have been informed of the existence of endpoints and which have not been, refusing communication with those that have \"can't\" know.
\n\ -Should it be necessary to hide DDSI2E's shared discovery behaviour, set this to true and Internal/BuiltinEndpointSet to full.
") }, { LEAF("MeasureHbToAckLatency"), 1, "false", ABSOFF(meas_hb_to_ack_latency), 0, uf_boolean, 0, pf_boolean, BLURB("This element enables heartbeat-to-ack latency among DDSI2E services by prepending timestamps to Heartbeat and AckNack messages and calculating round trip times. This is non-standard behaviour. The measured latencies are quite noisy and are currently not used anywhere.
") }, { LEAF("UnicastResponseToSPDPMessages"), 1, "true", ABSOFF(unicast_response_to_spdp_messages), 0, uf_boolean, 0, pf_boolean, diff --git a/src/core/ddsi/src/q_debmon.c b/src/core/ddsi/src/q_debmon.c index 35ed218..9ea929f 100644 --- a/src/core/ddsi/src/q_debmon.c +++ b/src/core/ddsi/src/q_debmon.c @@ -286,7 +286,7 @@ static int print_proxy_participants (struct thread_state1 * const ts1, ddsi_tran x += cpf (conn, " tl-catchup end_of_tl_seq %lld\n", m->u.not_in_sync.end_of_tl_seq); break; case PRMSS_OUT_OF_SYNC: - x += cpf (conn, " out-of-sync end_of_tl_seq %lld end_of_out_of_sync_seq %lld\n", m->u.not_in_sync.end_of_tl_seq, m->u.not_in_sync.end_of_out_of_sync_seq); + x += cpf (conn, " out-of-sync end_of_tl_seq %lld\n", m->u.not_in_sync.end_of_tl_seq); break; } } diff --git a/src/core/ddsi/src/q_entity.c b/src/core/ddsi/src/q_entity.c index f7d3480..9dc0024 100644 --- a/src/core/ddsi/src/q_entity.c +++ b/src/core/ddsi/src/q_entity.c @@ -235,6 +235,14 @@ void local_reader_ary_remove (struct local_reader_ary *x, struct reader *rd) ddsrt_mutex_unlock (&x->rdary_lock); } +void local_reader_ary_setfastpath_ok (struct local_reader_ary *x, bool fastpath_ok) +{ + ddsrt_mutex_lock (&x->rdary_lock); + if (x->valid) + x->fastpath_ok = fastpath_ok; + ddsrt_mutex_unlock (&x->rdary_lock); +} + void local_reader_ary_setinvalid (struct local_reader_ary *x) { ddsrt_mutex_lock (&x->rdary_lock); @@ -1492,9 +1500,17 @@ static void proxy_writer_drop_connection (const struct nn_guid *pwr_guid, struct { ddsrt_avl_delete (&pwr_readers_treedef, &pwr->readers, m); if (m->in_sync != PRMSS_SYNC) - pwr->n_readers_out_of_sync--; + { + if (--pwr->n_readers_out_of_sync == 0) + local_reader_ary_setfastpath_ok (&pwr->rdary, true); + } if (rd->reliable) pwr->n_reliable_readers--; + /* If no reliable readers left, there is no reason to believe the heartbeats will keep + coming and therefore reset have_seen_heartbeat so the next reader to be created + doesn't get initialised based on stale data */ + if (pwr->n_reliable_readers == 0) + pwr->have_seen_heartbeat = 0; local_reader_ary_remove (&pwr->rdary, rd); } ddsrt_mutex_unlock (&pwr->e.lock); @@ -1775,7 +1791,6 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader { struct pwr_rd_match *m = ddsrt_malloc (sizeof (*m)); ddsrt_avl_ipath_t path; - seqno_t last_deliv_seq; ddsrt_mutex_lock (&pwr->e.lock); if (ddsrt_avl_lookup_ipath (&pwr_readers_treedef, &pwr->readers, &rd->e.guid, &path)) @@ -1794,7 +1809,6 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader m->rd_guid = rd->e.guid; m->tcreate = now_mt (); - /* We track the last heartbeat count value per reader--proxy-writer pair, so that we can correctly handle directed heartbeats. The only reason to bother is to prevent a directed heartbeat (with @@ -1813,34 +1827,61 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader /* These can change as a consequence of handling data and/or discovery activities. 
@@ -1492,9 +1500,17 @@ static void proxy_writer_drop_connection (const struct nn_guid *pwr_guid, struct
   {
     ddsrt_avl_delete (&pwr_readers_treedef, &pwr->readers, m);
     if (m->in_sync != PRMSS_SYNC)
-      pwr->n_readers_out_of_sync--;
+    {
+      if (--pwr->n_readers_out_of_sync == 0)
+        local_reader_ary_setfastpath_ok (&pwr->rdary, true);
+    }
     if (rd->reliable)
       pwr->n_reliable_readers--;
+    /* If no reliable readers left, there is no reason to believe the heartbeats will keep
+       coming and therefore reset have_seen_heartbeat so the next reader to be created
+       doesn't get initialised based on stale data */
+    if (pwr->n_reliable_readers == 0)
+      pwr->have_seen_heartbeat = 0;
     local_reader_ary_remove (&pwr->rdary, rd);
   }
   ddsrt_mutex_unlock (&pwr->e.lock);
@@ -1775,7 +1791,6 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader
 {
   struct pwr_rd_match *m = ddsrt_malloc (sizeof (*m));
   ddsrt_avl_ipath_t path;
-  seqno_t last_deliv_seq;
 
   ddsrt_mutex_lock (&pwr->e.lock);
   if (ddsrt_avl_lookup_ipath (&pwr_readers_treedef, &pwr->readers, &rd->e.guid, &path))
@@ -1794,7 +1809,6 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader
 
   m->rd_guid = rd->e.guid;
   m->tcreate = now_mt ();
-
   /* We track the last heartbeat count value per reader--proxy-writer
      pair, so that we can correctly handle directed heartbeats. The
      only reason to bother is to prevent a directed heartbeat (with
@@ -1813,34 +1827,61 @@ static void proxy_writer_add_connection (struct proxy_writer *pwr, struct reader
   /* These can change as a consequence of handling data and/or
      discovery activities. The safe way of dealing with them is to
      lock the proxy writer */
-  last_deliv_seq = nn_reorder_next_seq (pwr->reorder) - 1;
-  if (!rd->handle_as_transient_local)
+  if (is_builtin_entityid (rd->e.guid.entityid, NN_VENDORID_ECLIPSE) && !ddsrt_avl_is_empty (&pwr->readers))
   {
+    /* builtins really don't care about multiple copies or anything */
     m->in_sync = PRMSS_SYNC;
   }
-  else if (!config.conservative_builtin_reader_startup && is_builtin_entityid (rd->e.guid.entityid, NN_VENDORID_ECLIPSE) && !ddsrt_avl_is_empty (&pwr->readers))
+  else if (!pwr->have_seen_heartbeat)
   {
-    /* builtins really don't care about multiple copies */
+    /* Proxy writer hasn't seen a heartbeat yet: means we have no
+       clue from what sequence number to start accepting data, nor
+       where historical data ends and live data begins.
+
+       A transient-local reader should always get all historical
+       data, and so can always start out as "out-of-sync". Cyclone
+       refuses to retransmit already ACK'd samples to a Cyclone
+       reader, so if the other side is Cyclone, we can always start
+       from sequence number 1.
+
+       For non-Cyclone, if the reader is volatile, we have to just
+       start from the most recent sample, even though that means
+       the first samples written after matching the reader may be
+       lost. The alternative not only gets too much historical data
+       but may also result in "sample lost" notifications, because the
+       writer may not be retaining samples on behalf of this reader
+       for the oldest samples, and so this reader may end up with a
+       partial set of old-ish samples. Even when both are using
+       KEEP_ALL and the connection doesn't fail ... */
+    if (rd->handle_as_transient_local)
+      m->in_sync = PRMSS_OUT_OF_SYNC;
+    else if (vendor_is_eclipse (pwr->c.vendor))
+      m->in_sync = PRMSS_OUT_OF_SYNC;
+    else
+      m->in_sync = PRMSS_SYNC;
+    m->u.not_in_sync.end_of_tl_seq = MAX_SEQ_NUMBER;
+  }
+  else if (!rd->handle_as_transient_local)
+  {
+    /* volatile reader, writer has seen a heartbeat: it's in sync
+       (there is a risk of it getting some historical data: that
+       happens to be cached in the writer's reorder admin at this
+       point) */
     m->in_sync = PRMSS_SYNC;
   }
   else
   {
-    /* normal transient-local, reader is behind proxy writer */
+    /* transient-local reader; range of sequence numbers is already
+       known */
     m->in_sync = PRMSS_OUT_OF_SYNC;
-    if (last_deliv_seq == 0)
-    {
-      m->u.not_in_sync.end_of_out_of_sync_seq = MAX_SEQ_NUMBER;
-      m->u.not_in_sync.end_of_tl_seq = MAX_SEQ_NUMBER;
-    }
-    else
-    {
-      m->u.not_in_sync.end_of_tl_seq = pwr->last_seq;
-      m->u.not_in_sync.end_of_out_of_sync_seq = last_deliv_seq;
-    }
-    DDS_LOG(DDS_LC_DISCOVERY, " - out-of-sync %"PRId64, m->u.not_in_sync.end_of_out_of_sync_seq);
+    m->u.not_in_sync.end_of_tl_seq = pwr->last_seq;
   }
   if (m->in_sync != PRMSS_SYNC)
+  {
+    DDS_LOG(DDS_LC_DISCOVERY, " - out-of-sync");
     pwr->n_readers_out_of_sync++;
+    local_reader_ary_setfastpath_ok (&pwr->rdary, false);
+  }
   m->count = init_count;
   /* Spec says we may send a pre-emptive AckNack (8.4.2.3.4), hence we
      schedule it for the configured delay * T_MILLISECOND. From then
@@ -3510,7 +3551,6 @@ void new_proxy_participant
 
   proxypp->plist = nn_plist_dup (plist);
   ddsrt_avl_init (&proxypp_groups_treedef, &proxypp->groups);
-
   if (custom_flags & CF_INC_KERNEL_SEQUENCE_NUMBERS)
     proxypp->kernel_sequence_numbers = 1;
   else
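The rewritten branch structure in proxy_writer_add_connection amounts to a small decision table. A compact restatement (hypothetical helper; assumes the sync-state enum behind the PRMSS_* constants declared in q_entity.h):

static enum pwr_rd_match_syncstate sketch_initial_state
  (bool builtin_duplicate, bool have_seen_heartbeat, bool transient_local, bool writer_is_cyclone)
{
  if (builtin_duplicate)     /* another builtin reader already matched this writer */
    return PRMSS_SYNC;
  if (!have_seen_heartbeat)  /* history range unknown; end_of_tl_seq = MAX_SEQ_NUMBER */
    return (transient_local || writer_is_cyclone) ? PRMSS_OUT_OF_SYNC : PRMSS_SYNC;
  return transient_local ? PRMSS_OUT_OF_SYNC : PRMSS_SYNC;
}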
diff --git a/src/core/ddsi/src/q_receive.c b/src/core/ddsi/src/q_receive.c
index 842d372..58ae5c3 100644
--- a/src/core/ddsi/src/q_receive.c
+++ b/src/core/ddsi/src/q_receive.c
@@ -74,7 +74,7 @@ Notes:
 
 */
 
-static void deliver_user_data_synchronously (struct nn_rsample_chain *sc);
+static void deliver_user_data_synchronously (struct nn_rsample_chain *sc, const nn_guid_t *rdguid);
 
 static void maybe_set_reader_in_sync (struct proxy_writer *pwr, struct pwr_rd_match *wn, seqno_t last_deliv_seq)
 {
@@ -87,11 +87,13 @@ static void maybe_set_reader_in_sync (struct proxy_writer *pwr, struct pwr_rd_ma
       if (last_deliv_seq >= wn->u.not_in_sync.end_of_tl_seq)
       {
         wn->in_sync = PRMSS_SYNC;
-        pwr->n_readers_out_of_sync--;
+        if (--pwr->n_readers_out_of_sync == 0)
+          local_reader_ary_setfastpath_ok (&pwr->rdary, true);
       }
       break;
     case PRMSS_OUT_OF_SYNC:
-      if (nn_reorder_next_seq (wn->u.not_in_sync.reorder) - 1 >= wn->u.not_in_sync.end_of_out_of_sync_seq)
+      assert (nn_reorder_next_seq (wn->u.not_in_sync.reorder) <= nn_reorder_next_seq (pwr->reorder));
+      if (pwr->have_seen_heartbeat && nn_reorder_next_seq (wn->u.not_in_sync.reorder) == nn_reorder_next_seq (pwr->reorder))
       {
         DDS_TRACE(" msr_in_sync("PGUIDFMT" out-of-sync to tlcatchup)", PGUID (wn->rd_guid));
         wn->in_sync = PRMSS_TLCATCHUP;
@@ -887,6 +889,8 @@ static int handle_AckNack (struct receiver_state *rst, nn_etime_t tnow, const Ac
        a future request'll fix it. */
     enqueued = 1;
     seq_xmit = READ_SEQ_XMIT(wr);
+    const bool gap_for_already_acked = vendor_is_eclipse (rst->vendor) && prd->c.xqos->durability.kind == NN_VOLATILE_DURABILITY_QOS && seqbase <= rn->seq;
+    const seqno_t min_seq_to_rexmit = gap_for_already_acked ? rn->seq + 1 : 0;
     for (i = 0; i < numbits && seqbase + i <= seq_xmit && enqueued; i++)
     {
       /* Accelerated schedule may run ahead of sequence number set
@@ -897,7 +901,7 @@ static int handle_AckNack (struct receiver_state *rst, nn_etime_t tnow, const Ac
       {
         seqno_t seq = seqbase + i;
         struct whc_borrowed_sample sample;
-        if (whc_borrow_sample (wr->whc, seq, &sample))
+        if (seqbase + i >= min_seq_to_rexmit && whc_borrow_sample (wr->whc, seq, &sample))
         {
           if (!wr->retransmitting && sample.unacked)
             writer_set_retransmitting (wr);
@@ -1176,40 +1180,45 @@ static int handle_Heartbeat (struct receiver_state *rst, nn_etime_t tnow, struct
 
   if (!rst->forme)
   {
-    DDS_TRACE(""PGUIDFMT" -> "PGUIDFMT" not-for-me)", PGUID (src), PGUID (dst));
+    DDS_TRACE(PGUIDFMT" -> "PGUIDFMT" not-for-me)", PGUID (src), PGUID (dst));
     return 1;
   }
 
   if ((pwr = ephash_lookup_proxy_writer_guid (&src)) == NULL)
   {
-    DDS_TRACE(""PGUIDFMT"? -> "PGUIDFMT")", PGUID (src), PGUID (dst));
+    DDS_TRACE(PGUIDFMT"? -> "PGUIDFMT")", PGUID (src), PGUID (dst));
     return 1;
   }
 
-  /* liveliness is still only implemented partially (with all set to AUTOMATIC, BY_PARTICIPANT, &c.), so we simply renew the proxy participant's lease. */
+  /* liveliness is still only implemented partially (with all set to AUTOMATIC,
+     BY_PARTICIPANT, &c.), so we simply renew the proxy participant's lease. */
   if (pwr->assert_pp_lease)
     lease_renew (ddsrt_atomic_ldvoidp (&pwr->c.proxypp->lease), tnow);
 
-  DDS_TRACE(""PGUIDFMT" -> "PGUIDFMT":", PGUID (src), PGUID (dst));
+  DDS_TRACE(PGUIDFMT" -> "PGUIDFMT":", PGUID (src), PGUID (dst));
   ddsrt_mutex_lock (&pwr->e.lock);
 
+  if (pwr->n_reliable_readers == 0)
+  {
+    DDS_TRACE(PGUIDFMT" -> "PGUIDFMT" no-reliable-readers)", PGUID (src), PGUID (dst));
+    ddsrt_mutex_unlock (&pwr->e.lock);
+    return 1;
+  }
+
   if (!pwr->have_seen_heartbeat)
   {
     struct nn_rdata *gap;
     struct nn_rsample_chain sc;
     int refc_adjust = 0;
     nn_reorder_result_t res;
-
     nn_defrag_notegap (pwr->defrag, 1, lastseq + 1);
     gap = nn_rdata_newgap (rmsg);
-    if ((res = nn_reorder_gap (&sc, pwr->reorder, gap, 1, lastseq + 1, &refc_adjust)) > 0)
-    {
-      if (pwr->deliver_synchronously)
-        deliver_user_data_synchronously (&sc);
-      else
-        nn_dqueue_enqueue (pwr->dqueue, &sc, res);
-    }
+    res = nn_reorder_gap (&sc, pwr->reorder, gap, 1, lastseq + 1, &refc_adjust);
+    /* proxy writer is not accepting data until it has received a heartbeat, so
+       there can't be any data to deliver */
+    assert (res <= 0);
+    (void) res;
     nn_fragchain_adjust_refcount (gap, refc_adjust);
     pwr->have_seen_heartbeat = 1;
   }
@@ -1243,7 +1252,7 @@ static int handle_Heartbeat (struct receiver_state *rst, nn_etime_t tnow, struct
     if ((res = nn_reorder_gap (&sc, pwr->reorder, gap, 1, firstseq, &refc_adjust)) > 0)
     {
       if (pwr->deliver_synchronously)
-        deliver_user_data_synchronously (&sc);
+        deliver_user_data_synchronously (&sc, NULL);
       else
         nn_dqueue_enqueue (pwr->dqueue, &sc, res);
     }
@@ -1262,13 +1271,18 @@ static int handle_Heartbeat (struct receiver_state *rst, nn_etime_t tnow, struct
       case PRMSS_OUT_OF_SYNC: {
         struct nn_reorder *ro = wn->u.not_in_sync.reorder;
         if ((res = nn_reorder_gap (&sc, ro, gap, 1, firstseq, &refc_adjust)) > 0)
-          nn_dqueue_enqueue1 (pwr->dqueue, &wn->rd_guid, &sc, res);
+        {
+          if (pwr->deliver_synchronously)
+            deliver_user_data_synchronously (&sc, &wn->rd_guid);
+          else
+            nn_dqueue_enqueue1 (pwr->dqueue, &wn->rd_guid, &sc, res);
+        }
         last_deliv_seq = nn_reorder_next_seq (wn->u.not_in_sync.reorder) - 1;
       }
     }
     if (wn->u.not_in_sync.end_of_tl_seq == MAX_SEQ_NUMBER)
     {
-      wn->u.not_in_sync.end_of_out_of_sync_seq = wn->u.not_in_sync.end_of_tl_seq = fromSN (msg->lastSN);
+      wn->u.not_in_sync.end_of_tl_seq = fromSN (msg->lastSN);
       DDS_TRACE(" end-of-tl-seq(rd "PGUIDFMT" #%"PRId64")", PGUID(wn->rd_guid), wn->u.not_in_sync.end_of_tl_seq);
     }
     maybe_set_reader_in_sync (pwr, wn, last_deliv_seq);
@@ -1591,7 +1605,7 @@ static int handle_one_gap (struct proxy_writer *pwr, struct pwr_rd_match *wn, se
   if ((res = nn_reorder_gap (&sc, pwr->reorder, gap, a, b, refc_adjust)) > 0)
   {
     if (pwr->deliver_synchronously)
-      deliver_user_data_synchronously (&sc);
+      deliver_user_data_synchronously (&sc, NULL);
     else
       nn_dqueue_enqueue (pwr->dqueue, &sc, res);
   }
@@ -1617,13 +1631,15 @@ static int handle_one_gap (struct proxy_writer *pwr, struct pwr_rd_match *wn, se
     case PRMSS_TLCATCHUP:
       break;
     case PRMSS_OUT_OF_SYNC:
-      if (a <= wn->u.not_in_sync.end_of_out_of_sync_seq)
+      if ((res = nn_reorder_gap (&sc, wn->u.not_in_sync.reorder, gap, a, b, refc_adjust)) > 0)
       {
-        if ((res = nn_reorder_gap (&sc, wn->u.not_in_sync.reorder, gap, a, b, refc_adjust)) > 0)
+        if (pwr->deliver_synchronously)
+          deliver_user_data_synchronously (&sc, &wn->rd_guid);
+        else
           nn_dqueue_enqueue1 (pwr->dqueue, &wn->rd_guid, &sc, res);
-        if (res >= 0)
-          gap_was_valuable = 1;
       }
+      if (res >= 0)
+        gap_was_valuable = 1;
       break;
   }
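The effect of the new guard in handle_AckNack can be checked with made-up numbers; a self-contained sketch (hypothetical values, not library code):

#include <stdbool.h>
#include <stdint.h>

int main (void)
{
  /* a volatile Cyclone reader ACK'd everything up to and including 10
     (rn->seq == 10) but then NACKs 8..12 (seqbase == 8) */
  const int64_t rn_seq = 10, seqbase = 8;
  const bool gap_for_already_acked = /* vendor_is_eclipse && volatile && */ (seqbase <= rn_seq);
  const int64_t min_seq_to_rexmit = gap_for_already_acked ? rn_seq + 1 : 0;
  /* 8..10 are answered with a Gap (the reader ACK'd them before and is
     volatile, so it cannot need them); 11 and 12 remain eligible for
     retransmission */
  return (min_seq_to_rexmit == 11) ? 0 : 1;
}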
@@ -1970,94 +1986,90 @@ static int deliver_user_data (const struct nn_rsample_info *sampleinfo, const st
     goto no_payload;
   }
 
-  /* Generate the DDS_SampleInfo (which is faked to some extent
-     because we don't actually have a data reader); also note that
-     the PRISMTECH_WRITER_INFO thing is completely meaningless to
-     us */
+  /* Generate the DDS_SampleInfo (which is faked to some extent
+     because we don't actually have a data reader); also note that
+     the PRISMTECH_WRITER_INFO thing is completely meaningless to
+     us */
+  struct ddsi_tkmap_instance * tk;
+  if ((tk = ddsi_tkmap_lookup_instance_ref(payload)) != NULL)
   {
-    struct ddsi_tkmap_instance * tk;
-    tk = ddsi_tkmap_lookup_instance_ref(payload);
-    if (tk)
+    struct proxy_writer_info pwr_info;
+    make_proxy_writer_info(&pwr_info, &pwr->e, pwr->c.xqos);
+
+    if (rdguid == NULL)
     {
-      struct proxy_writer_info pwr_info;
-      make_proxy_writer_info(&pwr_info, &pwr->e, pwr->c.xqos);
+      DDS_TRACE(" %"PRId64"=>EVERYONE\n", sampleinfo->seq);
 
-      if (rdguid == NULL)
+      /* FIXME: pwr->rdary is an array of pointers to attached
+         readers. There's only one thread delivering data for the
+         proxy writer (as long as there is only one receive thread),
+         so could get away with not locking at all, and doing safe
+         updates + GC of rdary instead. */
+
+      /* Retry loop, for re-delivery of rejected reliable samples. Is a
+         temporary hack till throttling back of writer is implemented
+         (with late acknowledgement of sample and nack). */
+    retry:
+
+      ddsrt_mutex_lock (&pwr->rdary.rdary_lock);
+      if (pwr->rdary.fastpath_ok)
       {
-        DDS_TRACE(" %"PRId64"=>EVERYONE\n", sampleinfo->seq);
-
-        /* FIXME: pwr->rdary is an array of pointers to attached
-           readers. There's only one thread delivering data for the
-           proxy writer (as long as there is only one receive thread),
-           so could get away with not locking at all, and doing safe
-           updates + GC of rdary instead. */
-
-        /* Retry loop, for re-delivery of rejected reliable samples. Is a
-           temporary hack till throttling back of writer is implemented
-           (with late acknowledgement of sample and nack). */
-retry:
-
-        ddsrt_mutex_lock (&pwr->rdary.rdary_lock);
-        if (pwr->rdary.fastpath_ok)
+        struct reader ** const rdary = pwr->rdary.rdary;
+        unsigned i;
+        for (i = 0; rdary[i]; i++)
         {
-          struct reader ** const rdary = pwr->rdary.rdary;
-          unsigned i;
-          for (i = 0; rdary[i]; i++)
+          DDS_TRACE("reader "PGUIDFMT"\n", PGUID (rdary[i]->e.guid));
+          if (! (ddsi_plugin.rhc_plugin.rhc_store_fn) (rdary[i]->rhc, &pwr_info, payload, tk))
           {
-            DDS_TRACE("reader "PGUIDFMT"\n", PGUID (rdary[i]->e.guid));
-            if (! (ddsi_plugin.rhc_plugin.rhc_store_fn) (rdary[i]->rhc, &pwr_info, payload, tk))
-            {
-              if (pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
-              ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
-              dds_sleepfor (DDS_MSECS (10));
-              if (pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
-              goto retry;
-            }
+            if (pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
+            ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
+            dds_sleepfor (DDS_MSECS (10));
+            if (pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
+            goto retry;
           }
-          ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
         }
-        else
-        {
-          /* When deleting, pwr is no longer accessible via the hash
-             tables, and consequently, a reader may be deleted without
-             it being possible to remove it from rdary. The primary
-             reason rdary exists is to avoid locking the proxy writer
-             but this is less of an issue when we are deleting it, so
-             we fall back to using the GUIDs so that we can deliver all
-             samples we received from it. As writer being deleted any
-             reliable samples that are rejected are simply discarded. */
-          ddsrt_avl_iter_t it;
-          struct pwr_rd_match *m;
-          ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
-          if (!pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
-          for (m = ddsrt_avl_iter_first (&pwr_readers_treedef, &pwr->readers, &it); m != NULL; m = ddsrt_avl_iter_next (&it))
-          {
-            struct reader *rd;
-            if ((rd = ephash_lookup_reader_guid (&m->rd_guid)) != NULL)
-            {
-              DDS_TRACE("reader-via-guid "PGUIDFMT"\n", PGUID (rd->e.guid));
-              (void) (ddsi_plugin.rhc_plugin.rhc_store_fn) (rd->rhc, &pwr_info, payload, tk);
-            }
-          }
-          if (!pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
-        }
-
-        ddsrt_atomic_st32 (&pwr->next_deliv_seq_lowword, (uint32_t) (sampleinfo->seq + 1));
+        ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
       }
       else
       {
-        struct reader *rd = ephash_lookup_reader_guid (rdguid);;
-        DDS_TRACE(" %"PRId64"=>"PGUIDFMT"%s\n", sampleinfo->seq, PGUID (*rdguid), rd ? "" : "?");
-        while (rd && ! (ddsi_plugin.rhc_plugin.rhc_store_fn) (rd->rhc, &pwr_info, payload, tk) && ephash_lookup_proxy_writer_guid (&pwr->e.guid))
-        {
-          if (pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
-          dds_sleepfor (DDS_MSECS (1));
-          if (pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
-        }
+        /* When deleting, pwr is no longer accessible via the hash
+           tables, and consequently, a reader may be deleted without
+           it being possible to remove it from rdary. The primary
+           reason rdary exists is to avoid locking the proxy writer
+           but this is less of an issue when we are deleting it, so
+           we fall back to using the GUIDs so that we can deliver all
+           samples we received from it. As writer being deleted any
+           reliable samples that are rejected are simply discarded. */
+        ddsrt_avl_iter_t it;
+        struct pwr_rd_match *m;
+        ddsrt_mutex_unlock (&pwr->rdary.rdary_lock);
+        if (!pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
+        for (m = ddsrt_avl_iter_first (&pwr_readers_treedef, &pwr->readers, &it); m != NULL; m = ddsrt_avl_iter_next (&it))
+        {
+          struct reader *rd;
+          if ((rd = ephash_lookup_reader_guid (&m->rd_guid)) != NULL && m->in_sync == PRMSS_SYNC)
+          {
+            DDS_TRACE("reader-via-guid "PGUIDFMT"\n", PGUID (rd->e.guid));
+            (void) (ddsi_plugin.rhc_plugin.rhc_store_fn) (rd->rhc, &pwr_info, payload, tk);
+          }
+        }
+        if (!pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
      }
-      ddsi_tkmap_instance_unref (tk);
+
+      ddsrt_atomic_st32 (&pwr->next_deliv_seq_lowword, (uint32_t) (sampleinfo->seq + 1));
     }
+    else
+    {
+      struct reader *rd = ephash_lookup_reader_guid (rdguid);
+      DDS_TRACE(" %"PRId64"=>"PGUIDFMT"%s\n", sampleinfo->seq, PGUID (*rdguid), rd ? "" : "?");
+      while (rd && ! (ddsi_plugin.rhc_plugin.rhc_store_fn) (rd->rhc, &pwr_info, payload, tk) && ephash_lookup_proxy_writer_guid (&pwr->e.guid))
+      {
+        if (pwr_locked) ddsrt_mutex_unlock (&pwr->e.lock);
+        dds_sleepfor (DDS_MSECS (1));
+        if (pwr_locked) ddsrt_mutex_lock (&pwr->e.lock);
+      }
+    }
+    ddsi_tkmap_instance_unref (tk);
   }
   ddsi_serdata_unref (payload);
 no_payload:
@@ -2072,7 +2084,7 @@ int user_dqueue_handler (const struct nn_rsample_info *sampleinfo, const struct
   return res;
 }
 
-static void deliver_user_data_synchronously (struct nn_rsample_chain *sc)
+static void deliver_user_data_synchronously (struct nn_rsample_chain *sc, const nn_guid_t *rdguid)
 {
   while (sc->first)
   {
@@ -2084,7 +2096,7 @@ static void deliver_user_data_synchronously (struct nn_rsample_chain *sc)
       /* Must not try to deliver a gap -- possibly a FIXME for
-         sample_lost events. Also note that the synchronous path is
-         _never_ used for historical data, and therefore never has the
-         GUID of a reader to deliver to */
-      deliver_user_data (e->sampleinfo, e->fragchain, NULL, 1);
+         sample_lost events. */
+      deliver_user_data (e->sampleinfo, e->fragchain, rdguid, 1);
     }
     nn_fragchain_unref (e->fragchain);
   }
 }
@@ -2215,7 +2227,7 @@ static void handle_regular (struct receiver_state *rst, nn_etime_t tnow, struct
            by the current mishandling of resource limits */
         if (*deferred_wakeup)
           dd_dqueue_enqueue_trigger (*deferred_wakeup);
-        deliver_user_data_synchronously (&sc);
+        deliver_user_data_synchronously (&sc, NULL);
       }
       else
       {
@@ -2226,28 +2238,22 @@ static void handle_regular (struct receiver_state *rst, nn_etime_t tnow, struct
           *deferred_wakeup = pwr->dqueue;
         }
       }
-      if (pwr->n_readers_out_of_sync > 0)
-      {
-        /* Those readers catching up with TL but in sync with the proxy
-           writer may have become in sync with the proxy writer and the
-           writer; those catching up with TL all by themselves go through
-           the "TOO_OLD" path below. */
-        ddsrt_avl_iter_t it;
-        struct pwr_rd_match *wn;
-        for (wn = ddsrt_avl_iter_first (&pwr_readers_treedef, &pwr->readers, &it); wn != NULL; wn = ddsrt_avl_iter_next (&it))
-          if (wn->in_sync == PRMSS_TLCATCHUP)
-            maybe_set_reader_in_sync (pwr, wn, sampleinfo->seq);
-      }
     }
-    else if (rres == NN_REORDER_TOO_OLD)
+
+    if (pwr->n_readers_out_of_sync > 0)
     {
+      /* Those readers catching up with TL but in sync with the proxy
+         writer may have become in sync with the proxy writer and the
+         writer; those catching up with TL all by themselves go through
+         the "TOO_OLD" path below. */
+      ddsrt_avl_iter_t it;
       struct pwr_rd_match *wn;
       struct nn_rsample *rsample_dup = NULL;
      int reuse_rsample_dup = 0;
-      for (wn = ddsrt_avl_find_min (&pwr_readers_treedef, &pwr->readers); wn != NULL; wn = ddsrt_avl_find_succ (&pwr_readers_treedef, &pwr->readers, wn))
+      for (wn = ddsrt_avl_iter_first (&pwr_readers_treedef, &pwr->readers, &it); wn != NULL; wn = ddsrt_avl_iter_next (&it))
      {
        nn_reorder_result_t rres2;
-        if (wn->in_sync != PRMSS_OUT_OF_SYNC || sampleinfo->seq > wn->u.not_in_sync.end_of_out_of_sync_seq)
+        if (wn->in_sync == PRMSS_SYNC)
          continue;
        if (!reuse_rsample_dup)
          rsample_dup = nn_reorder_rsample_dup (rmsg, rsample);
@@ -2273,17 +2279,26 @@ static void handle_regular (struct receiver_state *rst, nn_etime_t tnow, struct
             in-order, and those few microseconds can't hurt in
             catching up on transient-local data. See also
             NN_REORDER_DELIVER case in outer switch. */
-          nn_dqueue_enqueue1 (pwr->dqueue, &wn->rd_guid, &sc, rres2);
+          if (pwr->deliver_synchronously)
+          {
+            /* FIXME: just in case the synchronous delivery runs into a delay caused
+               by the current mishandling of resource limits */
+            deliver_user_data_synchronously (&sc, &wn->rd_guid);
+          }
+          else
+          {
+            if (*deferred_wakeup && *deferred_wakeup != pwr->dqueue)
+            {
+              dd_dqueue_enqueue_trigger (*deferred_wakeup);
+              *deferred_wakeup = NULL;
+            }
+            nn_dqueue_enqueue1 (pwr->dqueue, &wn->rd_guid, &sc, rres2);
+          }
           break;
        }
      }
    }
-#ifndef NDEBUG
-    else
-    {
-      assert (rres == NN_REORDER_ACCEPT || rres == NN_REORDER_REJECT);
-    }
-#endif
+
     nn_fragchain_adjust_refcount (fragchain, refc_adjust);
   }
   ddsrt_mutex_unlock (&pwr->e.lock);
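Taken together, these hunks keep the fast-path flag consistent with the out-of-sync counter. The intended invariant can be stated as a sketch (hypothetical checker; assumes pwr->e.lock and rdary_lock are held, and that local_reader_ary_setinvalid clears fastpath_ok):

static void sketch_check_fastpath_invariant (const struct proxy_writer *pwr)
{
  if (pwr->n_readers_out_of_sync > 0)
    assert (!pwr->rdary.fastpath_ok);   /* must use per-GUID delivery */
  else
    assert (pwr->rdary.fastpath_ok || !pwr->rdary.valid);
}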
diff --git a/src/core/xtests/CMakeLists.txt b/src/core/xtests/CMakeLists.txt
index 02b7bfd..72bdb94 100644
--- a/src/core/xtests/CMakeLists.txt
+++ b/src/core/xtests/CMakeLists.txt
@@ -9,18 +9,5 @@
 #
 # SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
 #
-idlc_generate(RhcTypes RhcTypes.idl)
-
-add_executable(rhc_torture rhc_torture.c)
-
-target_include_directories(
-  rhc_torture PRIVATE
-  "$