cyclonedds/src/core/ddsc/tests/builtin_topics.c

/*
* Copyright(c) 2006 to 2018 ADLINK Technology Limited and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0, or the Eclipse Distribution License
* v. 1.0 which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
*/
#include "dds/dds.h"
#include "RoundTrip.h"
#include "Space.h"
#include "test-common.h"
#include "CUnit/Test.h"
static dds_entity_t g_participant = 0;
static dds_entity_t g_subscriber = 0;
static dds_entity_t g_publisher = 0;
static dds_entity_t g_writer = 0;
static dds_entity_t g_reader = 0;
static dds_entity_t g_topic = 0;
#define MAX_SAMPLES 2
static dds_sample_info_t g_info[MAX_SAMPLES];
/* Per-test QoS setup/teardown hooks; currently empty placeholders. */
static void
qos_init(void)
{
}
static void
qos_fini(void)
{
}
static void
setup(void)
{
qos_init();
g_participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
CU_ASSERT_FATAL(g_participant > 0);
g_topic = dds_create_topic(g_participant, &RoundTripModule_DataType_desc, "RoundTrip", NULL, NULL);
CU_ASSERT_FATAL(g_topic > 0);
g_subscriber = dds_create_subscriber(g_participant, NULL, NULL);
CU_ASSERT_FATAL(g_subscriber > 0);
g_publisher = dds_create_publisher(g_participant, NULL, NULL);
CU_ASSERT_FATAL(g_publisher > 0);
g_writer = dds_create_writer(g_publisher, g_topic, NULL, NULL);
CU_ASSERT_FATAL(g_writer > 0);
g_reader = dds_create_reader(g_subscriber, g_topic, NULL, NULL);
CU_ASSERT_FATAL(g_reader > 0);
}
static void
teardown(void)
{
qos_fini();
dds_delete(g_participant);
}
static void
check_default_qos_of_builtin_entity(dds_entity_t entity, bool isread)
{
dds_return_t ret;
int64_t deadline;
int64_t liveliness_lease_duration;
int64_t minimum_separation;
int64_t max_blocking_time;
int64_t autopurge_nowriter_samples_delay;
int64_t autopurge_disposed_samples_delay;
dds_durability_kind_t durability_kind;
dds_presentation_access_scope_kind_t presentation_access_scope_kind;
bool presentation_coherent_access;
bool presentation_ordered_access;
dds_ownership_kind_t ownership_kind;
dds_liveliness_kind_t liveliness_kind;
dds_reliability_kind_t reliability_kind;
dds_destination_order_kind_t destination_order_kind;
dds_history_kind_t history_kind;
int32_t history_depth;
int32_t resource_limits_max_samples;
int32_t resource_limits_max_instances;
int32_t resource_limits_max_samples_per_instance;
char **partitions;
uint32_t plen;
dds_qos_t *qos = dds_create_qos();
CU_ASSERT_FATAL(qos != NULL);
ret = dds_get_qos(entity, qos);
CU_ASSERT_FATAL(ret == DDS_RETCODE_OK);
dds_qget_durability(qos, &durability_kind);
dds_qget_presentation(qos, &presentation_access_scope_kind, &presentation_coherent_access, &presentation_ordered_access);
dds_qget_deadline(qos, &deadline);
dds_qget_ownership(qos, &ownership_kind);
dds_qget_liveliness(qos, &liveliness_kind, &liveliness_lease_duration);
dds_qget_time_based_filter(qos, &minimum_separation);
dds_qget_reliability(qos, &reliability_kind, &max_blocking_time);
dds_qget_destination_order(qos, &destination_order_kind);
dds_qget_history(qos, &history_kind, &history_depth);
dds_qget_resource_limits(qos, &resource_limits_max_samples, &resource_limits_max_instances, &resource_limits_max_samples_per_instance);
dds_qget_reader_data_lifecycle(qos, &autopurge_nowriter_samples_delay, &autopurge_disposed_samples_delay);
dds_qget_partition(qos, &plen, &partitions);
// no getter for ENTITY_FACTORY
if (!isread) {
CU_ASSERT_FATAL(plen == 1);
CU_ASSERT_STRING_EQUAL_FATAL(partitions[0], "__BUILT-IN PARTITION__");
} else {
CU_ASSERT_FATAL(durability_kind == DDS_DURABILITY_TRANSIENT_LOCAL);
CU_ASSERT_FATAL(presentation_access_scope_kind == DDS_PRESENTATION_TOPIC);
CU_ASSERT_FATAL(presentation_coherent_access == false);
CU_ASSERT_FATAL(presentation_ordered_access == false);
CU_ASSERT_FATAL(deadline == DDS_INFINITY);
CU_ASSERT_FATAL(ownership_kind == DDS_OWNERSHIP_SHARED);
CU_ASSERT_FATAL(liveliness_kind == DDS_LIVELINESS_AUTOMATIC);
CU_ASSERT_FATAL(minimum_separation == 0);
CU_ASSERT_FATAL(reliability_kind == DDS_RELIABILITY_RELIABLE);
CU_ASSERT_FATAL(max_blocking_time == DDS_MSECS(100));
CU_ASSERT_FATAL(destination_order_kind == DDS_DESTINATIONORDER_BY_RECEPTION_TIMESTAMP);
CU_ASSERT_FATAL(history_kind == DDS_HISTORY_KEEP_LAST);
CU_ASSERT_FATAL(history_depth == 1);
CU_ASSERT_FATAL(resource_limits_max_instances == DDS_LENGTH_UNLIMITED);
CU_ASSERT_FATAL(resource_limits_max_samples == DDS_LENGTH_UNLIMITED);
CU_ASSERT_FATAL(resource_limits_max_samples_per_instance == DDS_LENGTH_UNLIMITED);
CU_ASSERT_FATAL(autopurge_nowriter_samples_delay == DDS_INFINITY);
CU_ASSERT_FATAL(autopurge_disposed_samples_delay == DDS_INFINITY);
}
if (plen > 0) {
for (uint32_t i = 0; i < plen; i++) {
dds_free(partitions[i]);
}
dds_free(partitions);
}
dds_delete_qos(qos);
}
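/* For reference, a minimal sketch (hypothetical helper, not part of the test
   suite) of how the reader defaults asserted above could be built up with the
   corresponding dds_qset_* calls; the result could then be compared against
   dds_get_qos output with dds_qos_equal.  The liveliness lease duration is
   assumed to be DDS_INFINITY here, the test above only checks the kind. */
#if 0
static dds_qos_t *
make_expected_builtin_reader_qos(void)
{
  dds_qos_t *qos = dds_create_qos();
  dds_qset_durability(qos, DDS_DURABILITY_TRANSIENT_LOCAL);
  dds_qset_presentation(qos, DDS_PRESENTATION_TOPIC, false, false);
  dds_qset_deadline(qos, DDS_INFINITY);
  dds_qset_ownership(qos, DDS_OWNERSHIP_SHARED);
  dds_qset_liveliness(qos, DDS_LIVELINESS_AUTOMATIC, DDS_INFINITY);
  dds_qset_time_based_filter(qos, 0);
  dds_qset_reliability(qos, DDS_RELIABILITY_RELIABLE, DDS_MSECS(100));
  dds_qset_destination_order(qos, DDS_DESTINATIONORDER_BY_RECEPTION_TIMESTAMP);
  dds_qset_history(qos, DDS_HISTORY_KEEP_LAST, 1);
  dds_qset_resource_limits(qos, DDS_LENGTH_UNLIMITED, DDS_LENGTH_UNLIMITED, DDS_LENGTH_UNLIMITED);
  dds_qset_reader_data_lifecycle(qos, DDS_INFINITY, DDS_INFINITY);
  return qos;
}
#endif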
CU_Test(ddsc_builtin_topics, availability_builtin_topics, .init = setup, .fini = teardown)
{
/* FIXME: a successful lookup would not square with the built-in topics not being returned when looking at the children of the participant ... */
dds_entity_t topic;
topic = dds_find_topic(g_participant, "DCPSParticipant");
CU_ASSERT_FATAL(topic < 0);
//dds_delete(topic);
topic = dds_find_topic(g_participant, "DCPSTopic");
CU_ASSERT_FATAL(topic < 0);
//dds_delete(topic);
topic = dds_find_topic(g_participant, "DCPSSubscription");
CU_ASSERT_FATAL(topic < 0);
//dds_delete(topic);
topic = dds_find_topic(g_participant, "DCPSPublication");
CU_ASSERT_FATAL(topic < 0);
//dds_delete(topic);
}
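/* Illustrative sketch, related to the FIXME above: enumerating the
   participant's children with dds_get_children is one way to observe that
   the built-in readers/topics do not show up there.  The helper name and the
   fixed array size are assumptions made for brevity; this is not part of the
   test suite. */
#if 0
static void
list_participant_children(dds_entity_t participant)
{
  dds_entity_t children[16];
  /* returns the total number of children, storing at most 16 of them */
  dds_return_t n = dds_get_children(participant, children, sizeof (children) / sizeof (children[0]));
  for (int32_t i = 0; i < n && i < 16; i++) {
    (void) children[i]; /* built-in entities are not expected among these */
  }
}
#endif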
CU_Test(ddsc_builtin_topics, read_publication_data, .init = setup, .fini = teardown)
{
dds_entity_t reader;
dds_return_t ret;
dds_builtintopic_endpoint_t *data;
void *samples[MAX_SAMPLES];
reader = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSPUBLICATION, NULL, NULL);
CU_ASSERT_FATAL(reader > 0);
samples[0] = NULL;
ret = dds_read(reader, samples, g_info, MAX_SAMPLES, MAX_SAMPLES);
CU_ASSERT_FATAL(ret > 0);
data = samples[0];
CU_ASSERT_STRING_EQUAL_FATAL(data->topic_name, "RoundTrip");
dds_return_loan(reader, samples, ret);
}
CU_Test(ddsc_builtin_topics, read_subscription_data, .init = setup, .fini = teardown)
{
dds_entity_t reader;
dds_return_t ret;
void * samples[MAX_SAMPLES];
const char *exp[] = { "DCPSSubscription", "RoundTrip" };
unsigned seen = 0;
dds_qos_t *qos;
reader = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSSUBSCRIPTION, NULL, NULL);
CU_ASSERT_FATAL(reader > 0);
samples[0] = NULL;
ret = dds_read(reader, samples, g_info, MAX_SAMPLES, MAX_SAMPLES);
CU_ASSERT_FATAL(ret == 2);
qos = dds_create_qos();
for (int i = 0; i < ret; i++) {
dds_builtintopic_endpoint_t *data = samples[i];
for (size_t j = 0; j < sizeof (exp) / sizeof (exp[0]); j++) {
if (strcmp (data->topic_name, exp[j]) == 0) {
seen |= 1u << j;
dds_return_t get_qos_ret = dds_get_qos(j == 0 ? reader : g_reader, qos);
CU_ASSERT_FATAL(get_qos_ret == DDS_RETCODE_OK);
const bool eq = dds_qos_equal(qos, data->qos);
CU_ASSERT_FATAL(eq);
}
}
}
CU_ASSERT_FATAL(seen == 3);
dds_delete_qos(qos);
dds_return_loan(reader, samples, ret);
}
CU_Test(ddsc_builtin_topics, read_participant_data, .init = setup, .fini = teardown)
{
dds_entity_t reader;
dds_return_t ret;
//dds_builtintopic_participant_t *data;
void * samples[MAX_SAMPLES];
reader = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSPARTICIPANT, NULL, NULL);
CU_ASSERT_FATAL(reader > 0);
samples[0] = NULL;
ret = dds_read(reader, samples, g_info, MAX_SAMPLES, MAX_SAMPLES);
CU_ASSERT_FATAL(ret > 0);
dds_return_loan(reader, samples, ret);
}
CU_Test(ddsc_builtin_topics, read_topic_data, .init = setup, .fini = teardown)
{
#if 0 /* disabled pending CHAM-347 */
dds_entity_t reader;
dds_return_t ret;
DDS_TopicBuiltinTopicData *data;
void * samples[MAX_SAMPLES];
reader = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSTOPIC, NULL, NULL);
CU_ASSERT_FATAL(reader > 0);
samples[0] = NULL;
ret = dds_read(reader, samples, g_info, MAX_SAMPLES, MAX_SAMPLES);
CU_ASSERT_FATAL(ret > 0);
data = (DDS_TopicBuiltinTopicData *)samples[0];
CU_ASSERT_STRING_EQUAL_FATAL(data->name, "DCPSSubscription");
dds_return_loan(reader, samples, ret);
#endif
}
CU_Test(ddsc_builtin_topics, same_subscriber, .init = setup, .fini = teardown)
{
dds_entity_t subscription_rdr;
dds_entity_t subscription_subscriber;
dds_entity_t publication_rdr;
dds_entity_t publication_subscriber;
dds_entity_t participant_rdr;
dds_entity_t participant_subscriber;
#if 0
dds_entity_t topic_rdr;
dds_entity_t topic_subscriber;
#endif
subscription_rdr = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSSUBSCRIPTION, NULL, NULL);
CU_ASSERT_FATAL(subscription_rdr > 0);
subscription_subscriber = dds_get_parent(subscription_rdr);
CU_ASSERT_FATAL(subscription_subscriber > 0);
publication_rdr = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSPUBLICATION, NULL, NULL);
CU_ASSERT_FATAL(publication_rdr > 0);
publication_subscriber = dds_get_parent(publication_rdr);
CU_ASSERT_FATAL(publication_subscriber > 0);
CU_ASSERT_FATAL(subscription_subscriber == publication_subscriber);
participant_rdr = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSPARTICIPANT, NULL, NULL);
CU_ASSERT_FATAL(participant_rdr > 0);
participant_subscriber = dds_get_parent(participant_rdr);
CU_ASSERT_FATAL(participant_subscriber > 0);
CU_ASSERT_FATAL(publication_subscriber == participant_subscriber);
#if 0
topic_rdr = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSTOPIC, NULL, NULL);
CU_ASSERT_FATAL(topic_rdr > 0);
topic_subscriber = dds_get_parent(topic_rdr);
CU_ASSERT_FATAL(topic_subscriber > 0);
CU_ASSERT_FATAL(participant_subscriber == topic_subscriber);
#endif
}
CU_Test(ddsc_builtin_topics, builtin_qos, .init = setup, .fini = teardown)
{
dds_entity_t dds_sub_rdr;
dds_entity_t dds_sub_subscriber;
dds_sub_rdr = dds_create_reader(g_participant, DDS_BUILTIN_TOPIC_DCPSSUBSCRIPTION, NULL, NULL);
CU_ASSERT_FATAL(dds_sub_rdr > 0);
check_default_qos_of_builtin_entity(dds_sub_rdr, true);
dds_sub_subscriber = dds_get_parent(dds_sub_rdr);
CU_ASSERT_FATAL(dds_sub_subscriber > 0);
check_default_qos_of_builtin_entity(dds_sub_subscriber, false);
}
CU_Test(ddsc_builtin_topics, read_nothing)
{
dds_entity_t pp;
dds_entity_t rd;
dds_return_t ret;
dds_sample_info_t si;
void *raw1, *raw2;
int32_t n1, n2;
pp = dds_create_participant (DDS_DOMAIN_DEFAULT, NULL, NULL);
CU_ASSERT_FATAL (pp > 0);
rd = dds_create_reader (pp, DDS_BUILTIN_TOPIC_DCPSSUBSCRIPTION, NULL, NULL);
CU_ASSERT_FATAL (rd > 0);
/* We can't guarantee there's no other process around with a publication, but
   we can keep taking until nothing remains. The point is to check the
   handling of freeing memory when a loan was outstanding, memory had to be
   allocated, and that memory subsequently had to be freed because no data
   was available. */
raw1 = raw2 = NULL;
n1 = dds_take (rd, &raw1, &si, 1, 1);
CU_ASSERT_FATAL (n1 >= 0);
n2 = dds_take (rd, &raw2, &si, 1, 1);
CU_ASSERT_FATAL (n2 >= 0);
ret = dds_return_loan (rd, &raw1, n1);
CU_ASSERT_FATAL (ret == 0);
ret = dds_return_loan (rd, &raw2, n2);
CU_ASSERT_FATAL (ret == 0);
ret = dds_delete (pp);
CU_ASSERT_FATAL (ret == 0);
}