/*
 * Copyright(c) 2006 to 2018 ADLINK Technology Limited and others
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Public License v. 2.0 which is available at
 * http://www.eclipse.org/legal/epl-2.0, or the Eclipse Distribution License
 * v. 1.0 which is available at
 * http://www.eclipse.org/org/documents/edl-v10.php.
 *
 * SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
 */
#include <limits.h>

#include "dds/dds.h"
#include "CUnit/Test.h"
#include "CUnit/Theory.h"
#include "RoundTrip.h"

#include "dds/ddsrt/misc.h"
#include "dds/ddsrt/process.h"
#include "dds/ddsrt/threads.h"

/**************************************************************************************************
 *
 * Test fixtures
 *
 *************************************************************************************************/

static dds_entity_t g_keep = 0;
static dds_entity_t g_participant = 0;
static dds_entity_t g_topic = 0;
static dds_entity_t g_subscriber = 0;
static dds_entity_t g_publisher = 0;
static dds_entity_t g_reader = 0;
static dds_entity_t g_writer = 0;
static dds_entity_t g_readcond = 0;
static dds_entity_t g_querycond = 0;

/* Dummy query condition callback. */
static bool
accept_all(const void * sample)
{
    (void)sample;
    return true;
}

static char*
create_topic_name(const char *prefix, char *name, size_t size)
{
    /* Compose a topic name that is unique enough for concurrently running
     * tests by appending the current process and thread ids. */
    ddsrt_pid_t pid = ddsrt_getpid();
    ddsrt_tid_t tid = ddsrt_gettid();
    (void) snprintf(name, size, "%s_pid%"PRIdPID"_tid%"PRIdTID, prefix, pid, tid);
    return name;
}

static void
hierarchy_init(void)
{
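    /* Build the full entity tree used by the tests below:
     * participant -> { topic,
     *                  publisher  -> writer,
     *                  subscriber -> reader -> { readcondition, querycondition } } */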
    uint32_t mask = DDS_ANY_SAMPLE_STATE | DDS_ANY_VIEW_STATE | DDS_ANY_INSTANCE_STATE;
    char name[100];

    g_participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(g_participant > 0);

    g_topic = dds_create_topic(g_participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_hierarchy_test", name, sizeof name), NULL, NULL);
    CU_ASSERT_FATAL(g_topic > 0);

    g_publisher = dds_create_publisher(g_participant, NULL, NULL);
    CU_ASSERT_FATAL(g_publisher > 0);

    g_subscriber = dds_create_subscriber(g_participant, NULL, NULL);
    CU_ASSERT_FATAL(g_subscriber > 0);

    g_writer = dds_create_writer(g_publisher, g_topic, NULL, NULL);
    CU_ASSERT_FATAL(g_writer > 0);

    g_reader = dds_create_reader(g_subscriber, g_topic, NULL, NULL);
    CU_ASSERT_FATAL(g_reader > 0);

    g_readcond = dds_create_readcondition(g_reader, mask);
    CU_ASSERT_FATAL(g_readcond > 0);

    g_querycond = dds_create_querycondition(g_reader, mask, accept_all);
    CU_ASSERT_FATAL(g_querycond > 0);

    /* The deletion of the last participant will shut everything down. This
     * means that the API will react differently after that. Because the
     * testing we're doing here is quite generic, we don't want everything
     * to shut down when we delete our participant. For that, we create a
     * second participant, which will keep everything running.
     */
    g_keep = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(g_keep > 0);
}

static void
hierarchy_fini(void)
{
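    /* Delete children before parents; a handle that was already deleted
     * (for instance recursively, by a test deleting its parent) just makes
     * dds_delete return an error, so this teardown is safe either way. */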
    dds_delete(g_querycond);
    dds_delete(g_readcond);
    dds_delete(g_reader);
    dds_delete(g_writer);
    dds_delete(g_subscriber);
    dds_delete(g_publisher);
    dds_delete(g_topic);
    dds_delete(g_participant);
    dds_delete(g_keep);
}

#if 0
#else

/**************************************************************************************************
 *
 * These will check the recursive deletion.
 *
 *************************************************************************************************/
/*************************************************************************************************/
CU_Test(ddsc_entity_delete, recursive, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_domainid_t id;
    dds_return_t ret;

    /* First be sure that 'dds_get_domainid' returns ok. */
    ret = dds_get_domainid(g_participant, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_topic, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_publisher, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_subscriber, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_writer, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_reader, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_readcond, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    ret = dds_get_domainid(g_querycond, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);

    /* Deleting the top dog (participant) should delete all children. */
    ret = dds_delete(g_participant);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);

    /* Check if all the entities are deleted now. */
    ret = dds_get_domainid(g_participant, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_topic, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_publisher, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_subscriber, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_writer, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_reader, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_readcond, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_querycond, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_delete, recursive_with_deleted_topic)
{
    dds_domainid_t id;
    dds_return_t ret;
    char name[100];

    /* Internal handling of topic is different from all the other entities.
     * It's very interesting if this recursive deletion still works and
     * doesn't crash when the topic is already deleted (CHAM-424). */

    /* First, create a topic and a writer with that topic. */
    g_participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(g_participant > 0);
    g_topic = dds_create_topic(g_participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_hierarchy_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(g_topic > 0);
    g_writer = dds_create_writer(g_participant, g_topic, NULL, NULL);
    CU_ASSERT_FATAL(g_writer > 0);
    g_keep = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(g_keep > 0);

    /* Second, delete the topic to make sure that the writer holds the last
     * reference to the topic and thus will delete it when it itself is
     * deleted. */
    ret = dds_delete(g_topic);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);

    /* Third, deleting the participant should delete all children, one of
     * which is the writer holding the last topic reference. */
    ret = dds_delete(g_participant);
    /* Before the CHAM-424 fix, we would not get here because of a crash,
     * or it (incidentally) continued but returned an error. */
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);

    /* Check if the entities are actually deleted. */
    ret = dds_get_domainid(g_participant, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_topic, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
    ret = dds_get_domainid(g_writer, &id);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);

    dds_delete(g_keep);
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_participant in various ways.
 *
 *************************************************************************************************/
/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_participant, valid_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_participant, valid_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t participant;
    participant = dds_get_participant(*entity);
    CU_ASSERT_EQUAL_FATAL(participant, g_participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_participant, deleted_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_participant, deleted_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t participant;
    dds_delete(*entity);
    participant = dds_get_participant(*entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(participant), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_participant, invalid_entities) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_participant, invalid_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
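    /* On an invalid handle, dds_get_participant returns a negative error
     * code; dds_err_nr() reduces both sides to the plain retcode number,
     * which is why 'exp' below mimics a negative error return. */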
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t participant;

    participant = dds_get_participant(entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(participant), dds_err_nr(exp));
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_parent in various ways.
 *
 *************************************************************************************************/
/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_parent, conditions) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_parent, conditions, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    parent = dds_get_parent(*entity);
    CU_ASSERT_EQUAL_FATAL(parent, g_reader);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_parent, reader, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    parent = dds_get_parent(g_reader);
    CU_ASSERT_EQUAL_FATAL(parent, g_subscriber);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_parent, writer, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    parent = dds_get_parent(g_writer);
    CU_ASSERT_EQUAL_FATAL(parent, g_publisher);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_parent, pubsubtop) = {
    CU_DataPoints(dds_entity_t*, &g_publisher, &g_subscriber, &g_topic),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_parent, pubsubtop, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    parent = dds_get_parent(*entity);
    CU_ASSERT_EQUAL_FATAL(parent, g_participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_parent, participant, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    parent = dds_get_parent(g_participant);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(parent), DDS_ENTITY_NIL);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_parent, deleted_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_parent, deleted_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t parent;
    dds_delete(*entity);
    parent = dds_get_parent(*entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(parent), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_parent, invalid_entities) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_parent, invalid_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
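    /* Same convention as ddsc_entity_get_participant above: compare the
     * negative error return through dds_err_nr(). */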
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t parent;

    parent = dds_get_parent(entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(parent), dds_err_nr(exp));
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_children in various ways.
 *
 *************************************************************************************************/
/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, null, .init=hierarchy_init, .fini=hierarchy_fini)
{
dds_return_t ret;
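    /* A NULL buffer with size 0 turns the call into a pure count query: it
     * reports the number of children (topic, publisher, subscriber) without
     * writing anything. */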
    ret = dds_get_children(g_participant, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 3);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, invalid_size, .init=hierarchy_init, .fini=hierarchy_fini)
{
|
|
|
|
dds_return_t ret;
|
|
|
|
dds_entity_t child;
|
|
|
|
ret = dds_get_children(g_participant, &child, INT32_MAX);
|
2018-12-04 23:01:46 +01:00
|
|
|
CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
|
2018-04-10 17:03:59 +02:00
|
|
|
}
|
|
|
|
/*************************************************************************************************/
|
|
|
|
|
|
|
|
/*************************************************************************************************/
|
2018-12-04 23:01:46 +01:00
|
|
|
CU_Test(ddsc_entity_get_children, too_small, .init=hierarchy_init, .fini=hierarchy_fini)
|
2018-04-10 17:03:59 +02:00
|
|
|
{
|
|
|
|
dds_return_t ret;
|
|
|
|
dds_entity_t children[2];
|
|
|
|
ret = dds_get_children(g_participant, children, 2);
|
2018-12-04 23:01:46 +01:00
|
|
|
CU_ASSERT_EQUAL_FATAL(ret, 3);
|
|
|
|
CU_ASSERT_FATAL((children[0] == g_publisher) || (children[0] == g_subscriber) || (children[0] == g_topic));
|
|
|
|
CU_ASSERT_FATAL((children[1] == g_publisher) || (children[1] == g_subscriber) || (children[1] == g_topic));
|
|
|
|
CU_ASSERT_NOT_EQUAL_FATAL(children[0], children[1]);
|
2018-04-10 17:03:59 +02:00
|
|
|
}
|
|
|
|
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, participant, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t children[4];
    ret = dds_get_children(g_participant, children, 4);
    CU_ASSERT_EQUAL_FATAL(ret, 3);
    CU_ASSERT_FATAL((children[0] == g_publisher) || (children[0] == g_subscriber) || (children[0] == g_topic));
    CU_ASSERT_FATAL((children[1] == g_publisher) || (children[1] == g_subscriber) || (children[1] == g_topic));
    CU_ASSERT_FATAL((children[2] == g_publisher) || (children[2] == g_subscriber) || (children[2] == g_topic));
    CU_ASSERT_NOT_EQUAL_FATAL(children[0], children[1]);
    CU_ASSERT_NOT_EQUAL_FATAL(children[0], children[2]);
    CU_ASSERT_NOT_EQUAL_FATAL(children[1], children[2]);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, topic, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t child;
    ret = dds_get_children(g_topic, &child, 1);
    CU_ASSERT_EQUAL_FATAL(ret, 0);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, publisher, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t child;
    ret = dds_get_children(g_publisher, &child, 1);
    CU_ASSERT_EQUAL_FATAL(ret, 1);
    CU_ASSERT_EQUAL_FATAL(child, g_writer);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, subscriber, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t children[2];
    ret = dds_get_children(g_subscriber, children, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 1);
    CU_ASSERT_EQUAL_FATAL(children[0], g_reader);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, writer, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    ret = dds_get_children(g_writer, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 0);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, reader, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t children[2];
    ret = dds_get_children(g_reader, children, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 2);
    CU_ASSERT_FATAL((children[0] == g_readcond) || (children[0] == g_querycond));
    CU_ASSERT_FATAL((children[1] == g_readcond) || (children[1] == g_querycond));
    CU_ASSERT_NOT_EQUAL_FATAL(children[0], children[1]);
}
/*************************************************************************************************/
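/* As the reader case above shows, the read and query conditions created in
 * hierarchy_init() count as children of the reader, just as the readers and
 * writers are children of their subscriber and publisher. */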

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_children, conditions) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_children, conditions, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t child;
    ret = dds_get_children(*entity, &child, 1);
    CU_ASSERT_EQUAL_FATAL(ret, 0);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_children, deleted_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_children, deleted_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_return_t ret;
    dds_entity_t children[4];
    dds_delete(*entity);
    ret = dds_get_children(*entity, children, 4);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_children, invalid_entities) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_children, invalid_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t children[4];
    dds_return_t ret;

    ret = dds_get_children(entity, children, 4);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), dds_err_nr(exp));
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_topic in various ways.
 *
 *************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_topic, data_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_writer),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_topic, data_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t topic;
    topic = dds_get_topic(*entity);
    CU_ASSERT_EQUAL_FATAL(topic, g_topic);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_topic, deleted_entities) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader, &g_writer),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_topic, deleted_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t topic;
    dds_delete(*entity);
    topic = dds_get_topic(*entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(topic), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_topic, invalid_entities) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_topic, invalid_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t topic;

    topic = dds_get_topic(entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(topic), dds_err_nr(exp));
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_topic, non_data_entities) = {
    CU_DataPoints(dds_entity_t*, &g_subscriber, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_topic, non_data_entities, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t topic;
    topic = dds_get_topic(*entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(topic), DDS_RETCODE_ILLEGAL_OPERATION);
}
/*************************************************************************************************/
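/* The pattern of these theories recurs for the getters below: a deleted or
 * invalid handle yields DDS_RETCODE_BAD_PARAMETER, while a live handle of the
 * wrong entity kind yields DDS_RETCODE_ILLEGAL_OPERATION. */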

/**************************************************************************************************
 *
 * These will check the dds_get_publisher in various ways.
 *
 *************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_publisher, writer, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t publisher;
    publisher = dds_get_publisher(g_writer);
    CU_ASSERT_EQUAL_FATAL(publisher, g_publisher);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_publisher, deleted_writer, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t publisher;
    dds_delete(g_writer);
    publisher = dds_get_publisher(g_writer);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(publisher), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_publisher, invalid_writers) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_publisher, invalid_writers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t publisher;

    publisher = dds_get_publisher(entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(publisher), dds_err_nr(exp));
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_publisher, non_writers) = {
    CU_DataPoints(dds_entity_t*, &g_publisher, &g_reader, &g_subscriber, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *cond), ddsc_entity_get_publisher, non_writers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t publisher;
    publisher = dds_get_publisher(*cond);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(publisher), DDS_RETCODE_ILLEGAL_OPERATION);
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_subscriber in various ways.
 *
 *************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_subscriber, readers) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_subscriber, readers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t subscriber;
    subscriber = dds_get_subscriber(*entity);
    CU_ASSERT_EQUAL_FATAL(subscriber, g_subscriber);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_subscriber, deleted_readers) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond, &g_reader),
};
CU_Theory((dds_entity_t *entity), ddsc_entity_get_subscriber, deleted_readers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t subscriber;
    dds_delete(*entity);
    subscriber = dds_get_subscriber(*entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(subscriber), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_subscriber, invalid_readers) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t entity), ddsc_entity_get_subscriber, invalid_readers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t subscriber;

    subscriber = dds_get_subscriber(entity);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(subscriber), dds_err_nr(exp));
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_subscriber, non_readers) = {
    CU_DataPoints(dds_entity_t*, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *cond), ddsc_entity_get_subscriber, non_readers, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t subscriber;
    subscriber = dds_get_subscriber(*cond);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(subscriber), DDS_RETCODE_ILLEGAL_OPERATION);
}
/*************************************************************************************************/

/**************************************************************************************************
 *
 * These will check the dds_get_datareader in various ways.
 *
 *************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_datareader, conditions) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond),
};
CU_Theory((dds_entity_t *cond), ddsc_entity_get_datareader, conditions, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t reader;
    reader = dds_get_datareader(*cond);
    CU_ASSERT_EQUAL_FATAL(reader, g_reader);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_datareader, deleted_conds) = {
    CU_DataPoints(dds_entity_t*, &g_readcond, &g_querycond),
};
CU_Theory((dds_entity_t *cond), ddsc_entity_get_datareader, deleted_conds, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t reader;
    dds_delete(*cond);
    reader = dds_get_datareader(*cond);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(reader), DDS_RETCODE_BAD_PARAMETER);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_datareader, invalid_conds) = {
    CU_DataPoints(dds_entity_t, -2, -1, 0, INT_MAX, INT_MIN),
};
CU_Theory((dds_entity_t cond), ddsc_entity_get_datareader, invalid_conds, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t exp = DDS_RETCODE_BAD_PARAMETER * -1;
    dds_entity_t reader;

    reader = dds_get_datareader(cond);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(reader), dds_err_nr(exp));
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_TheoryDataPoints(ddsc_entity_get_datareader, non_conds) = {
    CU_DataPoints(dds_entity_t*, &g_reader, &g_subscriber, &g_writer, &g_publisher, &g_topic, &g_participant),
};
CU_Theory((dds_entity_t *cond), ddsc_entity_get_datareader, non_conds, .init=hierarchy_init, .fini=hierarchy_fini)
{
    dds_entity_t reader;
    reader = dds_get_datareader(*cond);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(reader), DDS_RETCODE_ILLEGAL_OPERATION);
}
/*************************************************************************************************/
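/* The tests below exercise implicit entities: creating a writer or reader
 * directly from a participant makes the middleware create the publisher or
 * subscriber on the caller's behalf, and that implicit parent is reaped
 * together with its last child. A minimal sketch of the pattern (illustrative
 * only; the topic name is made up):
 *
 *   dds_entity_t pp = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
 *   dds_entity_t tp = dds_create_topic(pp, &RoundTripModule_DataType_desc, "T", NULL, NULL);
 *   dds_entity_t wr = dds_create_writer(pp, tp, NULL, NULL); // implicit publisher appears
 *   dds_delete(wr);                                          // implicit publisher goes too
 */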

/*************************************************************************************************/
CU_Test(ddsc_entity_implicit_publisher, deleted)
{
    dds_entity_t participant;
    dds_entity_t writer;
    dds_entity_t topic;
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_publisher_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    writer = dds_create_writer(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(writer > 0);

    /* The topic and the implicit publisher are the participant's children. */
    ret = dds_get_children(participant, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 2);

    dds_delete(writer);

    /* Deleting the writer also removes the implicit publisher. */
    ret = dds_get_children(participant, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 1);

    dds_delete(topic);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_implicit_publisher, invalid_topic)
{
    dds_entity_t participant;
    dds_entity_t writer;

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    /* Disable SAL warning on intentional misuse of the API */
    DDSRT_WARNING_MSVC_OFF(28020);
    writer = dds_create_writer(participant, 0, NULL, NULL);
    /* Re-enable the SAL warning */
    DDSRT_WARNING_MSVC_ON(28020);
    CU_ASSERT_FATAL(writer < 0);

    dds_delete(writer);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_implicit_subscriber, deleted)
{
    dds_entity_t participant;
    dds_entity_t reader;
    dds_entity_t topic;
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_subscriber_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    reader = dds_create_reader(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(reader > 0);

    /* The topic and the implicit subscriber are the participant's children. */
    ret = dds_get_children(participant, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 2);

    dds_delete(reader);

    /* Deleting the reader also removes the implicit subscriber. */
    ret = dds_get_children(participant, NULL, 0);
    CU_ASSERT_EQUAL_FATAL(ret, 1);

    dds_delete(topic);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_explicit_subscriber, invalid_topic)
{
    dds_entity_t participant;
    dds_entity_t reader;
    dds_entity_t subscriber;

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    subscriber = dds_create_subscriber(participant, NULL, NULL);
    /* Disable SAL warning on intentional misuse of the API */
    DDSRT_WARNING_MSVC_OFF(28020);
    reader = dds_create_reader(subscriber, 0, NULL, NULL);
    DDSRT_WARNING_MSVC_ON(28020);
    CU_ASSERT_FATAL(reader < 0);

    dds_delete(reader);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, implicit_publisher)
{
    dds_entity_t participant;
    dds_entity_t publisher = 0;
    dds_entity_t writer;
    dds_entity_t topic;
    dds_entity_t child[2], child2[2];
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_publisher_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    writer = dds_create_writer(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(writer > 0);

    ret = dds_get_children(participant, child, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 2);
    if (child[0] == topic) {
        publisher = child[1];
    } else if (child[1] == topic) {
        publisher = child[0];
    } else {
        CU_FAIL_FATAL("topic was not returned");
    }
    CU_ASSERT_NOT_EQUAL_FATAL(publisher, topic);

    CU_ASSERT_FATAL(publisher > 0);
    CU_ASSERT_NOT_EQUAL_FATAL(publisher, writer);

    dds_delete(writer);

    /* The implicit publisher's handle was handed out via dds_get_children
       above, so it is not reaped with the writer: the participant still has
       the same two children. */
    ret = dds_get_children(participant, child2, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 2);
    CU_ASSERT_FATAL((child2[0] == child[0]) || (child2[0] == child[1]));
    CU_ASSERT_FATAL((child2[1] == child[0]) || (child2[1] == child[1]));

    dds_delete(topic);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_children, implicit_subscriber)
{
    dds_entity_t participant;
    dds_entity_t subscriber = 0;
    dds_entity_t reader;
    dds_entity_t topic;
    dds_entity_t child[2], child2[2];
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_subscriber_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    reader = dds_create_reader(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(reader > 0);

    ret = dds_get_children(participant, child, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 2);
    if (child[0] == topic) {
        subscriber = child[1];
    } else if (child[1] == topic) {
        subscriber = child[0];
    } else {
        CU_FAIL_FATAL("topic was not returned");
    }
    CU_ASSERT_NOT_EQUAL_FATAL(subscriber, topic);

    CU_ASSERT_FATAL(subscriber > 0);
    CU_ASSERT_NOT_EQUAL_FATAL(subscriber, reader);

    dds_delete(reader);

    /* The implicit subscriber's handle was handed out via dds_get_children
       above, so it is not reaped with the reader: the participant still has
       the same two children. */
    ret = dds_get_children(participant, child2, 2);
    CU_ASSERT_EQUAL_FATAL(ret, 2);
    CU_ASSERT_FATAL((child2[0] == child[0]) || (child2[0] == child[1]));
    CU_ASSERT_FATAL((child2[1] == child[0]) || (child2[1] == child[1]));

    dds_delete(topic);
    dds_delete(participant);
}
/*************************************************************************************************/
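/* dds_get_parent() on a writer or reader created straight from the participant
 * hands out the implicit publisher or subscriber. As the "promotion" topic
 * names below suggest, handing out the handle turns it into an explicitly
 * managed entity: it survives the deletion of its last child and must itself
 * be deleted, which is expected to return DDS_RETCODE_OK. */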

/*************************************************************************************************/
CU_Test(ddsc_entity_get_parent, implicit_publisher)
{
    dds_entity_t participant;
    dds_entity_t writer;
    dds_entity_t parent;
    dds_entity_t topic;
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_publisher_promotion_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    writer = dds_create_writer(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(writer > 0);

    /* The writer's parent is the implicit publisher, not the participant. */
    parent = dds_get_parent(writer);
    CU_ASSERT_NOT_EQUAL_FATAL(parent, participant);
    CU_ASSERT_FATAL(parent > 0);

    dds_delete(writer);

    ret = dds_delete(parent);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/
CU_Test(ddsc_entity_get_parent, implicit_subscriber)
{
    dds_entity_t participant;
    dds_entity_t reader;
    dds_entity_t parent;
    dds_entity_t topic;
    dds_return_t ret;
    char name[100];

    participant = dds_create_participant(DDS_DOMAIN_DEFAULT, NULL, NULL);
    CU_ASSERT_FATAL(participant > 0);

    topic = dds_create_topic(participant, &RoundTripModule_DataType_desc, create_topic_name("ddsc_entity_implicit_subscriber_promotion_test", name, 100), NULL, NULL);
    CU_ASSERT_FATAL(topic > 0);

    reader = dds_create_reader(participant, topic, NULL, NULL);
    CU_ASSERT_FATAL(reader > 0);

    /* The reader's parent is the implicit subscriber, not the participant. */
    parent = dds_get_parent(reader);
    CU_ASSERT_NOT_EQUAL_FATAL(parent, participant);
    CU_ASSERT_FATAL(parent > 0);

    dds_delete(reader);

    ret = dds_delete(parent);
    CU_ASSERT_EQUAL_FATAL(dds_err_nr(ret), DDS_RETCODE_OK);
    dds_delete(participant);
}
/*************************************************************************************************/

/*************************************************************************************************/

#endif