cyclonedds/src/core/ddsc/src/dds_init.c
Erik Boasson 57d20e07a4 Rearrange things to make RHC interface public
This makes it possible to write one's own RHC implementation.  This is
not a stable interface.  It shuffles a few things around and renames
some types used throughout the code to stick to having a "dds" prefix
for all the external things.

Signed-off-by: Erik Boasson <eb@ilities.com>
2019-09-11 10:06:24 +02:00

185 lines
5.4 KiB
C

/*
* Copyright(c) 2006 to 2018 ADLINK Technology Limited and others
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0, or the Eclipse Distribution License
* v. 1.0 which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: EPL-2.0 OR BSD-3-Clause
*/
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include "dds/ddsrt/cdtors.h"
#include "dds/ddsrt/environ.h"
#include "dds/ddsrt/process.h"
#include "dds/ddsrt/heap.h"
#include "dds__init.h"
#include "dds/ddsc/dds_rhc.h"
#include "dds__domain.h"
#include "dds__builtin.h"
#include "dds__whc_builtintopic.h"
#include "dds__entity.h"
#include "dds/ddsi/ddsi_iid.h"
#include "dds/ddsi/ddsi_tkmap.h"
#include "dds/ddsi/ddsi_serdata.h"
#include "dds/ddsi/ddsi_threadmon.h"
#include "dds/ddsi/q_entity.h"
#include "dds/ddsi/q_config.h"
#include "dds/ddsi/q_gc.h"
#include "dds/ddsi/q_globals.h"
#include "dds/version.h"
/* Forward declarations for the close/delete callbacks of the top-level
   "cyclonedds" pseudo-entity (defined at the bottom of this file). */
static dds_return_t dds_close (struct dds_entity *e);
static dds_return_t dds_fini (struct dds_entity *e);

/* Entity-deriver vtable for the singleton top-level entity: only close and
   delete do real work here; set_qos and validate_status fall back to the
   shared dummy implementations. */
const struct dds_entity_deriver dds_entity_deriver_cyclonedds = {
  .close = dds_close,
  .delete = dds_fini,
  .set_qos = dds_entity_deriver_dummy_set_qos,
  .validate_status = dds_entity_deriver_dummy_validate_status
};
/* Storage for the library-wide singleton entity behind DDS_CYCLONEDDS_HANDLE. */
dds_cyclonedds_entity dds_global;

/* Lifecycle states of the library-wide singleton; all transitions happen
   while holding the ddsrt singleton mutex (see dds_init/dds_close/dds_fini). */
enum dds_cyclonedds_state {
  CDDS_STATE_ZERO,     /* not initialized */
  CDDS_STATE_STARTING, /* dds_init in progress on some thread */
  CDDS_STATE_READY,    /* initialized; top-level entity usable */
  CDDS_STATE_STOPPING  /* shutdown in progress (dds_close .. dds_fini) */
};
static enum dds_cyclonedds_state dds_state;
/* Tear down (in reverse order) the pieces set up by dds_init prior to entity
   creation, reset the state machine to CDDS_STATE_ZERO and wake any threads
   blocked in dds_init on the singleton condition variable.  Both call sites
   (the dds_init failure path and dds_fini) hold the singleton mutex. */
static void common_cleanup (void)
{
  downgrade_main_thread ();
  thread_states_fini ();
  ddsi_iid_fini ();
  ddsrt_cond_destroy (&dds_global.m_cond);
  ddsrt_mutex_destroy (&dds_global.m_mutex);
  dds_state = CDDS_STATE_ZERO;
  /* dds_init waits on this condition while the state is STARTING/STOPPING;
     tell it to re-examine the state */
  ddsrt_cond_broadcast (ddsrt_get_singleton_cond ());
}
/* Check whether the top-level entity is usable and, if so, take a reference
   on it on behalf of the caller.  Returns false while initialization or
   shutdown is in flight, or when the handle cannot be pinned / is already
   closed.  Must not be called in CDDS_STATE_ZERO. */
static bool cyclonedds_entity_ready (void)
{
  assert (dds_state != CDDS_STATE_ZERO);
  if (dds_state == CDDS_STATE_STARTING || dds_state == CDDS_STATE_STOPPING)
    return false;

  struct dds_handle_link *link;
  if (dds_handle_pin (DDS_CYCLONEDDS_HANDLE, &link) < 0)
    return false;

  ddsrt_mutex_lock (&dds_global.m_entity.m_mutex);
  const bool alive = !dds_handle_is_closed (link);
  if (alive)
  {
    /* grab the caller's reference while still pinned and holding the
       entity mutex, so it cannot disappear underneath us */
    dds_entity_add_ref_locked (&dds_global.m_entity);
  }
  ddsrt_mutex_unlock (&dds_global.m_entity.m_mutex);
  dds_handle_unpin (link);
  return alive;
}
/* Initialize the library-wide singleton (idempotent: each successful call
   adds a reference to the top-level entity).  Returns DDS_RETCODE_OK on
   success and DDS_RETCODE_ERROR when initialization fails. */
dds_return_t dds_init (void)
{
  dds_return_t ret;
  ddsrt_init ();
  ddsrt_mutex_t * const init_mutex = ddsrt_get_singleton_mutex ();
  ddsrt_cond_t * const init_cond = ddsrt_get_singleton_cond ();
  ddsrt_mutex_lock (init_mutex);
  /* Wait out a concurrent init/shutdown: cyclonedds_entity_ready returns
     false while STARTING or STOPPING (and, as a side effect, takes this
     caller's reference when the entity is READY and still open). */
  while (dds_state != CDDS_STATE_ZERO && !cyclonedds_entity_ready ())
    ddsrt_cond_wait (init_cond, init_mutex);
  switch (dds_state)
  {
    case CDDS_STATE_READY:
      /* reference already taken by cyclonedds_entity_ready above */
      assert (dds_global.m_entity.m_hdllink.hdl == DDS_CYCLONEDDS_HANDLE);
      ddsrt_mutex_unlock (init_mutex);
      return DDS_RETCODE_OK;
    case CDDS_STATE_ZERO:
      /* this thread performs the one-time initialization; STARTING keeps
         other callers waiting in the loop above */
      dds_state = CDDS_STATE_STARTING;
      break;
    default:
      /* defensive: the wait loop is only expected to exit in ZERO or READY */
      ddsrt_mutex_unlock (init_mutex);
      ddsrt_fini ();
      return DDS_RETCODE_ERROR;
  }
  ddsrt_mutex_init (&dds_global.m_mutex);
  ddsrt_cond_init (&dds_global.m_cond);
  ddsi_iid_init ();
  thread_states_init_static ();
  /* 64: thread-state table size — presumably an upper bound on concurrently
     known threads; TODO confirm against thread_states_init */
  thread_states_init (64);
  upgrade_main_thread ();
  if (dds_handle_server_init () != DDS_RETCODE_OK)
  {
    DDS_ERROR ("Failed to initialize internal handle server\n");
    ret = DDS_RETCODE_ERROR;
    goto fail_handleserver;
  }
  /* create the top-level entity and take the initial reference on behalf of
     this caller */
  dds_entity_init (&dds_global.m_entity, NULL, DDS_KIND_CYCLONEDDS, NULL, NULL, 0);
  dds_global.m_entity.m_iid = ddsi_iid_gen ();
  dds_global.m_entity.m_flags = DDS_ENTITY_IMPLICIT;
  ddsrt_mutex_lock (&dds_global.m_entity.m_mutex);
  dds_entity_add_ref_locked (&dds_global.m_entity);
  ddsrt_mutex_unlock (&dds_global.m_entity.m_mutex);
  dds_state = CDDS_STATE_READY;
  ddsrt_mutex_unlock (init_mutex);
  return DDS_RETCODE_OK;

fail_handleserver:
  /* undo everything done since entering STARTING; common_cleanup resets the
     state to ZERO and wakes any waiting dds_init calls */
  assert (dds_state == CDDS_STATE_STARTING);
  common_cleanup ();
  ddsrt_mutex_unlock (init_mutex);
  ddsrt_fini ();
  return ret;
}
/* Deriver "close" callback of the top-level entity: flip the singleton from
   READY to STOPPING under the singleton mutex and wake any dds_init callers
   blocked waiting for a state change.  The actual teardown happens later in
   dds_fini. */
static dds_return_t dds_close (struct dds_entity *e)
{
  ddsrt_mutex_t * const guard = ddsrt_get_singleton_mutex ();
  ddsrt_cond_t * const waiters = ddsrt_get_singleton_cond ();
  (void) e;

  ddsrt_mutex_lock (guard);
  assert (dds_state == CDDS_STATE_READY);
  dds_state = CDDS_STATE_STOPPING;
  /* let blocked dds_init calls re-evaluate dds_state */
  ddsrt_cond_broadcast (waiters);
  ddsrt_mutex_unlock (guard);
  return DDS_RETCODE_OK;
}
/* Deriver "delete" callback of the top-level entity: performs the final
   teardown of the run-time once all domains are gone.  Only reached after
   dds_close has moved the state to STOPPING. */
static dds_return_t dds_fini (struct dds_entity *e)
{
  (void) e;
  ddsrt_mutex_t * const init_mutex = ddsrt_get_singleton_mutex ();
  /* If there are multiple domains shutting down simultaneously, the one "deleting" the top-level
     entity (and thus arriving here) may have overtaken another thread that is still in the process
     of deleting its domain object. For most entities such races are not an issue, but here we tear
     down the run-time, so here we must wait until everyone else is out. */
  ddsrt_mutex_lock (&dds_global.m_mutex);
  while (!ddsrt_avl_is_empty (&dds_global.m_domains))
    ddsrt_cond_wait (&dds_global.m_cond, &dds_global.m_mutex);
  ddsrt_mutex_unlock (&dds_global.m_mutex);
  ddsrt_mutex_lock (init_mutex);
  assert (dds_state == CDDS_STATE_STOPPING);
  dds_entity_final_deinit_before_free (e);
  dds_handle_server_fini ();
  /* resets dds_state to ZERO and wakes any dds_init calls blocked on the
     singleton condition variable */
  common_cleanup ();
  ddsrt_mutex_unlock (init_mutex);
  ddsrt_fini ();
  /* NOTE(review): NO_DATA rather than OK — presumably signals the generic
     entity-delete path that this entity's storage needs no further freeing;
     confirm against the dds_delete implementation */
  return DDS_RETCODE_NO_DATA;
}