Avoid race causing thread-state pointer aliasing

On thread termination, the thread_states array resets a slot's "state"
to ZERO to indicate that the slot is unused, but it leaves the thread
id unchanged because some platforms have no defined value that can
never occur as a thread id.  A consequence is that a newly created
thread may find multiple slots containing its own thread id, but
generally only one of them will be in a state other than ZERO.
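
The relevant shape in miniature (a hypothetical, condensed sketch: the
real slot is the thread_state1 defined in the diff below, and
slot_matches is an invented name):

    #include <stdbool.h>
    #include <stdint.h>

    enum thread_state { THREAD_STATE_ZERO, THREAD_STATE_ALIVE };

    struct slot {
      enum thread_state state;  /* reset to ZERO when the slot is reaped */
      uintptr_t tid;            /* stand-in for ddsrt_thread_t; left stale */
    };

    /* A reaped slot may still hold a tid that the OS has since reused, so
       a bare tid comparison can match several slots; the state check is
       what should narrow it down to the single live one. */
    static bool slot_matches (const struct slot *s, uintptr_t self)
    {
      return s->state != THREAD_STATE_ZERO && s->tid == self;
    }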

However, the code for create_thread used to set the state to ALIVE
prior to creating the thread, so if the events are scheduled like:

1. thread A: X.state = ALIVE
2. create new thread B, storing tid in X.tid
3. thread A: Y.state = ALIVE
4. new thread B: lookup self (and cache pointer)
5. create new thread C, storing tid in Y.tid
6. new thread C: lookup self (and cache pointer)

Then B will observe two slots in the ALIVE state: X.tid is certain to
match, and Y.tid is undefined (and hence may match as well).  B may
therefore pick Y, and C will (in this schedule) of course always pick
Y.  Each caches its pointer and never looks at X or Y again, except to
update its virtual clock.
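
In condensed form (reusing the simplified types from the sketch above;
lookup_self_prefix is a hypothetical stand-in for the real lookup), the
pre-fix self-lookup scanned for an ALIVE slot with a matching tid, and
nothing stopped it from matching a slot whose tid had not been written
yet:

    /* The state is set to ALIVE before ddsrt_thread_create has stored
       the tid, so Y.tid still holds whatever the slot held previously
       and may compare equal to B's tid. */
    static struct slot *lookup_self_prefix (struct slot *slots, uint32_t n,
                                            uintptr_t self)
    {
      for (uint32_t i = 0; i < n; i++)
        if (slots[i].state == THREAD_STATE_ALIVE && slots[i].tid == self)
          return &slots[i];  /* B may get X or Y; C always gets Y */
      return NULL;
    }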

These virtual clocks are updated non-atomically (by design: each is
private to its thread), so if both B and C use Y they can race each
other in updating the virtual clock and cause the nesting level of the
"awake" state controlling garbage collection to get stuck (or wrap
around, or do other horrible things).  The consequence can be anything
from a somewhat benign variant where GC effectively stops and some
operations (deleting readers and writers and shutting down) block
forever, to use-after-free and the undefined behaviour that implies.
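
The failure mode is a plain lost update.  Simplified to a sequential
sketch (demo_lost_update is illustrative; the real code keeps the nest
count in the low bits of vtime behind VTIME_NEST_MASK, as in the diff
below):

    #include <stdint.h>

    /* Each awake/asleep is "load vtime, store vtime + delta", which is
       safe only while the slot is private to a single thread. */
    static uint32_t demo_lost_update (void)
    {
      uint32_t vtime = 0;
      uint32_t vt_b = vtime;   /* B loads:  nest = 0 */
      uint32_t vt_c = vtime;   /* C loads:  nest = 0 */
      vtime = vt_b + 1u;       /* B stores: nest = 1 */
      vtime = vt_c + 1u;       /* C stores: nest = 1, B's increment lost */
      return vtime;            /* 1, not 2: one of the matching "asleep"
                                  calls will now underflow the nest count */
    }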

This commit avoids the lookup in the newly created threads altogether,
instead passing the slot's address to the thread in its start argument.
It also adds an intermediate state INIT that serves to reserve the slot
until the new thread is actually running.  INIT does make the look-up
safe (if one were to do it), and as it is essentially free and gives
more insight into the state of the system when viewed from a debugger,
it appears a useful addition.
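
For reference, the slot life cycle that results from these changes (as
implemented in the diff below):

    /* Internal-thread slot states after this change:
     *   ZERO    -> INIT     init_thread_state(): reserved under the lock
     *   INIT    -> ALIVE    create_thread_wrapper(): set by the thread itself
     *   ALIVE   -> STOPPED  create_thread_wrapper(): after f() returns
     *   STOPPED -> ZERO     reap_thread_state(), called from join_thread()
     *   INIT    -> ZERO     create_thread_int(): if ddsrt_thread_create fails
     * Application threads instead go ZERO -> LAZILY_CREATED -> ZERO. */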

Signed-off-by: Erik Boasson <eb@ilities.com>
Author: Erik Boasson
Date:   2020-02-26 13:26:20 +01:00 (committed by eboasson)
Commit: 1ee2dfe08f (parent 6e0faae196)
2 changed files with 145 additions and 87 deletions

@@ -50,6 +50,8 @@ typedef int32_t svtime_t; /* signed version */
 enum thread_state {
   THREAD_STATE_ZERO, /* known to be dead */
+  THREAD_STATE_STOPPED, /* internal thread, stopped-but-not-reaped */
+  THREAD_STATE_INIT, /* internal thread, initializing */
   THREAD_STATE_LAZILY_CREATED, /* lazily created in response because an application used it. Reclaimed if the thread terminates, but not considered an error if all of Cyclone is shutdown while this thread hasn't terminated yet */
   THREAD_STATE_ALIVE /* known to be alive - for Cyclone internal threads */
 };
@@ -67,13 +69,34 @@ struct ddsrt_log_cfg;
  *
  * gv is constant for internal threads, i.e., for threads with state = ALIVE
  * gv is non-NULL for internal threads except thread liveliness monitoring
+ *
+ * Q_THREAD_DEBUG enables some really costly debugging stuff that may not be fully
+ * portable (I used it once, might as well keep it)
  */
+#define Q_THREAD_DEBUG 0
+#if Q_THREAD_DEBUG
+#define Q_THREAD_NSTACKS 20
+#define Q_THREAD_STACKDEPTH 10
+#define Q_THREAD_BASE_DEBUG \
+  void *stks[Q_THREAD_NSTACKS][Q_THREAD_STACKDEPTH]; \
+  int stks_depth[Q_THREAD_NSTACKS]; \
+  int stks_idx;
+
+struct thread_state1;
+void thread_vtime_trace (struct thread_state1 *ts1);
+#else /* Q_THREAD_DEBUG */
+#define Q_THREAD_BASE_DEBUG
+#define thread_vtime_trace(ts1) do { } while (0)
+#endif /* Q_THREAD_DEBUG */
+
 #define THREAD_BASE \
   ddsrt_atomic_uint32_t vtime; \
+  ddsrt_atomic_voidp_t gv; \
   enum thread_state state; \
-  ddsrt_atomic_voidp_t gv; \
   ddsrt_thread_t tid; \
-  ddsrt_thread_t extTid; \
+  uint32_t (*f) (void *arg); \
+  void *f_arg; \
+  Q_THREAD_BASE_DEBUG /* note: no semicolon! */ \
   char name[24] /* note: no semicolon! */
 
 struct thread_state_base {
@@ -107,8 +130,6 @@ DDS_EXPORT dds_return_t create_thread (struct thread_state1 **ts, const struct d
 DDS_EXPORT struct thread_state1 *lookup_thread_state_real (void);
 DDS_EXPORT dds_return_t join_thread (struct thread_state1 *ts1);
 DDS_EXPORT void log_stack_traces (const struct ddsrt_log_cfg *logcfg, const struct ddsi_domaingv *gv);
-DDS_EXPORT void reset_thread_state (struct thread_state1 *ts1);
-DDS_EXPORT int thread_exists (const char *name);
 
 DDS_EXPORT inline struct thread_state1 *lookup_thread_state (void) {
   struct thread_state1 *ts1 = tsd_thread_state;
@@ -154,6 +175,7 @@ DDS_EXPORT inline void thread_state_asleep (struct thread_state1 *ts1)
   assert (vtime_awake_p (vt));
   /* nested calls a rare and an extra fence doesn't break things */
   ddsrt_atomic_fence_rel ();
+  thread_vtime_trace (ts1);
   if ((vt & VTIME_NEST_MASK) == 1)
     vt += (1u << VTIME_TIME_SHIFT) - 1u;
   else
@@ -167,6 +189,7 @@ DDS_EXPORT inline void thread_state_awake (struct thread_state1 *ts1, const stru
   assert ((vt & VTIME_NEST_MASK) < VTIME_NEST_MASK);
   assert (gv != NULL);
   assert (ts1->state != THREAD_STATE_ALIVE || gv == ddsrt_atomic_ldvoidp (&ts1->gv));
+  thread_vtime_trace (ts1);
   ddsrt_atomic_stvoidp (&ts1->gv, (struct ddsi_domaingv *) gv);
   ddsrt_atomic_fence_stst ();
   ddsrt_atomic_st32 (&ts1->vtime, vt + 1u);
@@ -179,6 +202,7 @@ DDS_EXPORT inline void thread_state_awake_domain_ok (struct thread_state1 *ts1)
   vtime_t vt = ddsrt_atomic_ld32 (&ts1->vtime);
   assert ((vt & VTIME_NEST_MASK) < VTIME_NEST_MASK);
   assert (ddsrt_atomic_ldvoidp (&ts1->gv) != NULL);
+  thread_vtime_trace (ts1);
   ddsrt_atomic_st32 (&ts1->vtime, vt + 1u);
   /* nested calls a rare and an extra fence doesn't break things */
   ddsrt_atomic_fence_acq ();
@@ -196,6 +220,7 @@ DDS_EXPORT inline void thread_state_awake_to_awake_no_nest (struct thread_state1
   vtime_t vt = ddsrt_atomic_ld32 (&ts1->vtime);
   assert ((vt & VTIME_NEST_MASK) == 1);
   ddsrt_atomic_fence_rel ();
+  thread_vtime_trace (ts1);
   ddsrt_atomic_st32 (&ts1->vtime, vt + (1u << VTIME_TIME_SHIFT));
   ddsrt_atomic_fence_acq ();
 }

@@ -47,6 +47,24 @@ extern inline void thread_state_awake_to_awake_no_nest (struct thread_state1 *ts
 static struct thread_state1 *init_thread_state (const char *tname, const struct ddsi_domaingv *gv, enum thread_state state);
 static void reap_thread_state (struct thread_state1 *ts1);
 
+DDSRT_STATIC_ASSERT(THREAD_STATE_ZERO == 0 &&
+                    THREAD_STATE_ZERO < THREAD_STATE_STOPPED &&
+                    THREAD_STATE_STOPPED < THREAD_STATE_INIT &&
+                    THREAD_STATE_INIT < THREAD_STATE_LAZILY_CREATED &&
+                    THREAD_STATE_INIT < THREAD_STATE_ALIVE);
+
+#if Q_THREAD_DEBUG
+#include <execinfo.h>
+void thread_vtime_trace (struct thread_state1 *ts1)
+{
+  if (++ts1->stks_idx == Q_THREAD_NSTACKS)
+    ts1->stks_idx = 0;
+  const int i = ts1->stks_idx;
+  ts1->stks_depth[i] = backtrace (ts1->stks[i], Q_THREAD_STACKDEPTH);
+}
+#endif
+
 static void *ddsrt_malloc_aligned_cacheline (size_t size)
 {
   /* This wastes some space, but we use it only once and it isn't a
@@ -84,15 +102,6 @@ void thread_states_init (unsigned maxthreads)
   thread_states.nthreads = maxthreads;
   thread_states.ts = ddsrt_malloc_aligned_cacheline (maxthreads * sizeof (*thread_states.ts));
   memset (thread_states.ts, 0, maxthreads * sizeof (*thread_states.ts));
-  /* The compiler doesn't realize that ts is large enough. */
-  DDSRT_WARNING_MSVC_OFF(6386);
-  for (uint32_t i = 0; i < thread_states.nthreads; i++)
-  {
-    thread_states.ts[i].state = THREAD_STATE_ZERO;
-    ddsrt_atomic_st32 (&thread_states.ts[i].vtime, 0);
-    memset (thread_states.ts[i].name, 0, sizeof (thread_states.ts[i].name));
-  }
-  DDSRT_WARNING_MSVC_ON(6386);
 }
 
 /* This thread should be at the same address as before, or never have had a slot
@@ -126,8 +135,18 @@ bool thread_states_fini (void)
   ddsrt_mutex_lock (&thread_states.lock);
   for (uint32_t i = 0; i < thread_states.nthreads; i++)
   {
-    assert (thread_states.ts[i].state != THREAD_STATE_ALIVE);
-    others += (thread_states.ts[i].state == THREAD_STATE_LAZILY_CREATED);
+    switch (thread_states.ts[i].state)
+    {
+      case THREAD_STATE_ZERO:
+        break;
+      case THREAD_STATE_LAZILY_CREATED:
+        others++;
+        break;
+      case THREAD_STATE_STOPPED:
+      case THREAD_STATE_INIT:
+      case THREAD_STATE_ALIVE:
+        assert (0);
+    }
   }
   ddsrt_mutex_unlock (&thread_states.lock);
   if (others == 0)
@@ -143,30 +162,35 @@ bool thread_states_fini (void)
   }
 }
 
+ddsrt_attribute_no_sanitize (("thread"))
 static struct thread_state1 *find_thread_state (ddsrt_thread_t tid)
 {
-  if (thread_states.ts) {
+  if (thread_states.ts)
+  {
+    ddsrt_mutex_lock (&thread_states.lock);
     for (uint32_t i = 0; i < thread_states.nthreads; i++)
     {
-      if (ddsrt_thread_equal (thread_states.ts[i].tid, tid) && thread_states.ts[i].state != THREAD_STATE_ZERO)
+      if (thread_states.ts[i].state > THREAD_STATE_INIT && ddsrt_thread_equal (thread_states.ts[i].tid, tid))
+      {
+        ddsrt_mutex_unlock (&thread_states.lock);
         return &thread_states.ts[i];
+      }
     }
+    ddsrt_mutex_unlock (&thread_states.lock);
   }
   return NULL;
 }
 
 static void cleanup_thread_state (void *data)
 {
-  struct thread_state1 *ts = find_thread_state(ddsrt_thread_self());
-  (void)data;
-  if (ts)
+  struct thread_state1 *ts1 = find_thread_state (ddsrt_thread_self ());
+  (void) data;
+  if (ts1)
   {
-    assert(ts->state == THREAD_STATE_LAZILY_CREATED);
-    assert(vtime_asleep_p(ddsrt_atomic_ld32 (&ts->vtime)));
-    reset_thread_state(ts);
+    assert (ts1->state == THREAD_STATE_LAZILY_CREATED);
+    assert (vtime_asleep_p (ddsrt_atomic_ld32 (&ts1->vtime)));
+    reap_thread_state (ts1);
   }
-  ddsrt_fini();
+  ddsrt_fini ();
 }
 
 static struct thread_state1 *lazy_create_thread_state (ddsrt_thread_t self)
@@ -178,9 +202,9 @@ static struct thread_state1 *lazy_create_thread_state (ddsrt_thread_t self)
   char name[128];
   ddsrt_thread_getname (name, sizeof (name));
   ddsrt_mutex_lock (&thread_states.lock);
-  if ((ts1 = init_thread_state (name, NULL, THREAD_STATE_LAZILY_CREATED)) != NULL) {
+  if ((ts1 = init_thread_state (name, NULL, THREAD_STATE_LAZILY_CREATED)) != NULL)
+  {
     ddsrt_init ();
-    ts1->extTid = self;
     ts1->tid = self;
     DDS_LOG (DDS_LC_TRACE, "started application thread %s\n", name);
     ddsrt_thread_cleanup_push (&cleanup_thread_state, NULL);
@@ -199,38 +223,29 @@ struct thread_state1 *lookup_thread_state_real (void)
     ts1 = lazy_create_thread_state (self);
     tsd_thread_state = ts1;
   }
-  assert(ts1 != NULL);
+  assert (ts1 != NULL);
   return ts1;
 }
 
-struct thread_context {
-  struct thread_state1 *self;
-  uint32_t (*f) (void *arg);
-  void *arg;
-};
-
 static uint32_t create_thread_wrapper (void *ptr)
 {
-  uint32_t ret;
-  struct thread_context *ctx = ptr;
-  struct ddsi_domaingv const * const gv = ddsrt_atomic_ldvoidp (&ctx->self->gv);
+  struct thread_state1 * const ts1 = ptr;
+  struct ddsi_domaingv const * const gv = ddsrt_atomic_ldvoidp (&ts1->gv);
   if (gv)
-    GVTRACE ("started new thread %"PRIdTID": %s\n", ddsrt_gettid (), ctx->self->name);
-  ctx->self->tid = ddsrt_thread_self ();
-  ret = ctx->f (ctx->arg);
-  ddsrt_free (ctx);
+    GVTRACE ("started new thread %"PRIdTID": %s\n", ddsrt_gettid (), ts1->name);
+  assert (ts1->state == THREAD_STATE_INIT);
+  tsd_thread_state = ts1;
+  ddsrt_mutex_lock (&thread_states.lock);
+  ts1->state = THREAD_STATE_ALIVE;
+  ddsrt_mutex_unlock (&thread_states.lock);
+  const uint32_t ret = ts1->f (ts1->f_arg);
+  ddsrt_mutex_lock (&thread_states.lock);
+  ts1->state = THREAD_STATE_STOPPED;
+  ddsrt_mutex_unlock (&thread_states.lock);
+  tsd_thread_state = NULL;
   return ret;
 }
 
-static int find_free_slot (const char *name)
-{
-  for (uint32_t i = 0; i < thread_states.nthreads; i++)
-    if (thread_states.ts[i].state == THREAD_STATE_ZERO)
-      return (int) i;
-  DDS_FATAL ("create_thread: %s: no free slot\n", name ? name : "(anon)");
-  return -1;
-}
-
 const struct config_thread_properties_listelem *lookup_thread_properties (const struct config *config, const char *name)
 {
   const struct config_thread_properties_listelem *e;
@@ -242,36 +257,37 @@ const struct config_thread_properties_listelem *lookup_thread_properties (const
 static struct thread_state1 *init_thread_state (const char *tname, const struct ddsi_domaingv *gv, enum thread_state state)
 {
-  int cand;
-  struct thread_state1 *ts;
-
-  if ((cand = find_free_slot (tname)) < 0)
+  uint32_t i;
+  for (i = 0; i < thread_states.nthreads; i++)
+    if (thread_states.ts[i].state == THREAD_STATE_ZERO)
+      break;
+  if (i == thread_states.nthreads)
+  {
+    DDS_FATAL ("create_thread: %s: no free slot\n", tname ? tname : "(anon)");
     return NULL;
+  }
 
-  ts = &thread_states.ts[cand];
-  ddsrt_atomic_stvoidp (&ts->gv, (struct ddsi_domaingv *) gv);
-  assert (vtime_asleep_p (ddsrt_atomic_ld32 (&ts->vtime)));
-  (void) ddsrt_strlcpy (ts->name, tname, sizeof (ts->name));
-  ts->state = state;
-  return ts;
+  struct thread_state1 * const ts1 = &thread_states.ts[i];
+  assert (vtime_asleep_p (ddsrt_atomic_ld32 (&ts1->vtime)));
+  memset (ts1, 0, sizeof (*ts1));
+  ddsrt_atomic_stvoidp (&ts1->gv, (struct ddsi_domaingv *) gv);
+  (void) ddsrt_strlcpy (ts1->name, tname, sizeof (ts1->name));
+  ts1->state = state;
+  return ts1;
 }
 
-static dds_return_t create_thread_int (struct thread_state1 **ts1, const struct ddsi_domaingv *gv, struct config_thread_properties_listelem const * const tprops, const char *name, uint32_t (*f) (void *arg), void *arg)
+static dds_return_t create_thread_int (struct thread_state1 **ts1_out, const struct ddsi_domaingv *gv, struct config_thread_properties_listelem const * const tprops, const char *name, uint32_t (*f) (void *arg), void *arg)
 {
   ddsrt_threadattr_t tattr;
-  ddsrt_thread_t tid;
-  struct thread_context *ctxt;
-  ctxt = ddsrt_malloc (sizeof (*ctxt));
+  struct thread_state1 *ts1;
   ddsrt_mutex_lock (&thread_states.lock);
-  *ts1 = init_thread_state (name, gv, THREAD_STATE_ALIVE);
-  if (*ts1 == NULL)
+  ts1 = *ts1_out = init_thread_state (name, gv, THREAD_STATE_INIT);
+  if (ts1 == NULL)
     goto fatal;
-  ctxt->self = *ts1;
-  ctxt->f = f;
-  ctxt->arg = arg;
+  ts1->f = f;
+  ts1->f_arg = arg;
   ddsrt_threadattr_init (&tattr);
   if (tprops != NULL)
   {
@@ -286,19 +302,17 @@ static dds_return_t create_thread_int (struct thread_state1 **ts1, const struct
     GVTRACE ("create_thread: %s: class %d priority %"PRId32" stack %"PRIu32"\n", name, (int) tattr.schedClass, tattr.schedPriority, tattr.stackSize);
   }
 
-  if (ddsrt_thread_create (&tid, name, &tattr, &create_thread_wrapper, ctxt) != DDS_RETCODE_OK)
+  if (ddsrt_thread_create (&ts1->tid, name, &tattr, &create_thread_wrapper, ts1) != DDS_RETCODE_OK)
   {
-    (*ts1)->state = THREAD_STATE_ZERO;
+    ts1->state = THREAD_STATE_ZERO;
    DDS_FATAL ("create_thread: %s: ddsrt_thread_create failed\n", name);
    goto fatal;
  }
-  (*ts1)->extTid = tid; /* overwrite the temporary value with the correct external one */
   ddsrt_mutex_unlock (&thread_states.lock);
   return DDS_RETCODE_OK;
 fatal:
   ddsrt_mutex_unlock (&thread_states.lock);
-  ddsrt_free (ctxt);
-  *ts1 = NULL;
+  *ts1_out = NULL;
   abort ();
   return DDS_RETCODE_ERROR;
 }
@@ -317,34 +331,53 @@ dds_return_t create_thread (struct thread_state1 **ts1, const struct ddsi_domain
 static void reap_thread_state (struct thread_state1 *ts1)
 {
   ddsrt_mutex_lock (&thread_states.lock);
-  ts1->state = THREAD_STATE_ZERO;
+  switch (ts1->state)
+  {
+    case THREAD_STATE_INIT:
+    case THREAD_STATE_STOPPED:
+    case THREAD_STATE_LAZILY_CREATED:
+      ts1->state = THREAD_STATE_ZERO;
+      break;
+    case THREAD_STATE_ZERO:
+    case THREAD_STATE_ALIVE:
+      assert (0);
+  }
   ddsrt_mutex_unlock (&thread_states.lock);
 }
 
 dds_return_t join_thread (struct thread_state1 *ts1)
 {
   dds_return_t ret;
-  assert (ts1->state == THREAD_STATE_ALIVE);
-  ret = ddsrt_thread_join (ts1->extTid, NULL);
+  ddsrt_mutex_lock (&thread_states.lock);
+  switch (ts1->state)
+  {
+    case THREAD_STATE_INIT:
+    case THREAD_STATE_STOPPED:
+    case THREAD_STATE_ALIVE:
+      break;
+    case THREAD_STATE_ZERO:
+    case THREAD_STATE_LAZILY_CREATED:
+      assert (0);
+  }
+  ddsrt_mutex_unlock (&thread_states.lock);
+  ret = ddsrt_thread_join (ts1->tid, NULL);
   assert (vtime_asleep_p (ddsrt_atomic_ld32 (&ts1->vtime)));
   reap_thread_state (ts1);
   return ret;
 }
 
-void reset_thread_state (struct thread_state1 *ts1)
-{
-  if (ts1)
-    reap_thread_state (ts1);
-}
-
 void log_stack_traces (const struct ddsrt_log_cfg *logcfg, const struct ddsi_domaingv *gv)
 {
   for (uint32_t i = 0; i < thread_states.nthreads; i++)
   {
-    if (thread_states.ts[i].state != THREAD_STATE_ZERO &&
-        (gv == NULL || ddsrt_atomic_ldvoidp (&thread_states.ts[i].gv) == gv))
+    struct thread_state1 * const ts1 = &thread_states.ts[i];
+    if (ts1->state > THREAD_STATE_INIT && (gv == NULL || ddsrt_atomic_ldvoidp (&ts1->gv) == gv))
     {
-      log_stacktrace (logcfg, thread_states.ts[i].name, thread_states.ts[i].tid);
+      /* There's a race condition here that may cause us to call log_stacktrace with an invalid
+         thread id (or even with a thread id mapping to a newly created thread that isn't really
+         relevant in this context!) but this is an optional debug feature, so it's not worth the
+         bother to avoid it. */
+      log_stacktrace (logcfg, ts1->name, ts1->tid);
     }
   }
 }