fix vendorid used for entities discovered via cloud

commit e0068e6dae
parent e335f4f43e
2 changed files with 15 additions and 2 deletions
@@ -565,7 +565,7 @@ static const struct cfgelem unsupp_cfgelems[] = {
     "<p>Forward all messages from a writer, rather than trying to forward each sample only once. The default of trying to forward each sample only once filters out duplicates for writers in multiple partitions under nearly all circumstances, but may still publish the odd duplicate. Note: the current implementation also can lose in contrived test cases, that publish more than 2**32 samples using a single data writer in conjunction with carefully controlled management of the writer history via cooperating local readers.</p>" },
   { LEAF("RetryOnRejectBestEffort"), 1, "false", ABSOFF(retry_on_reject_besteffort), 0, uf_boolean, 0, pf_boolean,
     "<p>Whether or not to locally retry pushing a received best-effort sample into the reader caches when resource limits are reached.</p>" },
-  { LEAF("GenerateKeyhash"), 1, "true", ABSOFF(generate_keyhash), 0, uf_boolean, 0, pf_boolean,
+  { LEAF("GenerateKeyhash"), 1, "false", ABSOFF(generate_keyhash), 0, uf_boolean, 0, pf_boolean,
     "<p>When true, include keyhashes in outgoing data for topics with keys.</p>" },
   { LEAF("MaxSampleSize"), 1, "2147483647 B", ABSOFF(max_sample_size), 0, uf_memsize, 0, pf_memsize,
     "<p>This setting controls the maximum (CDR) serialised size of samples that DDSI2E will forward in either direction. Samples larger than this are discarded with a warning.</p>" },
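The GenerateKeyhash default flipped in the hunk above controls whether DDSI2E includes an RTPS KeyHash parameter with outgoing data for keyed topics. As a rough illustration of what that parameter holds, the following is a minimal sketch of the RTPS-spec rule rather than DDSI2E's own code; make_keyhash is a hypothetical helper name:

#include <stdint.h>
#include <string.h>

#define KEYHASH_SIZE 16

/* Hedged sketch of the RTPS rule behind the KeyHash parameter, not DDSI2E's
   implementation: if the big-endian CDR serialisation of the topic's key fields
   is bounded by 16 bytes, the key hash is that serialisation zero-padded to 16
   bytes; larger keys use an MD5 digest of the serialisation instead (omitted
   here to keep the sketch self-contained).  Returns 0 on success, -1 when the
   MD5 branch would be required. */
int make_keyhash (uint8_t keyhash[KEYHASH_SIZE], const void *cdrkey, size_t cdrkeysize)
{
  if (cdrkeysize > KEYHASH_SIZE)
    return -1; /* MD5 branch not sketched */
  memset (keyhash, 0, KEYHASH_SIZE);
  memcpy (keyhash, cdrkey, cdrkeysize);
  return 0;
}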
@@ -1074,6 +1074,7 @@ static struct proxy_participant *implicitly_create_proxypp (const nn_guid_t *ppg
 
   if (vendor_is_cloud (vendorid))
   {
+    nn_vendorid_t actual_vendorid;
     /* Some endpoint that we discovered through the DS, but then it must have at least some locators */
     TRACE ((" from-DS %x:%x:%x:%x", PGUID (privguid)));
     /* avoid "no address" case, so we never create the proxy participant for nothing (FIXME: rework some of this) */
@@ -1083,7 +1084,19 @@ static struct proxy_participant *implicitly_create_proxypp (const nn_guid_t *ppg
       goto err;
     }
     nn_log (LC_DISCOVERY, " new-proxypp %x:%x:%x:%x\n", PGUID (*ppguid));
-    new_proxy_participant(ppguid, 0, 0, &privguid, new_addrset(), new_addrset(), &pp_plist, T_NEVER, vendorid, CF_IMPLICITLY_CREATED_PROXYPP, timestamp);
+    /* We need to handle any source of entities, but we really want to try to keep the GIDs (and
+       certainly the systemId component) unchanged for OSPL. The new proxy participant will take
+       the GID from the GUID if it is from a "modern" OSPL that advertises it includes all GIDs in
+       the endpoint discovery; else if it is OSPL it will take at the systemId and fake the rest.
+       However, (1) Cloud filters out the GIDs from the discovery, and (2) DDSI2 deliberately
+       doesn't include the GID for internally generated endpoints (such as the fictitious transient
+       data readers) to signal that these are internal and have no GID (and not including a GID if
+       there is none is quite a reasonable approach). Point (2) means we have no reliable way of
+       determining whether GIDs are included based on the first endpoint, and so there is no point
+       doing anything about (1). That means we fall back to the legacy mode of locally generating
+       GIDs but leaving the system id unchanged if the remote is OSPL. */
+    actual_vendorid = (datap->present & PP_VENDORID) ? datap->vendorid : vendorid;
+    new_proxy_participant(ppguid, 0, 0, &privguid, new_addrset(), new_addrset(), &pp_plist, T_NEVER, actual_vendorid, CF_IMPLICITLY_CREATED_PROXYPP, timestamp);
   }
   else if (ppguid->prefix.u[0] == src_guid_prefix->u[0] && vendor_is_opensplice (vendorid))
   {
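The core of the fix is the actual_vendorid selection added in the hunk above: for a proxy participant created implicitly for an endpoint discovered through a Cloud service, the vendor id is taken from the advertised VENDORID parameter when the forwarded discovery data carries one, and only falls back to the vendor id from the RTPS message header (Cloud's own) when it does not. Below is a minimal standalone sketch of that decision, using simplified stand-in types rather than the real nn_vendorid_t/nn_plist_t definitions and purely illustrative vendor-id values:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t id[2]; } vendorid_t;                      /* stand-in for nn_vendorid_t */
#define PP_VENDORID 1u                                             /* stand-in for the "present" flag bit */
typedef struct { uint32_t present; vendorid_t vendorid; } plist_t; /* stand-in for nn_plist_t */

/* Prefer the vendor id advertised in the discovery data; otherwise keep the one
   from the message header, mirroring
   actual_vendorid = (datap->present & PP_VENDORID) ? datap->vendorid : vendorid; */
static vendorid_t select_actual_vendorid (const plist_t *datap, vendorid_t header_vendorid)
{
  return (datap->present & PP_VENDORID) ? datap->vendorid : header_vendorid;
}

int main (void)
{
  vendorid_t cloud = { { 1, 14 } };   /* vendor id from the message header; value illustrative only */
  vendorid_t origin = { { 1, 2 } };   /* vendor id advertised by the originating participant; illustrative */
  plist_t with_param = { PP_VENDORID, origin };
  plist_t without_param = { 0, { { 0, 0 } } };
  vendorid_t a = select_actual_vendorid (&with_param, cloud);
  vendorid_t b = select_actual_vendorid (&without_param, cloud);
  printf ("with VENDORID parameter: %u.%u, without: %u.%u\n",
          (unsigned) a.id[0], (unsigned) a.id[1], (unsigned) b.id[0], (unsigned) b.id[1]);
  return 0;
}

In the committed code the selected value is simply passed to new_proxy_participant in place of the header vendorid, so the proxy participant records the originating vendor whenever the discovery data makes it available.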
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue