diff --git a/matching/subscriptions.py b/matching/subscriptions.py
index 13743ab..d3d461e 100644
--- a/matching/subscriptions.py
+++ b/matching/subscriptions.py
@@ -47,7 +47,7 @@ class TKind(Enum):
     # language=PythonRegExp
     ns_sep = r"(?P<ns_sep>::)"
     # language=PythonRegExp
-    unknown_symbol = r"(?P<unknown_symbol>\?)"
+    unknown_token = r"(?P<unknown_token>.)"
 
     def __repr__(self):
         return self.name
@@ -128,7 +128,6 @@ TR_BLACKLIST = [
 
 
 def cl_deps_to_tr_deps(matches: Set[Tuple], tr: TrContext, cl: ClContext):
-
     ##################################################
     # Narrow down matches
     ##################################################
@@ -314,7 +313,11 @@ def match_and_modify_children(node: ASTEntry, match_func):
 
 
 def sanitize(sig: str):
-    ast = build_ast(sig)
+    try:
+        ast = build_ast(sig)
+    except Exception as e:
+        print(f"[ERROR] Could not build AST for {sig}, returning it as-is. {e}")
+        return sig
 
     def _remove_qualifiers(node: ASTEntry):
         match node:
@@ -341,7 +344,6 @@ def sanitize(sig: str):
                               callee_ret,
                               ASTNode('()', children=[*callee_ptr, ASTNode('()', bind_args)]),
                               ASTNode('()') as replacement_args])]:
-
                 return [callee_ret] + head + [ASTNode('()', callee_ptr, parent,
                                               ASTLeaf(TKind.par_open, '('),
                                               ASTLeaf(TKind.par_close, ')')),
@@ -389,7 +391,7 @@ def sanitize(sig: str):
                     return None
                 case ASTNode():
                     return match_and_modify_children(node, _child_seq_matcher)
-                case ASTLeaf(TKind.ref | TKind.ptr | TKind.ns_sep | TKind.unknown_symbol):
+                case ASTLeaf(TKind.ref | TKind.ptr | TKind.ns_sep | TKind.unknown_token):
                     return None
 
         return node
@@ -420,14 +422,22 @@ def sanitize(sig: str):
 
         return node
 
-    ast = traverse(ast, _remove_qualifiers)
-    ast = traverse(ast, _remove_std_wrappers)
-    ast = traverse(ast, _remove_std_bind)
-    ast = traverse(ast, _unwrap_lambda)
-    ast = traverse(ast, _remove_artifacts)
-    ast = traverse(ast, _replace_verbose_types)
-    ast = traverse(ast, _replace_lambda_enumerations)
-    #ast = _remove_return_types(ast)
+    sanitization_actions = [
+        _remove_qualifiers,
+        _remove_std_wrappers,
+        _remove_std_bind,
+        _unwrap_lambda,
+        _remove_artifacts,
+        _replace_verbose_types,
+        _replace_lambda_enumerations
+    ]
+
+    for action in sanitization_actions:
+        try:
+            ast = traverse(ast, action)
+        except Exception as e:
+            print(f"[WARN] Could not apply {action.__name__} sanitization, skipped: {e}")
+
     return ast
diff --git a/trace-analysis.ipynb b/trace-analysis.ipynb
index d093228..c8c30b6 100644
--- a/trace-analysis.ipynb
+++ b/trace-analysis.ipynb
@@ -61,7 +61,7 @@
     "# Path to trace directory (e.g. ~/.ros/my-trace/ust) or to a converted trace file.\n",
     "# Using the path \"/ust\" at the end is optional but greatly reduces processing time\n",
     "# if kernel traces are also present.\n",
-    "TR_PATH = \"/home/max/Projects/optimization_framework/artifacts/iteration0_worker1/aw_replay/tracing/scenario-trace/ust\"\n",
+    "TR_PATH = \"/home/max/Projects/optimization_framework/artifacts/iteration0_worker0/aw_replay/tracing/scenario-trace/ust\"\n",
     "\n",
     "# Path to the folder all artifacts from this notebook are saved to.\n",
     "# This entails plots as well as data tables.\n",
@@ -119,10 +119,10 @@
     "\n",
     "# All topics containing any of these RegEx patterns are considered output topics in E2E latency calculations\n",
     "# E.g. r\"^/control/\" will cover all control topics\n",
-    "E2E_OUTPUT_TOPIC_PATTERNS = [r\"^/control/\", r\"^/vehicle\"]\n",
+    "E2E_OUTPUT_TOPIC_PATTERNS = [r\"^/control/.*?/control_cmd\"]\n",
     "# All topics containing any of these RegEx patterns are considered input topics in E2E latency calculations\n",
     "# E.g. r\"^/sensing/\" will cover all sensing topics\n",
-    "E2E_INPUT_TOPIC_PATTERNS = [r\"^/vehicle/status/\", r\"^/sensing/imu\"]\n",
+    "E2E_INPUT_TOPIC_PATTERNS = [r\"^/vehicle/status/\", r\"^/sensing/\"]\n",
     "\n",
     "# This code overrides the above constants with environment variables, do not edit.\n",
     "for env_key, env_value in os.environ.items():\n",
@@ -223,7 +223,7 @@
    "execution_count": null,
    "outputs": [],
    "source": [
-    "for topic in topics:\n",
+    "for topic in sorted(topics, key=lambda t: t.name):\n",
     "    topic: TrTopic\n",
     "    print(f\"{topic.name:.<120s} | {sum(map(lambda p: len(p.instances), topic.publishers))}\")"
    ],
@@ -294,7 +294,7 @@
     "node_namespace_mapping = {\n",
     "    'perception': 'perception',\n",
     "    'sensing': 'sensing',\n",
-    "    'planning': 'planning',\n",
+    "    'plannitokenizeng': 'planning',\n",
     "    'control': 'control',\n",
     "    'awapi': None,\n",
     "    'autoware_api': None,\n",
@@ -1380,7 +1380,10 @@
     "\n",
     "out_df = pd.DataFrame(columns=[\"path\", \"timestamp\", \"e2e_latency\"])\n",
     "for key, paths in items:\n",
-    "    path_records = [(\" -> \".join(key), p[0].timestamp, p[0].timestamp - p[-1].timestamp) for p in paths]\n",
+    "    path_records = [{\"path\": \" -> \".join(key),\n",
+    "                     \"timestamp\": p[0].timestamp,\n",
+    "                     \"e2e_latency\": p[0].timestamp - p[-1].timestamp}\n",
+    "                    for p in paths]\n",
     "    out_df.append(path_records)\n",
     "    print(\n",
     "        f\"======== {len(paths)}x: {sum(map(lambda p: p[0].timestamp - p[-1].timestamp, paths)) / len(paths) * 1000:.3f}ms\")\n",
@@ -1464,4 +1467,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
+}
\ No newline at end of file