diff --git a/rcl/package.xml b/rcl/package.xml
index d18f8d4..cb39835 100644
--- a/rcl/package.xml
+++ b/rcl/package.xml
@@ -38,6 +38,7 @@
  <test_depend>rmw_implementation_cmake</test_depend>
  <test_depend>launch</test_depend>
  <test_depend>launch_testing</test_depend>
+  <test_depend>launch_testing_ament_cmake</test_depend>
  <test_depend>osrf_testing_tools_cpp</test_depend>
  <test_depend>test_msgs</test_depend>
diff --git a/rcl/test/CMakeLists.txt b/rcl/test/CMakeLists.txt
index 9cf17c1..a66c95e 100644
--- a/rcl/test/CMakeLists.txt
+++ b/rcl/test/CMakeLists.txt
@@ -1,5 +1,5 @@
find_package(ament_cmake_gtest REQUIRED)
-find_package(ament_cmake_pytest REQUIRED)
+find_package(launch_testing_ament_cmake REQUIRED)
find_package(test_msgs REQUIRED)
@@ -253,9 +253,9 @@ function(test_target_function)
    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/test/test_rmw_impl_id_check${target_suffix}_$<CONFIG>.py"
    INPUT "${CMAKE_CURRENT_BINARY_DIR}/test_rmw_impl_id_check${target_suffix}.py.configure"
  )
-  ament_add_pytest_test(
-    test_rmw_impl_id_check${target_suffix}
+  add_launch_test(
    "${CMAKE_CURRENT_BINARY_DIR}/test/test_rmw_impl_id_check${target_suffix}_$<CONFIG>.py"
+    TARGET test_rmw_impl_id_check${target_suffix}
    APPEND_LIBRARY_DIRS "${extra_lib_dirs}"
    ${SKIP_TEST}
  )
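Note: `add_launch_test` (from launch_testing_ament_cmake) registers a launch_testing file rather than a plain pytest module, with `TARGET` supplying the test name, so the generated `.py` file has to expose a module-level `generate_test_description`. A minimal sketch of that contract, using the `ready_fn` signature this patch targets and a hypothetical `sleep` process in place of the real test executable:

```python
from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import OpaqueFunction


def generate_test_description(ready_fn):
    # Hypothetical stand-in for the process the registered test exercises.
    process_under_test = ExecuteProcess(cmd=['sleep', '1'], name='sleeper')
    launch_description = LaunchDescription([
        process_under_test,
        # Unblock launch_testing once the description is up so test cases may start.
        OpaqueFunction(function=lambda context: ready_fn()),
    ])
    # Returning locals() makes process_under_test injectable into test methods by name.
    return launch_description, locals()
```

`unittest.TestCase` classes defined in the same module are then run while the description is active, or after shutdown when decorated with `@launch_testing.post_shutdown_test()`, as the rewritten test templates below show.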
diff --git a/rcl/test/cmake/rcl_add_custom_launch_test.cmake b/rcl/test/cmake/rcl_add_custom_launch_test.cmake
index 2855346..d4d2e22 100644
--- a/rcl/test/cmake/rcl_add_custom_launch_test.cmake
+++ b/rcl/test/cmake/rcl_add_custom_launch_test.cmake
@@ -15,6 +15,8 @@
if(rcl_add_custom_launch_test_INCLUDED)
  return()
endif()
+
+find_package(launch_testing_ament_cmake REQUIRED)
set(rcl_add_custom_launch_test_INCLUDED TRUE)
macro(rcl_add_custom_launch_test test_name executable1 executable2)
@@ -32,7 +34,11 @@ macro(rcl_add_custom_launch_test test_name executable1 executable2)
    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/test/${test_name}${target_suffix}_$<CONFIG>.py"
    INPUT "${CMAKE_CURRENT_BINARY_DIR}/${test_name}${target_suffix}.py.configure"
  )
-  ament_add_pytest_test(${test_name}${target_suffix} "${CMAKE_CURRENT_BINARY_DIR}/test/${test_name}${target_suffix}_$<CONFIG>.py" ${ARGN})
+  add_launch_test(
+    "${CMAKE_CURRENT_BINARY_DIR}/test/${test_name}${target_suffix}_$<CONFIG>.py"
+    TARGET ${test_name}${target_suffix}
+    ${ARGN}
+  )
  if(TEST ${test_name}${target_suffix})
    set_tests_properties(${test_name}${target_suffix} PROPERTIES DEPENDS "${executable1}${target_suffix} ${executable2}${target_suffix}")
  endif()
diff --git a/rcl/test/rcl/test_rmw_impl_id_check.py.in b/rcl/test/rcl/test_rmw_impl_id_check.py.in
index a3311d6..0c293bb 100644
--- a/rcl/test/rcl/test_rmw_impl_id_check.py.in
+++ b/rcl/test/rcl/test_rmw_impl_id_check.py.in
@@ -3,15 +3,38 @@
import os
from launch import LaunchDescription
-from launch import LaunchService
from launch.actions import ExecuteProcess
-from launch_testing import LaunchTestService
+from launch.actions import OpaqueFunction
+import launch_testing
+import launch_testing.asserts
+import launch_testing.util
-def launch_test(
-    rmw_implementation_env=None, rcl_assert_rmw_id_matches_env=None, expect_failure=False
+import unittest
+
+@launch_testing.parametrize(
+    'rmw_implementation_env,rcl_assert_rmw_id_matches_env',
+    [
+        # Test RMW_IMPLEMENTATION only.
+        ('@rmw_implementation@', None),
+        ('garbage', None),
+        ('', None),
+        # Test RCL_ASSERT_RMW_ID_MATCHES only.
+        (None, 'garbage'),
+        (None, ''),
+        # Test both.
+        ('@rmw_implementation@', '@rmw_implementation@'),
+        ('garbage', '@rmw_implementation@'),
+        ('@rmw_implementation@', 'garbage'),
+        ('garbage', 'garbage'),
+        ('', ''),
+    ]
+)
+def generate_test_description(
+    rmw_implementation_env,
+    rcl_assert_rmw_id_matches_env,
+    ready_fn
):
-    launch_test = LaunchTestService()
    launch_description = LaunchDescription()
    env = dict(os.environ)
@@ -20,62 +43,38 @@ def launch_test(
    if rcl_assert_rmw_id_matches_env is not None:
        env['RCL_ASSERT_RMW_ID_MATCHES'] = rcl_assert_rmw_id_matches_env
-    launch_test.add_test_action(
-        launch_description, ExecuteProcess(
-            cmd=['@TEST_RMW_IMPL_ID_CHECK_EXECUTABLE_NAME@'],
-            name='@TEST_RMW_IMPL_ID_CHECK_EXECUTABLE_NAME@',
-            env=env,
-        )
+    executable_under_test = ExecuteProcess(
+        cmd=['@TEST_RMW_IMPL_ID_CHECK_EXECUTABLE_NAME@'],
+        name='@TEST_RMW_IMPL_ID_CHECK_EXECUTABLE_NAME@',
+        env=env,
    )
+    launch_description.add_action(executable_under_test)
+    # Keep the test fixture alive till tests end.
+    launch_description.add_action(launch_testing.util.KeepAliveProc())
+    launch_description.add_action(
+        OpaqueFunction(function=lambda context: ready_fn())
+    )
+    return launch_description, locals()
-    launch_service = LaunchService()
-    launch_service.include_launch_description(launch_description)
-    rc = launch_test.run(launch_service)
+class TestRMWImplementationIDCheck(unittest.TestCase):
-    if expect_failure:
-        assert rc != 0, 'The executable did not fail as expected.'
-    else:
-        assert rc == 0, "The executable failed with exit code '" + str(rc) + "'. "
+    def test_process_terminates_in_a_finite_amount_of_time(self, executable_under_test):
+        """Test that executable under test terminates in a finite amount of time."""
+        self.proc_info.assertWaitForShutdown(process=executable_under_test, timeout=10)
+@launch_testing.post_shutdown_test()
+class TestRMWImplementationIDCheckAfterShutdown(unittest.TestCase):
-def test_rmw_implementation_env():
-    launch_test(rmw_implementation_env='@rmw_implementation@', expect_failure=False)
-    launch_test(rmw_implementation_env='', expect_failure=False)
-    launch_test(rmw_implementation_env='garbage', expect_failure=True)
-
-
-def test_rcl_assert_rmw_id_matches_env():
-    # Note(dhood): we don't test _only_ setting RCL_ASSERT_RMW_ID_MATCHES because if support for
-    # multiple RMW implementations is available then RMW_IMPLEMENTATION must be used in order to
-    # get non-default RMW implementation(s).
-    launch_test(rcl_assert_rmw_id_matches_env='', expect_failure=False)
-    launch_test(rcl_assert_rmw_id_matches_env='garbage', expect_failure=True)
-
-
-def test_both():
-    launch_test(
-        rmw_implementation_env='@rmw_implementation@',
-        rcl_assert_rmw_id_matches_env='@rmw_implementation@',
-        expect_failure=False)
-    launch_test(
-        rmw_implementation_env='',
-        rcl_assert_rmw_id_matches_env='',
-        expect_failure=False)
-    launch_test(
-        rmw_implementation_env='@rmw_implementation@',
-        rcl_assert_rmw_id_matches_env='garbage',
-        expect_failure=True)
-    launch_test(
-        rmw_implementation_env='garbage',
-        rcl_assert_rmw_id_matches_env='@rmw_implementation@',
-        expect_failure=True)
-    launch_test(
-        rmw_implementation_env='garbage',
-        rcl_assert_rmw_id_matches_env='garbage',
-        expect_failure=True)
-
-
-if __name__ == '__main__':
-    test_rmw_implementation_env()
-    test_rcl_assert_rmw_id_matches_env()
-    test_both()
+    def test_process_terminates_as_expected(
+        self,
+        proc_info,
+        executable_under_test,
+        rmw_implementation_env,
+        rcl_assert_rmw_id_matches_env
+    ):
+        """Test that the executable under test terminates as expected."""
+        assertion_method = self.assertEqual
+        if 'garbage' in (rmw_implementation_env, rcl_assert_rmw_id_matches_env):
+            assertion_method = self.assertNotEqual
+        executable_info = proc_info[executable_under_test]
+        assertion_method(executable_info.returncode, launch_testing.asserts.EXIT_OK)
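Note: a condensed sketch of the parametrization pattern used above, with a hypothetical `sh -c` check standing in for the rmw-id-check executable. launch_testing generates one launch-and-test run per tuple, and because `generate_test_description` returns `locals()`, the parameter values are also injected into any test method that lists them as arguments, which is what lets the post-shutdown assertion pick the expected exit code:

```python
import os
import unittest

from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import OpaqueFunction

import launch_testing
import launch_testing.asserts
import launch_testing.util


@launch_testing.parametrize(
    'switch_value,expect_failure',
    [
        ('ok', False),
        ('garbage', True),
    ]
)
def generate_test_description(switch_value, expect_failure, ready_fn):
    env = dict(os.environ)
    env['HYPOTHETICAL_SWITCH'] = switch_value
    # Hypothetical executable under test: exits 0 only when the switch is 'ok'.
    executable_under_test = ExecuteProcess(
        cmd=['sh', '-c', 'test "$HYPOTHETICAL_SWITCH" = ok'],
        name='checker',
        env=env,
    )
    launch_description = LaunchDescription([
        executable_under_test,
        # Keep the launch alive until the test phases have finished.
        launch_testing.util.KeepAliveProc(),
        OpaqueFunction(function=lambda context: ready_fn()),
    ])
    return launch_description, locals()


@launch_testing.post_shutdown_test()
class TestAfterShutdown(unittest.TestCase):

    def test_exit_code_matches_parameter(self, proc_info, executable_under_test, expect_failure):
        # The same tuple that configured this launch is injected here by argument name.
        assertion = self.assertNotEqual if expect_failure else self.assertEqual
        assertion(proc_info[executable_under_test].returncode, launch_testing.asserts.EXIT_OK)
```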
diff --git a/rcl/test/rcl/test_two_executables.py.in b/rcl/test/rcl/test_two_executables.py.in
index bffaa70..bc1615c 100644
--- a/rcl/test/rcl/test_two_executables.py.in
+++ b/rcl/test/rcl/test_two_executables.py.in
@@ -1,35 +1,49 @@
# generated from rcl/test/test_two_executables.py.in
+import os
+
from launch import LaunchDescription
-from launch import LaunchService
from launch.actions import ExecuteProcess
-from launch_testing import LaunchTestService
+from launch.actions import OpaqueFunction
+
+import launch_testing
+import launch_testing.asserts
+
+import unittest
-def @TEST_NAME@():
-    launch_test = LaunchTestService()
+def generate_test_description(ready_fn):
    launch_description = LaunchDescription()
-    launch_test.add_fixture_action(
-        launch_description, ExecuteProcess(
+    launch_description.add_action(
+        ExecuteProcess(
            cmd=['@TEST_EXECUTABLE1@'],
            name='@TEST_EXECUTABLE1_NAME@',
-        ), exit_allowed=True
-    )
-
-    launch_test.add_test_action(
-        launch_description, ExecuteProcess(
-            cmd=['@TEST_EXECUTABLE2@', '@TEST_EXECUTABLE1_NAME@'],
-            name='@TEST_EXECUTABLE2_NAME@',
        )
    )
-    launch_service = LaunchService()
-    launch_service.include_launch_description(launch_description)
-    rc = launch_test.run(launch_service)
+    executable_under_test = ExecuteProcess(
+        cmd=['@TEST_EXECUTABLE2@', '@TEST_EXECUTABLE1_NAME@'],
+        name='@TEST_EXECUTABLE2_NAME@',
+    )
+    launch_description.add_action(executable_under_test)
-    assert rc == 0, "The launch file failed with exit code '" + str(rc) + "'. "
+    launch_description.add_action(
+        OpaqueFunction(function=lambda context: ready_fn())
+    )
+    return launch_description, locals()
-if __name__ == '__main__':
-    @TEST_NAME@()
+class TestTwoExecutables(unittest.TestCase):
+
+    def @TEST_NAME@(self, executable_under_test):
+        """Test that the executable under test terminates after a finite amount of time."""
+        self.proc_info.assertWaitForShutdown(process=executable_under_test, timeout=10)
+
+
+@launch_testing.post_shutdown_test()
+class TestTwoExecutablesAfterShutdown(unittest.TestCase):
+
+    def @TEST_NAME@(self, executable_under_test):
+        """Test that the executable under test finished cleanly."""
+        launch_testing.asserts.assertExitCodes(self.proc_info, process=executable_under_test)
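Note: for readers who have not seen the `.py.in` templates expanded, once `configure_file` substitutes the `@...@` placeholders the two-executable test boils down to the shape below (executable and test names here are hypothetical). The first process is the fixture the old `add_fixture_action(..., exit_allowed=True)` call used to declare; the second is the executable under test whose shutdown and exit code the two test classes check:

```python
import unittest

from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import OpaqueFunction

import launch_testing
import launch_testing.asserts


def generate_test_description(ready_fn):
    # Hypothetical expansion of @TEST_EXECUTABLE1@: a fixture the checker talks to.
    fixture_process = ExecuteProcess(cmd=['service_fixture'], name='service_fixture')
    # Hypothetical expansion of @TEST_EXECUTABLE2@: the checker whose result matters.
    executable_under_test = ExecuteProcess(
        cmd=['client_checker', 'service_fixture'],
        name='client_checker',
    )
    launch_description = LaunchDescription([
        fixture_process,
        executable_under_test,
        OpaqueFunction(function=lambda context: ready_fn()),
    ])
    return launch_description, locals()


class TestChecker(unittest.TestCase):

    def test_checker_terminates(self, executable_under_test):
        self.proc_info.assertWaitForShutdown(process=executable_under_test, timeout=10)


@launch_testing.post_shutdown_test()
class TestCheckerAfterShutdown(unittest.TestCase):

    def test_checker_exit_code(self, proc_info, executable_under_test):
        launch_testing.asserts.assertExitCodes(proc_info, process=executable_under_test)
```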