diff --git a/gdb/testsuite/gdb.rocm/coop-group-grid-sync.cpp b/gdb/testsuite/gdb.rocm/coop-group-grid-sync.cpp
new file mode 100644
index 00000000000..d8b1ead0537
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/coop-group-grid-sync.cpp
@@ -0,0 +1,151 @@
+/* Copyright 2026 Free Software Foundation, Inc.
+ Copyright (C) 2026 Advanced Micro Devices, Inc. All rights reserved.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+   along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* This program exercises a single-device cooperative kernel launch.
+ The kernel uses cooperative_groups::this_grid().sync (), which is
+ implemented on AMD GPUs using the Global Wave Sync (GWS) hardware
+ mechanism. It is launched through hipLaunchCooperativeKernel so the
+ whole grid is co-resident and can synchronize at the grid level.
+
+ The companion .exp file uses this program to exercise the debugger
+ while a kernel is running with GWS-based grid synchronization. */
+
+#include <hip/hip_runtime.h>
+#include <hip/hip_cooperative_groups.h>
+#include <cstdio>
+#include <cstdlib>
+
+#include "rocm-test-utils.h"
+
+namespace cg = cooperative_groups;
+
+/* Use small but non-trivial launch dimensions so the test runs quickly
+ while still creating multiple waves participating in the GWS
+ barrier. GROUP_SIZE = 64 means 1 wave on wave64 architectures and
+ 2 waves on wave32 architectures, for a total of 2 (or 4) waves.
+
+ N is exactly one slot per thread so Phase 2 has no intra-thread race
+ and the host-side expected values can be computed straightforwardly. */
+
+constexpr unsigned int group_size = 64;
+constexpr unsigned int num_groups = 2;
+constexpr int total_threads = group_size * num_groups;
+constexpr int N = total_threads;
+
+/* Two-phase grid-cooperative kernel.
+
+ Phase 1: every thread writes its slot in IN.
+ GWS : the whole grid synchronizes via cooperative_groups::this_grid ().
+ Phase 2: every thread reads a slot owned by a thread in a *different*
+ workgroup and stores it into OUT.
+
+ Because the Phase 2 read targets a slot written by a different
+ workgroup during Phase 1, this is only correct if grid.sync ()
+ actually synchronized every wave in the grid. */
+
+__global__ void
+coop_grid_sync_kernel (int *in_buf, int *out_buf)
+{
+ cg::grid_group grid = cg::this_grid ();
+
+ int tid = blockIdx.x * blockDim.x + threadIdx.x;
+
+ /* Phase 1: each thread writes its own value. */
+ in_buf[tid] = tid + 1; /* before-sync line */
+
+ /* Grid-wide synchronization (GWS). */
+ grid.sync (); /* sync line */
+
+ /* Phase 2: read the slot owned by a thread in a different workgroup
+ and store it in our own slot of the output buffer. */
+ int peer = (tid + (int) blockDim.x) % total_threads;
+ int v = in_buf[peer]; /* after-sync line */
+ out_buf[tid] = v;
+}
+
+int
+main ()
+{
+ int device_id = 0;
+ CHECK (hipSetDevice (device_id));
+
+ hipDeviceProp_t props;
+ CHECK (hipGetDeviceProperties (&props, device_id));
+ if (!props.cooperativeLaunch)
+ {
+ printf ("Device does not support cooperative launch, skipping.\n");
+ return 0;
+ }
+
+ int *in_d = nullptr;
+ int *out_d = nullptr;
+ CHECK (hipMalloc ((void **) &in_d, N * sizeof (int)));
+ CHECK (hipMalloc ((void **) &out_d, N * sizeof (int)));
+ CHECK (hipMemset (in_d, 0, N * sizeof (int)));
+ CHECK (hipMemset (out_d, 0, N * sizeof (int)));
+
+ dim3 grid_dim (num_groups, 1, 1);
+ dim3 block_dim (group_size, 1, 1);
+
+ void *kernel_args[2];
+ kernel_args[0] = (void *) &in_d;
+ kernel_args[1] = (void *) &out_d;
+
+ /* Launch the kernel cooperatively. This is the API that enables
+ grid.sync () support via GWS. */
+ CHECK (hipLaunchCooperativeKernel (
+    reinterpret_cast<void *> (coop_grid_sync_kernel),
+ grid_dim, block_dim, kernel_args,
+ /*sharedMem=*/0, /*stream=*/0));
+
+ CHECK (hipDeviceSynchronize ());
+
+ int out_h[N];
+ CHECK (hipMemcpy (out_h, out_d, N * sizeof (int), hipMemcpyDeviceToHost));
+
+ CHECK (hipFree (in_d));
+ CHECK (hipFree (out_d));
+
+ /* Each thread tid stores in_buf[(tid + group_size) % total_threads]
+ into out_buf[tid]. After Phase 1, in_buf[k] == k + 1, so the
+ expected value at out_h[tid] is ((tid + group_size) %
+ total_threads) + 1. */
+ int errors = 0;
+ for (int tid = 0; tid < N; tid++)
+ {
+ int peer = (tid + (int) group_size) % total_threads;
+ int expected = peer + 1;
+ if (out_h[tid] != expected)
+ {
+ if (errors < 4)
+ fprintf (stderr,
+ "mismatch at %d: got %d, expected %d\n",
+ tid, out_h[tid], expected);
+ errors++;
+ }
+ }
+
+ if (errors != 0)
+ {
+ fprintf (stderr, "FAILED: %d mismatches\n", errors);
+ return EXIT_FAILURE;
+ }
+
+ printf ("PASSED\n");
+ return EXIT_SUCCESS;
+}
diff --git a/gdb/testsuite/gdb.rocm/coop-group-grid-sync.exp b/gdb/testsuite/gdb.rocm/coop-group-grid-sync.exp
new file mode 100644
index 00000000000..50de494add7
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/coop-group-grid-sync.exp
@@ -0,0 +1,173 @@
+# Copyright 2026 Free Software Foundation, Inc.
+# Copyright (C) 2026 Advanced Micro Devices, Inc. All rights reserved.
+
+# This file is part of GDB.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Tests for debugging a single-device cooperative kernel that uses
+# cooperative_groups::this_grid ().sync (), implemented on AMD GPUs
+# using the Global Wave Sync (GWS) hardware mechanism.
+#
+# The test verifies that GDB can:
+# - hit a breakpoint placed before grid.sync () in a cooperatively
+# launched kernel (i.e. when the dispatch was created with
+# hipLaunchCooperativeKernel),
+# - list the cooperative dispatch via "info dispatches",
+# - observe multiple GPU waves participating in the grid sync,
+# - hit a breakpoint placed after grid.sync (), and
+# - resume the kernel to normal completion.
+#
+# This intentionally exercises the same cooperative-launch +
+# this_grid ().sync () code path that earlier CQE GWS tests used, but
+# integrated into the dejagnu testsuite.
+
+load_lib rocm.exp
+
+require allow_hipcc_tests
+
+standard_testfile .cpp
+
+if {[build_executable "failed to prepare" $testfile $srcfile {debug hip}]} {
+ return
+}
+
+# Continue from main until we either hit a breakpoint inside
+# coop_grid_sync_kernel or the program exits normally because the
+# device does not support cooperative launch. Returns 1 if the kernel
+# breakpoint was hit (test can proceed), 0 otherwise.
+
+proc continue_to_kernel_or_skip {tag} {
+ gdb_test_multiple "continue" $tag {
+ -re "hit Breakpoint $::decimal,\[^\r\n\]*coop_grid_sync_kernel\[^\r\n\]*\r\n\[^\r\n\]*\r\n$::gdb_prompt " {
+ pass $tag
+ return 1
+ }
+ -re "Device does not support cooperative launch\[^\r\n\]*\r\n" {
+ unsupported "device does not support cooperative launch"
+ return 0
+ }
+ -re "\\\[Inferior 1 \[^\r\n\]* exited normally\\\]\[^\r\n\]*\r\n$::gdb_prompt " {
+ # Program skipped without our diagnostic line, but still
+ # exited cleanly. Treat as unsupported.
+ unsupported $tag
+ return 0
+ }
+ }
+ return 0
+}
+
+# Test: hit a breakpoint inside a cooperative kernel before grid.sync (),
+# inspect the GPU state, then continue past grid.sync () and to program
+# exit.
+
+proc_with_prefix test_break_around_grid_sync {} {
+ clean_restart
+ gdb_load $::binfile
+
+ with_rocm_gpu_lock {
+ if {![runto_main]} {
+ return
+ }
+
+ # Place a breakpoint before grid.sync (). Use "allow-pending"
+ # because the kernel is loaded as device code at dispatch time.
+ gdb_breakpoint \
+ [gdb_get_line_number "before-sync line"] allow-pending
+
+ # The kernel must hit the pre-sync breakpoint.
+ if {![continue_to_kernel_or_skip "stop before grid.sync"]} {
+ return
+ }
+
+ # We expect at least one AMDGPU Wave thread to be present.
+ # With a 64-thread block size and 2 workgroups we should have
+ # 2 waves on wave64 architectures (or 4 waves on wave32
+ # architectures).
+ set waves [info_thread_get_wave_list]
+ gdb_assert {[llength $waves] >= 2} \
+ "at least two AMDGPU waves are present pre-sync"
+
+ # "info dispatches" should show the cooperative dispatch.
+ gdb_test "info dispatches" \
+ ".*AMDGPU Dispatch\[^\r\n\]*coop_grid_sync_kernel.*" \
+ "info dispatches lists cooperative dispatch"
+
+ # Replace the pre-sync breakpoint with one after grid.sync ().
+ # The after-sync breakpoint fires once all waves have crossed
+ # the GWS barrier, which proves GWS-protected code can be
+ # debugged across the barrier.
+ delete_breakpoints
+ gdb_breakpoint \
+ [gdb_get_line_number "after-sync line"] allow-pending
+
+ gdb_test "continue" \
+ "hit Breakpoint $::decimal,\[^\r\n\]*coop_grid_sync_kernel\[^\r\n\]*\r\n\[^\r\n\]*" \
+ "stop after grid.sync"
+
+ # The kernel should be able to run to completion afterwards.
+ delete_breakpoints
+ gdb_test "continue" \
+ "\\\[Inferior 1 \[^\r\n\]* exited normally\\\]" \
+ "continue to program end"
+ }
+}
+
+# Test: place a breakpoint inside the cooperative kernel, then iterate
+# over every stopped GPU wave and verify that each one has a backtrace
+# pointing into coop_grid_sync_kernel. This exercises the wave
+# organization a debugger sees when several waves participate in a GWS
+# barrier.
+
+proc_with_prefix test_threads_in_coop_kernel {} {
+ clean_restart
+ gdb_load $::binfile
+
+ with_rocm_gpu_lock {
+ if {![runto_main]} {
+ return
+ }
+
+ gdb_breakpoint \
+ [gdb_get_line_number "before-sync line"] allow-pending
+
+ if {![continue_to_kernel_or_skip "stop in cooperative kernel"]} {
+ return
+ }
+
+ # Collect the list of AMDGPU waves and check that every one of
+ # them has a backtrace pointing into coop_grid_sync_kernel.
+ set waves [info_thread_get_wave_list]
+ gdb_assert {[llength $waves] >= 2} "multiple GPU waves listed"
+
+ foreach wave $waves {
+ with_test_prefix "wave $wave" {
+ gdb_test "thread $wave" "Switching to thread $wave.*" \
+ "switch to wave $wave"
+ gdb_test "bt 1" \
+ "#0\[^\r\n\]*coop_grid_sync_kernel\[^\r\n\]*" \
+ "backtrace inside coop_grid_sync_kernel"
+ }
+ }
+
+ # Resume to completion.
+ delete_breakpoints
+ gdb_test "continue" \
+ "\\\[Inferior 1 \[^\r\n\]* exited normally\\\]" \
+ "continue to program end"
+ }
+}
+
+test_break_around_grid_sync
+test_threads_in_coop_kernel
diff --git a/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.cpp b/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.cpp
new file mode 100644
index 00000000000..d405975816a
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.cpp
@@ -0,0 +1,209 @@
+/* Copyright 2026 Free Software Foundation, Inc.
+ Copyright (C) 2026 Advanced Micro Devices, Inc. All rights reserved.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+   along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* This program exercises a multi-device cooperative kernel launch via
+ hipLaunchCooperativeKernelMultiDevice. The kernel uses both
+ cooperative_groups::this_grid ().sync () (intra-device GWS) and
+ cooperative_groups::this_multi_grid ().sync () (cross-device sync),
+ matching the historical CQE GWS test exercised on multiple ASICs.
+
+ The program is intended to be driven by the dejagnu testsuite (see
+ coop-group-multi-grid-sync.exp). When run on a system with fewer
+ than two GPUs that each support cooperative multi-device launch, the
+ program prints a message and exits cleanly so the .exp file can mark
+ the test UNSUPPORTED. */
+
+#include <hip/hip_runtime.h>
+#include <hip/hip_cooperative_groups.h>
+#include <cstdio>
+#include <cstdlib>
+
+#include "rocm-test-utils.h"
+
+namespace cg = cooperative_groups;
+
+constexpr int MAX_GPUS = 8;
+constexpr int N_PER_DEVICE = 256;
+constexpr unsigned int group_size = 64;
+constexpr unsigned int num_groups = 2;
+
+/* Two-phase cooperative kernel running on every device.
+
+ Phase 1: each thread writes (grid_rank + 1) * (i + 1) into its slot.
+ GWS : intra-device grid synchronization via this_grid ().sync ().
+ MGWS : cross-device synchronization via this_multi_grid ().sync ().
+ Phase 2: thread 0 of each grid writes its partial sum into
+ result[grid_rank + 1] (a slot in the shared host-coherent buffer).
+ Then thread 0 of grid 0 aggregates the partials into result[0]. */
+
+__global__ void
+coop_multi_grid_sync_kernel (int *data, int n_elements, long *result)
+{
+ cg::grid_group grid = cg::this_grid ();
+ cg::multi_grid_group mgrid = cg::this_multi_grid ();
+
+ int tid = blockIdx.x * blockDim.x + threadIdx.x;
+ int stride = gridDim.x * blockDim.x;
+
+ unsigned int grid_rank = mgrid.grid_rank ();
+
+ /* Phase 1: each thread writes data into its slot. */
+ for (int i = tid; i < n_elements; i += stride)
+ data[i] = (int) ((grid_rank + 1) * (i + 1)); /* before-grid-sync line */
+
+ /* Intra-device grid sync (GWS). */
+ grid.sync (); /* grid-sync line */
+
+ /* Each grid computes its partial sum and stores it in the shared
+ buffer at slot (grid_rank + 1). Slot 0 is reserved for the
+ final total. */
+ if (tid == 0)
+ {
+ long sum = 0;
+ for (int i = 0; i < n_elements; i++)
+ sum += data[i];
+ result[grid_rank + 1] = sum; /* after-grid-sync line */
+ }
+
+ /* Cross-device sync (multi-grid GWS). */
+ mgrid.sync (); /* multi-grid-sync line */
+
+ /* Grid 0 aggregates partial sums from all grids into result[0]. */
+ if (grid_rank == 0 && tid == 0)
+ {
+ long total = 0;
+ unsigned int n_grids = mgrid.num_grids ();
+ for (unsigned int g = 0; g < n_grids; g++)
+ total += result[g + 1];
+ result[0] = total; /* after-multi-grid-sync line */
+ }
+}
+
+int
+main ()
+{
+ int n_gpus = 0;
+ CHECK (hipGetDeviceCount (&n_gpus));
+ if (n_gpus < 2)
+ {
+ printf ("Multi-device cooperative test needs >= 2 GPUs"
+ " (found %d), skipping.\n", n_gpus);
+ return 0;
+ }
+ if (n_gpus > MAX_GPUS)
+ n_gpus = MAX_GPUS;
+
+ /* Confirm every device supports cooperativeMultiDeviceLaunch. */
+ hipDeviceProp_t props[MAX_GPUS];
+ for (int i = 0; i < n_gpus; i++)
+ {
+ CHECK (hipSetDevice (i));
+ CHECK (hipGetDeviceProperties (&props[i], i));
+ if (!props[i].cooperativeMultiDeviceLaunch)
+ {
+ printf ("Device %d does not support cooperative multi-device"
+ " launch, skipping.\n", i);
+ return 0;
+ }
+ }
+
+ /* eligibility marker - n_gpus is final. The dejagnu .exp file
+ places a breakpoint on this line so it can read the actual number
+ of participating GPUs from the inferior's "n_gpus" local before
+ launching the cooperative dispatch. */
+ int *data_d[MAX_GPUS] = {};
+ hipStream_t stream[MAX_GPUS] = {};
+
+ /* The shared result buffer is host-coherent so every device can
+ read/write it without explicit memcpys. Layout:
+ result[0] -- final total (written by grid 0 only)
+ result[1 .. n_gpus] -- per-grid partial sums. */
+ long *result_h = nullptr;
+
+ for (int i = 0; i < n_gpus; i++)
+ {
+ CHECK (hipSetDevice (i));
+ CHECK (hipMalloc ((void **) &data_d[i], N_PER_DEVICE * sizeof (int)));
+ CHECK (hipMemset (data_d[i], 0, N_PER_DEVICE * sizeof (int)));
+ CHECK (hipStreamCreate (&stream[i]));
+ }
+
+ CHECK (hipSetDevice (0));
+ CHECK (hipHostMalloc ((void **) &result_h, (n_gpus + 1) * sizeof (long),
+ hipHostMallocCoherent));
+ for (int i = 0; i < n_gpus + 1; i++)
+ result_h[i] = 0;
+
+ /* Build per-device launch parameters. Each device gets the same
+ grid/block dims and a pointer to *its own* data buffer, but the
+ result pointer is the single shared host-coherent buffer. */
+ hipLaunchParams launch_params[MAX_GPUS];
+ void *args[MAX_GPUS][3];
+ int n_elements = N_PER_DEVICE;
+
+ for (int i = 0; i < n_gpus; i++)
+ {
+ args[i][0] = (void *) &data_d[i];
+ args[i][1] = (void *) &n_elements;
+ args[i][2] = (void *) &result_h;
+
+ launch_params[i].func
+        = reinterpret_cast<void *> (coop_multi_grid_sync_kernel);
+ launch_params[i].gridDim = dim3 (num_groups, 1, 1);
+ launch_params[i].blockDim = dim3 (group_size, 1, 1);
+ launch_params[i].sharedMem = 0;
+ launch_params[i].stream = stream[i];
+ launch_params[i].args = args[i];
+ }
+
+ CHECK (hipLaunchCooperativeKernelMultiDevice (launch_params, n_gpus, 0));
+
+ for (int i = 0; i < n_gpus; i++)
+ {
+ CHECK (hipSetDevice (i));
+ CHECK (hipDeviceSynchronize ());
+ }
+
+ long total = result_h[0];
+
+ for (int i = 0; i < n_gpus; i++)
+ {
+ CHECK (hipSetDevice (i));
+ CHECK (hipFree (data_d[i]));
+ CHECK (hipStreamDestroy (stream[i]));
+ }
+ CHECK (hipSetDevice (0));
+ CHECK (hipHostFree (result_h));
+
+ /* Compute the expected total: for grid g (0-based), the thread that
+ writes slot i stores (g + 1) * (i + 1). Per grid, the sum is
+ (g + 1) * N_PER_DEVICE * (N_PER_DEVICE + 1) / 2. */
+ long per_grid = (long) N_PER_DEVICE * (N_PER_DEVICE + 1) / 2;
+ long expected = 0;
+ for (int g = 0; g < n_gpus; g++)
+ expected += (long) (g + 1) * per_grid;
+
+ if (total != expected)
+ {
+ fprintf (stderr, "FAILED: total %ld, expected %ld\n", total, expected);
+ return EXIT_FAILURE;
+ }
+
+ printf ("PASSED\n");
+ return EXIT_SUCCESS;
+}
diff --git a/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.exp b/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.exp
new file mode 100644
index 00000000000..746e69a53f0
--- /dev/null
+++ b/gdb/testsuite/gdb.rocm/coop-group-multi-grid-sync.exp
@@ -0,0 +1,146 @@
+# Copyright 2026 Free Software Foundation, Inc.
+# Copyright (C) 2026 Advanced Micro Devices, Inc. All rights reserved.
+
+# This file is part of GDB.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Tests for debugging a multi-device cooperative kernel that uses
+# cooperative_groups::this_grid ().sync () within each device and
+# cooperative_groups::this_multi_grid ().sync () across devices. Both
+# barriers are implemented on AMD GPUs using the GWS hardware
+# mechanism.
+#
+# The test verifies that GDB can:
+# - read the number of participating GPUs from the inferior,
+# - hit a kernel breakpoint inside the cooperative dispatch on
+# *every* participating GPU (matched via the per-device
+# "Breakpoint ." subscript that hipLaunchCooperative
+# KernelMultiDevice produces), and
+# - resume the cross-device dispatch through both the intra-device
+# this_grid ().sync () and the cross-device this_multi_grid ().sync ()
+# to normal program completion.
+
+load_lib rocm.exp
+
+require allow_hipcc_tests
+require hip_devices_support_debug_multi_process
+
+standard_testfile .cpp
+
+if {[build_executable "failed to prepare" $testfile $srcfile {debug hip}]} {
+ return
+}
+
+# Test: hit a breakpoint inside the cooperative kernel on every
+# participating GPU, then resume the dispatch through both barriers
+# and to program completion.
+
+proc_with_prefix test_break_in_multi_coop_kernel {} {
+ # Multi-device cooperative dispatches involve waves on more than
+ # one device making forward progress before the multi-grid sync
+ # releases. Run the inferior in non-stop mode so the debugger can
+ # observe per-device stops independently.
+ save_vars { ::GDBFLAGS } {
+ append ::GDBFLAGS " -ex \"set non-stop on\""
+ clean_restart
+ }
+ gdb_load $::binfile
+
+ with_rocm_gpu_lock {
+ if {![runto_main]} {
+ return
+ }
+
+ # First, advance to the eligibility marker so we can read the
+ # final value of "n_gpus" (the count of GPUs the inferior is
+ # actually going to use for the cooperative dispatch). If the
+ # program self-skips before reaching the marker (insufficient
+ # GPUs or no cooperative-multi-device support on some device),
+ # mark the test UNSUPPORTED.
+ gdb_breakpoint \
+ [gdb_get_line_number "eligibility marker"] allow-pending
+
+ set eligible 0
+ gdb_test_multiple "continue" "advance to eligibility marker or skip" {
+ -re "hit Breakpoint $::decimal,\[^\r\n\]*main \\(\\)\[^\r\n\]*\r\n\[^\r\n\]*\r\n$::gdb_prompt " {
+ set eligible 1
+ pass $gdb_test_name
+ }
+ -re "\\\[Inferior 1 \[^\r\n\]* exited normally\\\]\[^\r\n\]*\r\n$::gdb_prompt " {
+ unsupported "multi-device cooperative launch not available"
+ }
+ }
+
+ if { !$eligible } {
+ return
+ }
+
+ set n_gpus [get_integer_valueof "n_gpus" 0]
+ gdb_assert {$n_gpus >= 2} "n_gpus is at least 2 ($n_gpus)"
+ if { $n_gpus < 2 } {
+ return
+ }
+
+ # Replace the marker breakpoint with the kernel breakpoint and
+ # run all threads in the background so the host can finish
+ # launching kernels on every device.
+ delete_breakpoints
+ gdb_breakpoint \
+ [gdb_get_line_number "before-grid-sync line"] allow-pending
+
+ gdb_test_multiple "continue -a &" "background continue" {
+ -re "Continuing\\.\r\n$::gdb_prompt " {
+ pass $gdb_test_name
+ }
+ }
+
+ # In non-stop mode, hipLaunchCooperativeKernelMultiDevice
+ # produces one child breakpoint instance per participating GPU
+    # ("Breakpoint <n>.<m>").  Collect distinct <m> values
+ # until we have observed a stop on every GPU; only then is it
+ # safe to delete the breakpoint and let the dispatch run
+ # through both grid syncs.
+ array unset gpu_seen
+ set distinct_gpus 0
+ gdb_test_multiple "" "breakpoint hit on every participating GPU" {
+ -re "Thread $::decimal\[^\r\n\]* hit Breakpoint $::decimal\\.($::decimal),\[^\r\n\]*coop_multi_grid_sync_kernel\[^\r\n\]*\r\n" {
+ set inst $expect_out(1,string)
+ if {![info exists gpu_seen($inst)]} {
+ set gpu_seen($inst) 1
+ incr distinct_gpus
+ }
+ if {$distinct_gpus >= $n_gpus} {
+ pass $gdb_test_name
+ } else {
+ exp_continue
+ }
+ }
+ }
+
+ # All participating GPUs have been observed stopping at the
+ # kernel breakpoint. Remove the breakpoint and let the
+ # dispatch run through this_grid ().sync () and
+ # this_multi_grid ().sync () to clean program exit.
+ delete_breakpoints
+
+ gdb_test_multiple "continue -a" "continue all to end" {
+ -re "\\\[Inferior 1 \[^\r\n\]* exited normally\\\]\[^\r\n\]*\r\n$::gdb_prompt " {
+ pass $gdb_test_name
+ }
+ }
+ }
+}
+
+test_break_in_multi_coop_kernel