// --------------------------------------------------------
// Testcase: ParallelRuns
// --------------------------------------------------------
// Verifies that a single tf::Executor is safe to drive from many client
// threads at once. 32 threads each submit a taskflow of 1024 A->B task
// pairs (2048 counter increments per taskflow), repeated for worker
// counts w = 0..32.
TEST_CASE("ParallelRuns" * doctest::timeout(300)) {

  std::atomic<int> counter;            // total task executions; reset per run
  std::vector<std::thread> threads;    // client threads; cleared after each join round

  // Builds 1024 independent A->B pairs into the given taskflow; each task
  // bumps the shared counter, so one full run adds 1024*2.
  auto make_taskflow = [&] (tf::Taskflow& tf) {
    for(int i=0; i<1024; i++) {
      auto A = tf.emplace([&] () {
        counter.fetch_add(1, std::memory_order_relaxed);
      });
      auto B = tf.emplace([&] () {
        counter.fetch_add(1, std::memory_order_relaxed);
      });
      A.precede(B);
    }
  };

  // Each client thread owns its taskflow on its own stack and blocks on
  // run(...).wait(), so the taskflow outlives its execution.
  SUBCASE("RunAndWait") {
    for(size_t w=0; w<=32; ++w) {
      tf::Executor executor(w);
      counter = 0;
      for(int t=0; t<32; t++) {
        threads.emplace_back([&] () {
          tf::Taskflow taskflow;
          make_taskflow(taskflow);
          executor.run(taskflow).wait();
        });
      }

      for(auto& t : threads) {
        t.join();
      }
      threads.clear();

      REQUIRE(counter.load() == 32*1024*2);
    }
  }

  // Taskflows are kept alive in a shared vector because run() here is
  // fire-and-forget; completion is awaited via executor.wait_for_all().
  SUBCASE("RunAndWaitForAll") {
    for(size_t w=0; w<=32; ++w) {
      tf::Executor executor(w);
      counter = 0;
      std::vector<std::unique_ptr<tf::Taskflow>> taskflows(32);
      std::atomic<size_t> barrier(0);
      for(int t=0; t<32; t++) {
        // capture t by value: each thread fills its own slot
        threads.emplace_back([&, t=t] () {
          taskflows[t] = std::make_unique<tf::Taskflow>();
          make_taskflow(*taskflows[t]);
          executor.run(*taskflows[t]);
          ++barrier;  // make sure all runs are issued
        });
      }

      // spin until every thread has issued its run() — wait_for_all()
      // must not be called before all submissions are in flight
      while(barrier != 32);
      executor.wait_for_all();
      REQUIRE(counter.load() == 32*1024*2);

      for(auto& t : threads) {
        t.join();
      }
      threads.clear();
    }
  }
}
0 commit comments