CTest: Fix --test-load regression
The `ctest --test-load` option is implemented in `StartNextTests` by not starting any tests when the load is too high and instead sleeping and then returning. Prior to commit v3.11.0-rc1~117^2 (CTest: Re-implement test process handling using libuv, 2017-12-10) our outer loop in `RunTests` would immediately call `StartNextTests` again. However, now the `uv_run` loop may simply terminate if there are no tests running, because no events are left pending.

Fix this by converting the sleep in `StartNextTests` into a libuv timer that it starts instead. This avoids leaving `uv_run` with no pending events. In the case that there are other running tests, this also allows CTest to detect when they finish even if it is during the wait period where we previously slept.

This regression was not caught by the test suite because it only verified that we do not start new tests when the load is too high, not that we proceed to start tests once the load drops. Revise the test suite to cover both.

Fixes: #18338
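For background, here is a minimal standalone sketch (not CMake's code; `fake_load` and `on_retry` are invented names for illustration, and only the public libuv 1.x `uv_timer_*` API is assumed) showing why an active timer keeps `uv_run` alive where a blocking sleep would not: the running timer is a pending event, so the loop waits for it and re-enters the retry callback, much as `StartNextTests` is re-invoked via `OnTestLoadRetryCB` in the change below.

// Minimal sketch, assuming libuv 1.x; compile with -luv.
// fake_load stands in for the real load average and is purely illustrative.
#include <uv.h>
#include <cstdio>

static int fake_load = 5; // pretend system load; drops on each retry

static void on_retry(uv_timer_t* timer)
{
  if (--fake_load > 2) {
    std::printf("load %d still too high, waiting...\n", fake_load);
    return; // the repeating timer keeps uv_run supplied with pending events
  }
  std::printf("load %d is low enough, start work\n", fake_load);
  uv_timer_stop(timer); // nothing active remains, so uv_run can return
}

int main()
{
  uv_loop_t loop;
  uv_loop_init(&loop);

  uv_timer_t retry;
  uv_timer_init(&loop, &retry);
  // Fire after 100 ms and repeat every 100 ms until stopped,
  // instead of blocking the loop with a sleep.
  uv_timer_start(&retry, on_retry, 100, 100);

  uv_run(&loop, UV_RUN_DEFAULT); // returns once the timer is stopped
  uv_close(reinterpret_cast<uv_handle_t*>(&retry), nullptr);
  uv_run(&loop, UV_RUN_DEFAULT); // let the close callback run
  uv_loop_close(&loop);
  return 0;
}

In the commit itself the same idea appears as the `cm::uv_timer_ptr TestLoadRetryTimer` member, started in `StartNextTests` and serviced by `OnTestLoadRetryCB`.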
Source/CTest/cmCTestMultiProcessHandler.cxx

@@ -5,7 +5,6 @@
 #include "cmAffinity.h"
 #include "cmCTest.h"
 #include "cmCTestRunTest.h"
-#include "cmCTestScriptHandler.h"
 #include "cmCTestTestHandler.h"
 #include "cmSystemTools.h"
 #include "cmWorkingDirectory.h"
@@ -53,6 +52,7 @@ cmCTestMultiProcessHandler::cmCTestMultiProcessHandler()
 {
   this->ParallelLevel = 1;
   this->TestLoad = 0;
+  this->FakeLoadForTesting = 0;
   this->Completed = 0;
   this->RunningCount = 0;
   this->ProcessorsAvailable = cmAffinity::GetProcessorsAvailable();
@@ -97,6 +97,16 @@ void cmCTestMultiProcessHandler::SetParallelLevel(size_t level)
 void cmCTestMultiProcessHandler::SetTestLoad(unsigned long load)
 {
   this->TestLoad = load;
+
+  std::string fake_load_value;
+  if (cmSystemTools::GetEnv("__CTEST_FAKE_LOAD_AVERAGE_FOR_TESTING",
+                            fake_load_value)) {
+    if (!cmSystemTools::StringToULong(fake_load_value.c_str(),
+                                      &this->FakeLoadForTesting)) {
+      cmSystemTools::Error("Failed to parse fake load value: ",
+                           fake_load_value.c_str());
+    }
+  }
 }

 void cmCTestMultiProcessHandler::RunTests()
@@ -259,12 +269,19 @@ bool cmCTestMultiProcessHandler::StartTest(int test)

 void cmCTestMultiProcessHandler::StartNextTests()
 {
-  size_t numToStart = 0;
+  if (this->TestLoadRetryTimer.get() != nullptr) {
+    // This timer may be waiting to call StartNextTests again.
+    // Since we have been called it is no longer needed.
+    uv_timer_stop(this->TestLoadRetryTimer);
+  }

   if (this->Tests.empty()) {
+    this->TestLoadRetryTimer.reset();
     return;
   }

+  size_t numToStart = 0;
+
   if (this->RunningCount < this->ParallelLevel) {
     numToStart = this->ParallelLevel - this->RunningCount;
   }
@@ -280,7 +297,6 @@ void cmCTestMultiProcessHandler::StartNextTests()
   }

   bool allTestsFailedTestLoadCheck = false;
-  bool usedFakeLoadForTesting = false;
   size_t minProcessorsRequired = this->ParallelLevel;
   std::string testWithMinProcessors;

@@ -293,15 +309,11 @@ void cmCTestMultiProcessHandler::StartNextTests()
     allTestsFailedTestLoadCheck = true;

     // Check for a fake load average value used in testing.
-    std::string fake_load_value;
-    if (cmSystemTools::GetEnv("__CTEST_FAKE_LOAD_AVERAGE_FOR_TESTING",
-                              fake_load_value)) {
-      usedFakeLoadForTesting = true;
-      if (!cmSystemTools::StringToULong(fake_load_value.c_str(),
-                                        &systemLoad)) {
-        cmSystemTools::Error("Failed to parse fake load value: ",
-                             fake_load_value.c_str());
-      }
+    if (this->FakeLoadForTesting > 0) {
+      systemLoad = this->FakeLoadForTesting;
+      // Drop the fake load for the next iteration to a value low enough
+      // that the next iteration will start tests.
+      this->FakeLoadForTesting = 1;
     }
     // If it's not set, look up the true load average.
     else {
@@ -385,18 +397,25 @@ void cmCTestMultiProcessHandler::StartNextTests()
     }
     cmCTestLog(this->CTest, HANDLER_OUTPUT, "*****" << std::endl);

-    if (usedFakeLoadForTesting) {
-      // Break out of the infinite loop of waiting for our fake load
-      // to come down.
-      this->StopTimePassed = true;
-    } else {
-      // Wait between 1 and 5 seconds before trying again.
-      cmCTestScriptHandler::SleepInSeconds(cmSystemTools::RandomSeed() % 5 +
-                                           1);
+    // Wait between 1 and 5 seconds before trying again.
+    unsigned int milliseconds = (cmSystemTools::RandomSeed() % 5 + 1) * 1000;
+    if (this->FakeLoadForTesting) {
+      milliseconds = 10;
     }
+    if (this->TestLoadRetryTimer.get() == nullptr) {
+      this->TestLoadRetryTimer.init(this->Loop, this);
+    }
+    this->TestLoadRetryTimer.start(
+      &cmCTestMultiProcessHandler::OnTestLoadRetryCB, milliseconds, 0);
   }
 }

+void cmCTestMultiProcessHandler::OnTestLoadRetryCB(uv_timer_t* timer)
+{
+  auto self = static_cast<cmCTestMultiProcessHandler*>(timer->data);
+  self->StartNextTests();
+}
+
 void cmCTestMultiProcessHandler::FinishTestProcess(cmCTestRunTest* runner,
                                                    bool started)
 {
Source/CTest/cmCTestMultiProcessHandler.h

@@ -12,6 +12,7 @@
 #include <string>
 #include <vector>

+#include "cmUVHandlePtr.h"
 #include "cm_uv.h"

 class cmCTest;
@@ -101,6 +102,8 @@ protected:
   void EraseTest(int index);
   void FinishTestProcess(cmCTestRunTest* runner, bool started);

+  static void OnTestLoadRetryCB(uv_timer_t* timer);
+
   void RemoveTest(int index);
   // Check if we need to resume an interrupted test set
   void CheckResume();
@@ -135,7 +138,9 @@ protected:
   std::vector<cmCTestTestHandler::cmCTestTestResult>* TestResults;
   size_t ParallelLevel; // max number of process that can be run at once
   unsigned long TestLoad;
+  unsigned long FakeLoadForTesting;
   uv_loop_t Loop;
+  cm::uv_timer_ptr TestLoadRetryTimer;
   cmCTestTestHandler* TestHandler;
   cmCTest* CTest;
   bool HasCycles;
Tests/RunCMake/CTestCommandLine/RunCMakeTest.cmake

@@ -111,8 +111,8 @@ endfunction()
 set(ENV{__CTEST_FAKE_LOAD_AVERAGE_FOR_TESTING} 5)

 # Verify that new tests are not started when the load average exceeds
-# our threshold.
-run_TestLoad(test-load-fail 2)
+# our threshold and that they then run once the load average drops.
+run_TestLoad(test-load-wait 3)

 # Verify that warning message is displayed but tests still start when
 # an invalid argument is given.
Deleted expected-output files (old test-load-fail expectations):

@@ -1 +0,0 @@
-No tests were found!!!
@@ -1,2 +0,0 @@
-^Test project .*/Tests/RunCMake/CTestCommandLine/TestLoad
-\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 2, Smallest test TestLoad[1-2] requires 1\*\*\*\*\*
@@ -1 +0,0 @@
-^$

New expected-output file (test-load-wait):

@@ -0,0 +1,8 @@
+^Test project .*/Tests/RunCMake/CTestCommandLine/TestLoad
+\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 3, Smallest test TestLoad[1-2] requires 1\*\*\*\*\*
+Start 1: TestLoad1
+Start 2: TestLoad2
+1/2 Test #[1-2]: TestLoad[1-2] ........................ Passed +[0-9.]+ sec
+2/2 Test #[1-2]: TestLoad[1-2] ........................ Passed +[0-9.]+ sec
++
+100% tests passed, 0 tests failed out of 2

Deleted expected-output files (CTestTestLoadFail):

@@ -1 +0,0 @@
-(-1|255)
@@ -1 +0,0 @@
-No tests were found!!!
@@ -1,2 +0,0 @@
-Test project .*/Tests/RunCMake/ctest_test/CTestTestLoadFail-build
-\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 4, Smallest test RunCMakeVersion requires 1\*\*\*\*\*$
Tests/RunCMake/ctest_test/CTestTestLoadWait-stdout.txt (new file)

@@ -0,0 +1,8 @@
+Test project .*/Tests/RunCMake/ctest_test/CTestTestLoadWait-build
+\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 4, Smallest test RunCMakeVersion requires 1\*\*\*\*\*
+Start 1: RunCMakeVersion
+1/1 Test #1: RunCMakeVersion .................. Passed +[0-9.]+ sec
++
+100% tests passed, 0 tests failed out of 1
++
+Total Test time \(real\) = +[0-9.]+ sec$
Tests/RunCMake/ctest_test/RunCMakeTest.cmake

@@ -21,8 +21,8 @@ set(ENV{__CTEST_FAKE_LOAD_AVERAGE_FOR_TESTING} 5)
 run_ctest_test(TestLoadPass TEST_LOAD 6)

 # Verify that new tests are not started when the load average exceeds
-# our threshold.
-run_ctest_test(TestLoadFail TEST_LOAD 2)
+# our threshold and that they then run once the load average drops.
+run_ctest_test(TestLoadWait TEST_LOAD 2)

 # Verify that when an invalid "TEST_LOAD" value is given, a warning
 # message is displayed and the value is ignored.
@@ -34,9 +34,9 @@ set(CASE_CTEST_TEST_LOAD 7)
 run_ctest_test(CTestTestLoadPass)

 # Verify that new tests are not started when the load average exceeds
-# our threshold.
+# our threshold and that they then run once the load average drops.
 set(CASE_CTEST_TEST_LOAD 4)
-run_ctest_test(CTestTestLoadFail)
+run_ctest_test(CTestTestLoadWait)

 # Verify that when an invalid "CTEST_TEST_LOAD" value is given,
 # a warning message is displayed and the value is ignored.
Deleted expected-output files (TestLoadFail):

@@ -1 +0,0 @@
-(-1|255)
@@ -1 +0,0 @@
-No tests were found!!!
@@ -1,2 +0,0 @@
-Test project .*/Tests/RunCMake/ctest_test/TestLoadFail-build
-\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 2, Smallest test RunCMakeVersion requires 1\*\*\*\*\*$
Tests/RunCMake/ctest_test/TestLoadWait-stdout.txt (new file)

@@ -0,0 +1,8 @@
+Test project .*/Tests/RunCMake/ctest_test/TestLoadWait-build
+\*\*\*\*\* WAITING, System Load: 5, Max Allowed Load: 2, Smallest test RunCMakeVersion requires 1\*\*\*\*\*
+Start 1: RunCMakeVersion
+1/1 Test #1: RunCMakeVersion .................. Passed +[0-9.]+ sec
++
+100% tests passed, 0 tests failed out of 1
++
+Total Test time \(real\) = +[0-9.]+ sec$