Skip to content

Commit

Permalink
Standardize eigen threading and batch size logic
Browse files Browse the repository at this point in the history
  • Loading branch information
lightvector committed Dec 17, 2023
1 parent 53d2b33 commit ce4e410
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 17 deletions.
4 changes: 2 additions & 2 deletions cpp/command/benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ int MainCmds::benchmark(const vector<string>& args) {
cout << "WARNING: Your nnMaxBatchSize is hardcoded to " + cfg.getString("nnMaxBatchSize") + ", recommend deleting it and using the default (which this benchmark assumes)" << endl;
#ifdef USE_EIGEN_BACKEND
if(cfg.contains("numEigenThreadsPerModel")) {
cout << "Note: Your numEigenThreadsPerModel is hardcoded to " + cfg.getString("numEigenThreadsPerModel") + ", consider deleting it and using the default (which this benchmark assumes when computing its performance stats)" << endl;
cout << "Note: Your numEigenThreadsPerModel is hardcoded to " + cfg.getString("numEigenThreadsPerModel") + ", this benchmark ignores it and assumes that it is always set equal to the smaller of the number of search threads and the number of CPU cores on your computer when computing its performance stats." << endl;
}
#endif

Expand Down Expand Up @@ -328,7 +328,7 @@ static void setNumThreads(SearchParams& params, NNEvaluator* nnEval, Logger& log
//Also, disable the logger to suppress the kill and respawn messages.
logger.setDisabled(true);
nnEval->killServerThreads();
nnEval->setNumThreads(vector<int>(numThreads,-1));
nnEval->setNumThreads(vector<int>(Setup::computeDefaultEigenBackendThreads(numThreads,logger),-1));
nnEval->spawnServerThreads();
//Also since we killed and respawned all the threads, re-warm them
Rand seedRand;
Expand Down
29 changes: 14 additions & 15 deletions cpp/program/setup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -174,22 +174,9 @@ vector<NNEvaluator*> Setup::initializeNNEvaluators(
cfg.contains("numNNServerThreadsPerModel") ? cfg.getInt("numNNServerThreadsPerModel",1,1024) : 1;
#else
cfg.markAllKeysUsedWithPrefix("numNNServerThreadsPerModel");
auto getNumCores = [&logger]() {
int numCores = (int)std::thread::hardware_concurrency();
if(numCores <= 0) {
logger.write("Could not determine number of cores on this machine, choosing default parameters as if it were 8");
numCores = 8;
}
return numCores;
};
int numNNServerThreadsPerModel =
cfg.contains("numEigenThreadsPerModel") ? cfg.getInt("numEigenThreadsPerModel",1,1024) :
setupFor == SETUP_FOR_DISTRIBUTED ? std::min(expectedConcurrentEvals,getNumCores()) :
setupFor == SETUP_FOR_MATCH ? std::min(expectedConcurrentEvals,getNumCores()) :
setupFor == SETUP_FOR_ANALYSIS ? std::min(expectedConcurrentEvals,getNumCores()) :
setupFor == SETUP_FOR_GTP ? expectedConcurrentEvals :
setupFor == SETUP_FOR_BENCHMARK ? expectedConcurrentEvals :
cfg.getInt("numEigenThreadsPerModel",1,1024);
computeDefaultEigenBackendThreads(expectedConcurrentEvals,logger);
#endif

vector<int> gpuIdxByServerThread;
Expand Down Expand Up @@ -306,7 +293,7 @@ vector<NNEvaluator*> Setup::initializeNNEvaluators(
//and doesn't greatly benefit from having a bigger chunk of parallelizable work to do on the large scale.
//So we just fix a size here that isn't crazy and saves memory, completely ignore what the user would have
//specified for GPUs.
int nnMaxBatchSize = 4;
int nnMaxBatchSize = 2;
cfg.markAllKeysUsedWithPrefix("nnMaxBatchSize");
(void)defaultMaxBatchSize;
#endif
Expand Down Expand Up @@ -349,6 +336,18 @@ vector<NNEvaluator*> Setup::initializeNNEvaluators(
return nnEvals;
}

//Default number of eigen backend threads: the smaller of the expected number of
//concurrent evals and the number of CPU cores detected on this machine.
//Falls back to 8 cores (with a log message) when detection fails.
int Setup::computeDefaultEigenBackendThreads(int expectedConcurrentEvals, Logger& logger) {
  int detectedCores = (int)std::thread::hardware_concurrency();
  if(detectedCores <= 0) {
    //hardware_concurrency() returns 0 when the core count is unknown.
    logger.write("Could not determine number of cores on this machine, choosing eigen backend threads as if it were 8");
    detectedCores = 8;
  }
  return expectedConcurrentEvals < detectedCores ? expectedConcurrentEvals : detectedCores;
}

string Setup::loadHomeDataDirOverride(
ConfigParser& cfg
){
Expand Down
2 changes: 2 additions & 0 deletions cpp/program/setup.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,8 @@ namespace Setup {
constexpr double DEFAULT_ANALYSIS_WIDE_ROOT_NOISE = 0.04;
constexpr bool DEFAULT_ANALYSIS_IGNORE_PRE_ROOT_HISTORY = true;

int computeDefaultEigenBackendThreads(int expectedConcurrentEvals, Logger& logger);

//Loads search parameters for bot from config, by bot idx.
//Fails if no parameters are found.
std::vector<SearchParams> loadParams(
Expand Down

0 comments on commit ce4e410

Please sign in to comment.