Merged PR 17337: fp16 support for training
This PR refactors the training graph groups and optimizers to enable fp16 support and to simplify the surrounding training code.

Deprecates old unused graph groups and fixes a couple of MPI issues.
emjotde committed Jan 28, 2021
1 parent 18fd50d commit ba91b39
Showing 75 changed files with 2,872 additions and 4,020 deletions.
25 changes: 22 additions & 3 deletions CMakeLists.txt
@@ -286,8 +286,8 @@ if(CUDA_FOUND)
option(COMPILE_CUDA_SM75 "Compile GPU version with SM75 support" ON)
endif()
if(CUDA_VERSION VERSION_EQUAL "11.0" OR CUDA_VERSION VERSION_GREATER "11.0")
option(COMPILE_CUDA_SM35 "Compile GPU version with SM35 support" ON)
option(COMPILE_CUDA_SM50 "Compile GPU version with SM50 support" ON)
option(COMPILE_CUDA_SM35 "Compile GPU version with SM35 support" OFF) # deprecated for CUDA 11
option(COMPILE_CUDA_SM50 "Compile GPU version with SM50 support" OFF) # deprecated for CUDA 11
option(COMPILE_CUDA_SM60 "Compile GPU version with SM60 support" ON)
option(COMPILE_CUDA_SM70 "Compile GPU version with SM70 support" ON)
option(COMPILE_CUDA_SM75 "Compile GPU version with SM75 support" ON)
@@ -439,6 +439,7 @@ endif(USE_MPI)
# Find BLAS library
if(COMPILE_CPU)
set(EXT_LIBS ${EXT_LIBS} intgemm) # Enable intgemm when compiling CPU
add_definitions(-DCOMPILE_CPU=1)
if(USE_APPLE_ACCELERATE)
if(NOT APPLE)
message(FATAL_ERROR "FATAL ERROR: Apple Accelerate only works on macOS.")
@@ -496,11 +497,29 @@ if(COMPILE_SERVER)
endif(OpenSSL_FOUND)
endif(COMPILE_SERVER)

###############################################################################
# Undo static lib search and put non-static searches here:

if(USE_STATIC_LIBS)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
endif()

# TODO: move inside if(BOOST_COMPONENTS) ?
# Find MPI
if(USE_MPI)
# 2.0 refers to MPI2 standard. OpenMPI is an implementation of that standard regardless of the specific OpenMPI version
# e.g. OpenMPI 1.10 implements MPI2 and will be found correctly.
find_package(MPI 2.0 REQUIRED)
if(MPI_FOUND)
include_directories(${MPI_INCLUDE_PATH})
set(EXT_LIBS "${EXT_LIBS} ${MPI_LIBRARIES}")
if(USE_STATIC_LIBS) # alternatively this could install OpenMPI like NCCL and link against that statically with greater control
message(WARNING "MPI implementations are notoriously difficult to link statically, linking ${MPI_LIBRARIES} dynamically despite -DUSE_STATIC_LIBS=on")
endif(USE_STATIC_LIBS)
add_definitions(-DMPI_FOUND=1)
endif(MPI_FOUND)
endif(USE_MPI)

# TODO: move inside if(BOOST_COMPONENTS)
if(USE_STATIC_LIBS)
set(Boost_USE_STATIC_LIBS ON)
endif()
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
v1.9.60
v1.9.61
2 changes: 1 addition & 1 deletion regression-tests
Submodule regression-tests updated 161 files
2 changes: 1 addition & 1 deletion src/3rd_party/CMakeLists.txt
@@ -176,7 +176,7 @@ if(CUDA_FOUND)
BUILD_COMMAND
${CMAKE_MAKE_PROGRAM} -f ${CMAKE_CURRENT_SOURCE_DIR}/nccl/Makefile src.build
BUILDDIR=${CMAKE_BINARY_DIR}/local CUDA_HOME=${CUDA_TOOLKIT_ROOT_DIR}
CUDA8_GENCODE=${GENCODE} CXX=${CMAKE_CXX_COMPILER}
CUDA8_GENCODE=${GENCODE} CXX=${CMAKE_CXX_COMPILER} CXX_FLAGS=${NCCL_FLAGS}
INSTALL_COMMAND "")

set_target_properties(nccl PROPERTIES IMPORTED_LOCATION ${CMAKE_BINARY_DIR}/local/lib/libnccl_static.a)
21 changes: 17 additions & 4 deletions src/3rd_party/cnpy/cnpy.cpp
@@ -30,6 +30,16 @@ char cnpy::map_type(const std::type_info& t)
if(t == typeid(long) ) return 'i';
if(t == typeid(long long) ) return 'i';

if(t == typeid(int8_t) ) return 'i';
if(t == typeid(int16_t) ) return 'i';
if(t == typeid(int32_t) ) return 'i';
if(t == typeid(int64_t) ) return 'i';

if(t == typeid(uint8_t) ) return 'u';
if(t == typeid(uint16_t) ) return 'u';
if(t == typeid(uint32_t) ) return 'u';
if(t == typeid(uint64_t) ) return 'u';

if(t == typeid(unsigned char) ) return 'u';
if(t == typeid(unsigned short) ) return 'u';
if(t == typeid(unsigned long) ) return 'u';
@@ -60,7 +70,7 @@ template<> std::vector<char>& cnpy::operator+=(std::vector<char>& lhs, const cha
return lhs;
}

void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order) {
void cnpy::parse_npy_header(FILE* fp, char& type, unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order) {
char buffer[256];
size_t res = fread(buffer,sizeof(char),11,fp);
if(res != 11)
@@ -95,7 +105,8 @@ void cnpy::parse_npy_header(FILE* fp, unsigned int& word_size, unsigned int*& sh
bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
assert(littleEndian); littleEndian;

//char type = header[loc1+1];
// read a char that describes the numpy data type; this was previously ignored for some reason, even though it is already present in the file format.
type = header[loc1+1];
//assert(type == map_type(T));

std::string str_ws = header.substr(loc1+2);
@@ -129,7 +140,7 @@ void cnpy::parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& globa
assert(comment_len == 0);

// make compiler happy, otherwise warns with "variable set but not used"
// on the other hand it seems having the asserts in here is useful.
// on the other hand it seems having the asserts in here is useful.
_unused(disk_no);
_unused(disk_start);
_unused(nrecs_on_disk);
@@ -139,13 +150,15 @@ void cnpy::parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& globa
cnpy::NpyArrayPtr load_the_npy_file(FILE* fp) {
unsigned int* shape;
unsigned int ndims, word_size;
char type;
bool fortran_order;
cnpy::parse_npy_header(fp, word_size, shape, ndims, fortran_order);
cnpy::parse_npy_header(fp, type, word_size, shape, ndims, fortran_order);
unsigned long long size = 1; //long long so no overflow when multiplying by word_size
for(unsigned int i = 0; i < ndims; i++)
size *= shape[i];

auto arr = cnpy::NpyArrayPtr(new cnpy::NpyArray());
arr->type = type;
arr->word_size = word_size;
arr->shape = std::vector<unsigned int>(shape, shape+ndims);
delete[] shape;
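The new `type` character, combined with the existing `word_size`, is what lets callers tell float16 payloads ('f', 2) apart from float32 payloads ('f', 4) when loading .npy data. A minimal sketch of interpreting the pair — a hypothetical helper for illustration, not part of cnpy or Marian:

```cpp
#include <stdexcept>
#include <string>

// Hypothetical helper: render cnpy's (type, word_size) pair as a readable name.
// The numpy type characters used here are 'f' (float), 'i' (signed int),
// 'u' (unsigned int), 'b' (bool) and 'c' (complex).
std::string describeNpyType(char type, unsigned int wordSize) {
  switch(type) {
    case 'f': return "float" + std::to_string(8 * wordSize);   // 'f',2 -> float16; 'f',4 -> float32
    case 'i': return "int"   + std::to_string(8 * wordSize);
    case 'u': return "uint"  + std::to_string(8 * wordSize);
    case 'b': return "bool";
    case 'c': return "complex" + std::to_string(8 * wordSize); // numpy names complex types by total bit width
    default:  throw std::runtime_error("unknown npy type code");
  }
}
```

For example, numpy writes float16 arrays with the descriptor `<f2`, which this header parser now reports as type `'f'` with word_size 2.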
11 changes: 9 additions & 2 deletions src/3rd_party/cnpy/cnpy.h
@@ -27,7 +27,13 @@ namespace cnpy {
struct NpyArray {
std::vector<char> bytes;
std::vector<unsigned int> shape;

// See cnpy::map_type() for a list of valid char codes and their mappings.
// Numpy seems to only understand five types {f, i, u, b, c} paired with
// word_size.
char type;
unsigned int word_size{1};

bool fortran_order{0};

NpyArray() {}
@@ -56,7 +62,7 @@ namespace cnpy {
char map_type(const std::type_info& t);
static inline std::vector<char> create_npy_header(char type, size_t word_size, const unsigned int* shape, const unsigned int ndims);
template<typename T> std::vector<char> create_npy_header(const T* data, const unsigned int* shape, const unsigned int ndims);
void parse_npy_header(FILE* fp,unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order);
void parse_npy_header(FILE* fp, char& type, unsigned int& word_size, unsigned int*& shape, unsigned int& ndims, bool& fortran_order);
void parse_zip_footer(FILE* fp, unsigned short& nrecs, unsigned int& global_header_size, unsigned int& global_header_offset);
npz_t npz_load(std::string fname);
NpyArrayPtr npz_load(std::string fname, std::string varname);
@@ -89,9 +95,10 @@ namespace cnpy {
if(fp) {
//file exists. we need to append to it. read the header, modify the array size
unsigned int word_size, tmp_dims;
char type;
unsigned int* tmp_shape = 0;
bool fortran_order;
parse_npy_header(fp,word_size,tmp_shape,tmp_dims,fortran_order);
parse_npy_header(fp,type,word_size,tmp_shape,tmp_dims,fortran_order);
assert(!fortran_order);

if(word_size != sizeof(T)) {
2 changes: 1 addition & 1 deletion src/3rd_party/intgemm
Submodule intgemm updated 61 files
+2 −5 .github/workflows/mac.yml
+27 −0 .github/workflows/ubuntu-gcc5-debug.yml
+25 −0 .github/workflows/ubuntu.yml
+25 −0 .github/workflows/windows.yml
+7 −4 CMakeLists.txt
+30 −0 LICENSE
+7 −2 README.md
+24 −24 benchmarks/benchmark.cc
+9 −9 benchmarks/benchmark_quantizer.cc
+86 −86 benchmarks/biasmultiply.cc
+7 −6 example.cc
+0 −71 intgemm.cc
+0 −1 intgemm/aligned.h
+41 −60 intgemm/avx2_gemm.h
+38 −68 intgemm/avx512_gemm.h
+7 −8 intgemm/avx512vnni_gemm.h
+1 −1 intgemm/callbacks.h
+0 −0 intgemm/callbacks/configs.h
+16 −2 intgemm/callbacks/implementations.inl
+0 −0 intgemm/callbacks/output_buffer_info.h
+21 −28 intgemm/interleave.h
+71 −0 intgemm/intgemm.cc
+17 −11 intgemm/intgemm.h
+0 −0 intgemm/intgemm_config.h.in
+25 −26 intgemm/intrinsics.h
+1 −1 intgemm/kernels.h
+4 −30 intgemm/kernels/implementations.inl
+14 −14 intgemm/multiply.h
+18 −26 intgemm/sse2_gemm.h
+27 −37 intgemm/ssse3_gemm.h
+0 −0 intgemm/stats.h
+1 −1 intgemm/stats.inl
+0 −0 intgemm/types.h
+0 −0 intgemm/utils.h
+0 −0 intgemm/vec_traits.h
+24 −0 test/3rd_party/LICENSE_1_0.txt
+0 −0 test/3rd_party/catch.hpp
+105 −105 test/add127_test.cc
+2 −2 test/kernels/add_bias_test.cc
+2 −2 test/kernels/bitwise_not_test.cc
+2 −2 test/kernels/downcast_test.cc
+2 −2 test/kernels/exp_test.cc
+2 −2 test/kernels/floor_test.cc
+0 −54 test/kernels/multiply_sat_test.cc
+2 −3 test/kernels/multiply_test.cc
+2 −2 test/kernels/quantize_test.cc
+2 −3 test/kernels/relu_test.cc
+2 −3 test/kernels/rescale_test.cc
+2 −2 test/kernels/sigmoid_test.cc
+2 −2 test/kernels/tanh_test.cc
+2 −2 test/kernels/unquantize_test.cc
+6 −3 test/kernels/upcast_test.cc
+2 −2 test/kernels/write_test.cc
+115 −115 test/multiply_test.cc
+12 −12 test/prepare_b_quantized_transposed.cc
+12 −12 test/prepare_b_transposed.cc
+19 −19 test/quantize_test.cc
+6 −4 test/test.cc
+8 −8 test/test.h
+1 −1 test/utils_test.cc
+0 −328 test_mull.cpp
2 changes: 2 additions & 0 deletions src/CMakeLists.txt
@@ -79,6 +79,7 @@ set(MARIAN_SOURCES
optimizers/quantizer.cpp
optimizers/clippers.cpp
optimizers/optimizers.cpp
optimizers/exponential_smoothing.cpp

models/model_factory.cpp
models/encoder_decoder.cpp
@@ -164,6 +165,7 @@ if(CUDA_FOUND)
tensors/gpu/device.cu
tensors/gpu/algorithm.cu
tensors/gpu/prod.cpp
tensors/gpu/prod_sparse.cpp
tensors/gpu/topk.cu
tensors/gpu/element.cu
tensors/gpu/add.cu
15 changes: 3 additions & 12 deletions src/command/marian_train.cpp
@@ -14,30 +14,21 @@ int main(int argc, char** argv) {

auto options = parseOptions(argc, argv, cli::mode::training);

// selects MultiNodeGraphGroup family
//
// Note: --sync-sgd without --multi-node also supports MPI now, using the SyncGraphGroup. This
// means we have two redundant implementations of multi-node sync-sgd. Note that the
// MultiNodeGraphGroup family is out of date. Therefore, the goal is to remove
// MultiNodeGraphGroupSync.
if(options->get<bool>("multi-node")) {
LOG(warn, "[experimental] Using old multi-node training implementations that are not up-to-date");
ABORT("Old multi-node training code disabled");
}
// --sync-sgd always selects SyncGraphGroup
//
// If given, then this implementation is used for all combinations of (single, multiple) MPI
// processes x (single, multiple) GPUs per MPI process. This variant is presently up-to-date and
// best supported.
else if (options->get<bool>("sync-sgd")) {
if(options->get<bool>("sync-sgd")) { // @TODO: make default
LOG(info, "Using synchronous SGD");
New<Train<SyncGraphGroup>>(options)->run();
}
else {
auto devices = Config::getDevices(options);
if(devices.size() == 1) {
LOG(info, "[training] Using single-device training");
New<Train<SingletonGraph>>(options)->run();
New<Train<SyncGraphGroup>>(options)->run();
// New<Train<SingletonGraph>>(options)->run(); // kept for reference
} else {
LOG(info, "Using asynchronous training");
New<Train<AsyncGraphGroup>>(options)->run();
5 changes: 2 additions & 3 deletions src/common/aliases.cpp
@@ -28,10 +28,9 @@ namespace marian {
void ConfigParser::addAliases(cli::CLIWrapper& cli) {
cli.alias("fp16", "true", [&](YAML::Node& config) {
if(mode_ == cli::mode::training) {
config["precision"] = std::vector<std::string>({"float16", "float32", "float32"}); // inference type, optimization type, save type
// @TODO: review this
config["precision"] = std::vector<std::string>({"float16", "float32"}); // inference type, optimization type, save type
// scaling factor (power of 2), frequency, multiplier at increase, tolerance, range, minimum factor
config["cost-scaling"] = std::vector<std::string>({"7", "2000", "2", "0.05", "10", "1"});
config["cost-scaling"] = std::vector<std::string>({"0", "1000", "2", "0.05", "10", "1e-5"});
} else {
config["precision"] = std::vector<std::string>({"float16"}); // for inference we do not need the other types
}
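For training, the `fp16` alias now starts cost scaling at 2^0 = 1 with a 1000-update window and a 1e-5 floor instead of the previous 2^7 start. In broad strokes, dynamic cost scaling multiplies the loss by a factor before the backward pass, divides the gradients by the same factor afterwards, backs the factor off when non-finite gradients appear, and grows it again after a long enough run of clean updates. The following is a rough, illustrative sketch of that control loop; the names and the omission of the tolerance/range parameters are assumptions, not Marian's optimizer code:

```cpp
#include <algorithm>
#include <cstddef>

// Illustrative dynamic cost-scaling state machine, loosely following the new
// defaults "0 1000 2 0.05 10 1e-5" (scaling exponent, window, multiplier,
// tolerance, range, minimum factor); tolerance and range are ignored here.
struct CostScaler {
  float factor      = 1.0f;  // 2^0; the alias used to start at 2^7
  float multiplier  = 2.0f;  // grow/shrink step
  float minFactor   = 1e-5f; // never scale below this
  size_t window     = 1000;  // clean updates required before growing again
  size_t cleanSteps = 0;     // consecutive finite updates seen so far

  float scaleLoss(float loss) const { return loss * factor; }   // before the backward pass
  float unscaleGrad(float grad) const { return grad / factor; } // before the optimizer step

  // Call once per update; returns false if the update must be skipped.
  bool update(bool gradIsFinite) {
    if(!gradIsFinite) {
      factor = std::max(factor / multiplier, minFactor); // back off on overflow/NaN
      cleanSteps = 0;
      return false;
    }
    if(++cleanSteps >= window) { // a long clean stretch: try a larger scale again
      factor *= multiplier;
      cleanSteps = 0;
    }
    return true;
  }
};
```

A training loop would scale the loss before the backward pass, check the resulting gradient for NaN/Inf (compare the new --check-nan and --check-gradient-nan options below), unscale it, and only then let the optimizer apply the update.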
3 changes: 1 addition & 2 deletions src/common/config.cpp
@@ -280,8 +280,7 @@ std::vector<DeviceId> Config::getDevices(Ptr<Options> options,
size_t numPerMPIProcessDeviceNos = deviceNos.size() / numDevices;
// @TODO: improve logging message as devices[] and numDevices are not informative for the user
ABORT_IF(numDevices * numPerMPIProcessDeviceNos != deviceNos.size(),
"devices[] size must be equal to or a multiple of numDevices"); // (check that it is a
// multiple)
"devices[] size must be equal to or a multiple of numDevices"); // (check that it is a multiple)

// if multiple concatenated lists are given, slice out the one for myMPIRank
if(numPerMPIProcessDeviceNos != 1) {
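The hunk above enforces that a concatenated `--devices` list divides evenly across MPI processes and then slices out the local rank's share. A standalone sketch of that arithmetic — a hypothetical function, ignoring the special case where a single list is shared by all processes:

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

// E.g. deviceNos = {0,1,2,3,4,5,6,7} with 2 MPI processes:
//   rank 0 -> {0,1,2,3}, rank 1 -> {4,5,6,7}.
std::vector<size_t> devicesForRank(const std::vector<size_t>& deviceNos,
                                   size_t numMPIProcesses,
                                   size_t myMPIRank) {
  size_t perProcess = deviceNos.size() / numMPIProcesses;
  if(perProcess * numMPIProcesses != deviceNos.size())
    throw std::runtime_error("devices[] size must be equal to or a multiple of the number of MPI processes");
  auto begin = deviceNos.begin() + myMPIRank * perProcess;
  return std::vector<size_t>(begin, begin + perProcess);
}
```

In Marian this corresponds to passing one device list per MPI process on the command line, concatenated in rank order, with each process keeping only its own slice.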
78 changes: 57 additions & 21 deletions src/common/config_parser.cpp
@@ -134,6 +134,9 @@ void ConfigParser::addOptionsGeneral(cli::CLIWrapper& cli) {
"Suppress logging for translation");
cli.add<size_t>("--seed",
"Seed for all random number generators. 0 means initialize randomly");
cli.add<bool>("--check-nan",
"Check for NaNs or Infs in forward and backward pass. Will abort when found. "
"This is a diagnostic option that will slow down computation significantly");
cli.add<bool>("--interpolate-env-vars",
"allow the use of environment variables in paths, of the form ${VAR_NAME}");
cli.add<bool>("--relative-paths",
@@ -323,13 +326,6 @@ void ConfigParser::addOptionsModel(cli::CLIWrapper& cli) {
"Dropout source words (0 = no dropout)");
cli.add<float>("--dropout-trg",
"Dropout target words (0 = no dropout)");
cli.add<float>("--grad-dropping-rate",
"Gradient Dropping rate (0 = no gradient Dropping)");
cli.add<float>("--grad-dropping-momentum",
"Gradient Dropping momentum decay rate (0.0 to 1.0)");
cli.add<size_t>("--grad-dropping-warmup",
"Do not apply gradient dropping for the first arg steps",
100);
cli.add<float>("--transformer-dropout",
"Dropout between transformer layers (0 = no dropout)");
cli.add<float>("--transformer-dropout-attention",
@@ -520,30 +516,33 @@ void ConfigParser::addOptionsTraining(cli::CLIWrapper& cli) {
// mixed precision training
cli.add<bool>("--fp16",
"Shortcut for mixed precision training with float16 and cost-scaling, "
"corresponds to: --precision float16 float32 float32 --cost-scaling 7 2000 2 0.05 10 1");
"corresponds to: --precision float16 float32 --cost-scaling 0 1000 2 0.05 10 1e-5f");
cli.add<std::vector<std::string>>("--precision",
"Mixed precision training for forward/backward pass and optimizaton. "
"Defines types for: forward/backward, optimization, saving.",
{"float32", "float32", "float32"});
"Defines types for: forward/backward pass, optimization.",
{"float32", "float32"});
cli.add<std::vector<std::string>>("--cost-scaling",
"Dynamic cost scaling for mixed precision training: "
"power of 2, scaling window, scaling factor, tolerance, range, minimum factor")
->implicit_val("7.f 2000 2.f 0.05f 10 1.f");
cli.add<bool>("--normalize-gradient",
"Normalize gradient by multiplying with no. devices / total labels");
->implicit_val("0.f 1000 2.f 0.05f 10 1e-5f");
cli.add<size_t>("--gradient-norm-average-window",
"Window size over which the exponential average of the gradient norm is recorded (for logging and scaling). "
"After this many updates about 90% of the mass of the exponential average comes from these updates",
100);
cli.add<std::vector<std::string>>("--dynamic-gradient-scaling",
"Re-scale gradient to have average gradient norm if (log) gradient norm diverges from average by arg1 sigmas. "
"If arg2 = \"log\" the statistics are recorded for the log of the gradient norm else use plain norm")
->implicit_val("2.f log");
cli.add<bool>("--check-gradient-nan",
"Skip parameter update in case of NaNs in gradient");
cli.add<bool>("--normalize-gradient",
"Normalize gradient by multiplying with no. devices / total labels (not recommended and to be removed in the future)");

cli.add<std::vector<std::string>>("--train-embedder-rank",
"Override model configuration and train a embedding similarity ranker with the model encoder, "
"parameters encode margin and an optional normalization factor")
->implicit_val("0.3f 0.0f");

// multi-node training
cli.add<bool>("--multi-node",
"Enable asynchronous multi-node training through MPI (and legacy sync if combined with --sync-sgd)");
cli.add<bool>("--multi-node-overlap",
"Overlap model computations with MPI communication",
true);

// model quantization training
addSuboptionsQuantization(cli);
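Two of the options added in this hunk work together: `--gradient-norm-average-window` maintains an exponential average of the gradient norm, and `--dynamic-gradient-scaling` rescales a gradient whose (log) norm drifts too many standard deviations away from that average. The "about 90% of the mass" phrasing in the help text is consistent with picking the decay beta so that beta^N ≈ 0.1 for a window of N updates. A rough sketch along those lines; the class name, warm-up constant, and variance update are assumptions, not Marian's scheduler or optimizer code:

```cpp
#include <cmath>
#include <cstddef>

// Illustrative running statistics for the (log) gradient norm, in the spirit of
// --gradient-norm-average-window and --dynamic-gradient-scaling.
class GradNormTracker {
public:
  explicit GradNormTracker(size_t window, double sigmas = 2.0)
      // Choose the decay so that ~90% of the average's mass comes from the last
      // `window` updates: beta^window = 0.1  =>  beta = 0.1^(1/window).
      : beta_(std::pow(0.1, 1.0 / (double)window)), sigmas_(sigmas) {}

  // Returns a factor to multiply the gradient with: 1.0 for normal updates,
  // < 1.0 (rescaling back to the average norm) when the log-norm is more than
  // sigmas_ standard deviations above the running average.
  double update(double gradNorm) {
    double x = std::log(gradNorm);
    mean_ = beta_ * mean_ + (1.0 - beta_) * x;
    var_  = beta_ * var_  + (1.0 - beta_) * (x - mean_) * (x - mean_);
    if(steps_++ > 10 && x > mean_ + sigmas_ * std::sqrt(var_)) // diverged
      return std::exp(mean_) / gradNorm;
    return 1.0;
  }

private:
  double beta_, sigmas_;
  double mean_ = 0.0, var_ = 0.0;
  size_t steps_ = 0;
};
```

The --check-gradient-nan option added above plays a similar protective role, but it simply skips the update instead of rescaling.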

@@ -848,7 +847,7 @@ void ConfigParser::addSuboptionsBatching(cli::CLIWrapper& cli) {
cli.add<size_t>("--english-title-case-every",
"When forming minibatches, preprocess every Nth line on the fly to title-case. Assumes English (ASCII only)");

cli.add<int>("--mini-batch-words-ref",
cli.add<size_t>("--mini-batch-words-ref",
"If given, the following hyper parameters are adjusted as-if we had this mini-batch size: "
"--learn-rate, --optimizer-params, --exponential-smoothing, --mini-batch-warmup");
cli.add<std::string/*SchedulerPeriod*/>("--mini-batch-warmup",
@@ -857,6 +856,9 @@
{"0"});
cli.add<bool>("--mini-batch-track-lr",
"Dynamically track mini-batch size inverse to actual learning rate (not considering lr-warmup)");
cli.add<bool>("--mini-batch-round-up",
"Round up batch size to next power of 2 for more efficient training, but this can make batch size less stable. Disable with --mini-batch-round-up=false",
true);
}
// clang-format on
}
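The new `--mini-batch-round-up` switch (on by default) rounds the dynamically fitted batch size up to the next power of two, which tends to produce friendlier shapes for GPU kernels at the price of a less stable effective batch size, as the help text notes. The rounding itself is simple loop/bit arithmetic; a minimal sketch:

```cpp
#include <cstddef>

// Round n up to the next power of two: 1 -> 1, 5 -> 8, 1000 -> 1024, 1024 -> 1024.
size_t roundUpToPow2(size_t n) {
  size_t p = 1;
  while(p < n)
    p <<= 1;
  return p;
}
```

For instance, roundUpToPow2(1000) yields 1024.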
@@ -1043,6 +1045,40 @@ Ptr<Options> ConfigParser::parseOptions(int argc, char** argv, bool doValidate)
}
}

#if 0 // @TODO: remove once fully deprecated
// Convert --after-batches N to --after Nu and --after-epochs N to --after Ne, different values get concatenated with ","
if(mode_ == cli::mode::training && get<size_t>("after-epochs") > 0) {
auto afterValue = get<size_t>("after-epochs");
LOG(info, "\"--after-epochs {}\" is deprecated, please use \"--after {}e\" instead (\"e\" stands for epoch)", afterValue, afterValue);
YAML::Node config;
std::string prevAfter = get<std::string>("after");
std::string converted = std::to_string(afterValue) + "e";
if(prevAfter != "0e")
config["after"] = prevAfter + "," + converted;
else
config["after"] = converted;
if(!config.IsNull())
cli_.updateConfig(config,
cli::OptionPriority::CommandLine,
"Could not update --after with value from --after-epochs");
}
if(mode_ == cli::mode::training && get<size_t>("after-batches") > 0) {
auto afterValue = get<size_t>("after-batches");
LOG(info, "\"--after-batches {}\" is deprecated, please use \"--after {}u\" instead (\"u\" stands for updates)", afterValue, afterValue);
YAML::Node config;
std::string prevAfter = get<std::string>("after");
std::string converted = std::to_string(afterValue) + "u";
if(prevAfter != "0e")
config["after"] = prevAfter + "," + converted;
else
config["after"] = converted;
if(!config.IsNull())
cli_.updateConfig(config,
cli::OptionPriority::CommandLine,
"Could not update --after with value from --after-updates");
}
#endif

cli_.parseAliases();
auto opts = New<Options>();
opts->merge(Config(*this).get());
16 changes: 5 additions & 11 deletions src/common/config_validator.cpp
@@ -170,22 +170,16 @@ void ConfigValidator::validateModelExtension(cli::mode mode) const {
}
}

void ConfigValidator::validateDevices(cli::mode mode) const {
void ConfigValidator::validateDevices(cli::mode /*mode*/) const {
std::string devices = utils::join(get<std::vector<std::string>>("devices"));
utils::trim(devices);

regex::regex pattern;
std::string help;
// @TODO: Is this format still supported? Remove this if not.
if(mode == cli::mode::training && get<bool>("multi-node")) {
// valid strings: '0: 1 2', '0:1 2 1:2 3'
pattern = "( *[0-9]+ *: *[0-9]+( *[0-9]+)*)+";
help = "Supported format for multi-node setting: '0:0 1 2 3 1:0 1 2 3'";
} else {
// valid strings: '0', '0 1 2 3', '3 2 0 1'
pattern = "[0-9]+( *[0-9]+)*";
help = "Supported formats: '0 1 2 3'";
}

// valid strings: '0', '0 1 2 3', '3 2 0 1'
pattern = "[0-9]+( *[0-9]+)*";
help = "Supported formats: '0 1 2 3'";

ABORT_IF(!regex::regex_match(devices, pattern),
"the argument '{}' for option '--devices' is invalid. {}",
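With the multi-node device syntax gone, the validator accepts only a flat, space-separated list of device IDs. A quick standalone check of the same pattern, using std::regex here rather than Marian's regex wrapper:

```cpp
#include <cassert>
#include <regex>
#include <string>

int main() {
  // Same pattern as the validator above: one or more space-separated device IDs.
  std::regex pattern("[0-9]+( *[0-9]+)*");
  assert(std::regex_match(std::string("0 1 2 3"), pattern));                // plain GPU list: valid
  assert(!std::regex_match(std::string("0:0 1 2 3 1:0 1 2 3"), pattern));   // old multi-node syntax: rejected
  return 0;
}
```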