From e92c36cab96a9d71f9eb390aa2794a558264f284 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 13:35:07 -0400 Subject: [PATCH 01/15] SLSQP: 1) Change desired direction of inequality to <= (see issue 148). 2) Keep old behavior as default through new parameter. 3) Update documentation, including examples, and unit tests. --- R/slsqp.R | 52 +++++++++++++++++----------- inst/tinytest/test-wrapper-slsqp.R | 55 +++++++++++++++++++----------- man/slsqp.Rd | 24 +++++++++---- 3 files changed, 85 insertions(+), 46 deletions(-) diff --git a/R/slsqp.R b/R/slsqp.R index 337d23a7..6f34fde3 100644 --- a/R/slsqp.R +++ b/R/slsqp.R @@ -10,11 +10,12 @@ # CHANGELOG # # 2023-02-09: Direct Jacobian now converges to proper value so removing -# confusing commentary in example. Also Cleanup and tweaks for safety -# and efficiency (Avraham Adler) +# confusing commentary in example. Also Cleanup and tweaks for safety and +# efficiency (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now (Avraham Adler). # - #' Sequential Quadratic Programming (SQP) #' #' Sequential (least-squares) quadratic programming (SQP) algorithm for @@ -31,15 +32,21 @@ #' not specified. #' @param lower,upper lower and upper bound constraints. #' @param hin function defining the inequality constraints, that is -#' \code{hin>=0} for all components. +#' \code{hin <= 0} for all components. This is new behavior in line with the +#' rest of the \code{nloptr} arguments. To use the old behavior, please set +#' \code{deprecatedBehavior} to \code{TRUE}. #' @param hinjac Jacobian of function \code{hin}; will be calculated #' numerically if not specified. -#' @param heq function defining the equality constraints, that is \code{heq==0} +#' @param heq function defining the equality constraints, that is \code{heq = 0} #' for all components. #' @param heqjac Jacobian of function \code{heq}; will be calculated #' numerically if not specified. #' @param nl.info logical; shall the original NLopt info been shown. #' @param control list of options, see \code{nl.opts} for help. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. 
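To make the convention change concrete: under the new behavior the feasible region is written as hin(x) <= 0, so a constraint coded for the old hin(x) >= 0 convention only needs to be negated. A minimal sketch, with an illustrative constraint that is not part of the package:

# Feasible region x1 + 2*x2 <= 3, written under both conventions.
hin_old <- function(x) 3 - x[1] - 2 * x[2]  # old convention: feasible when >= 0
hin_new <- function(x) x[1] + 2 * x[2] - 3  # new convention: feasible when <= 0
# Equivalently hin_new <- function(x) -hin_old(x); this negation is what the
# wrapper applies internally while deprecatedBehavior = TRUE.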
#' #' @return List with components: @@ -75,17 +82,19 @@ #' } #' hin.hs100 <- function(x) { #' h <- numeric(4) -#' h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] -#' h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] -#' h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] -#' h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] +#' h[1] <- -127 + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] +#' h[2] <- -282 + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] +#' h[3] <- -196 + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] +#' h[4] <- 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + +#' 5 * x[6] - 11 * x[7] #' return(h) #' } #' #' S <- slsqp(x0.hs100, fn = fn.hs100, # no gradients and jacobians provided #' hin = hin.hs100, #' nl.info = TRUE, -#' control = list(xtol_rel = 1e-8, check_derivatives = TRUE)) +#' control = list(xtol_rel = 1e-8, check_derivatives = TRUE), +#' deprecatedBehavior = FALSE) #' S #' ## Optimal value of objective function: 680.630057375943 #' ## Optimal value of controls: 2.3305 1.951372 -0.4775407 4.365726 @@ -94,7 +103,7 @@ #' slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, hinjac = NULL, heq = NULL, heqjac = NULL, nl.info = FALSE, - control = list(), ...) { + control = list(), deprecatedBehavior = TRUE, ...) { opts <- nl.opts(control) opts["algorithm"] <- "NLOPT_LD_SLSQP" @@ -110,19 +119,24 @@ slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, } if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - .hin <- match.fun(hin) - hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else { - .hinjac <- match.fun(hinjac) - hinjac <- function(x) -.hinjac(x) + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") + .hinjac <- match.fun(hinjac) + hinjac <- function(x) -.hinjac(x) + } } } diff --git a/inst/tinytest/test-wrapper-slsqp.R b/inst/tinytest/test-wrapper-slsqp.R index 99b6d5b9..3049b91e 100644 --- a/inst/tinytest/test-wrapper-slsqp.R +++ b/inst/tinytest/test-wrapper-slsqp.R @@ -15,9 +15,9 @@ library(nloptr) tol <- sqrt(.Machine$double.eps) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. 
The ability to use the old", + "behavior will be removed in a future release.") ## Functions for SLSQP x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) @@ -61,10 +61,6 @@ hin2.hs100 <- function(x) -hin.hs100(x) # Needed for nloptr call hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed for nloptr call hinjac2b.hs100 <- function(x) nl.jacobian(x, hin2.hs100)# Needed for nloptr call -# Test messages -expect_message(slsqp(x0.hs100, fn = fn.hs100, hin = hin.hs100), ineqMess) - - # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. expect_stdout(slsqp(x0.hs100, fn = fn.hs100, nl.info = TRUE), @@ -93,19 +89,20 @@ expect_identical(slsqpTest$message, slsqpControl$message) slsqpTest <- suppressMessages(slsqp(x0.hs100, fn = fn.hs100, gr = gr.hs100, hin = hin.hs100, hinjac = hinjac.hs100)) -slsqpControl <- nloptr(x0 = x0.hs100, - eval_f = fn.hs100, - eval_grad_f = gr.hs100, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2.hs100, - opts = list(algorithm = "NLOPT_LD_SLSQP", - xtol_rel = 1e-6, maxeval = 1000L)) - -expect_identical(slsqpTest$par, slsqpControl$solution) -expect_identical(slsqpTest$value, slsqpControl$objective) -expect_identical(slsqpTest$iter, slsqpControl$iterations) -expect_identical(slsqpTest$convergence, slsqpControl$status) -expect_identical(slsqpTest$message, slsqpControl$message) +# Going to be reused below in new behavior test. +slsqpControlhinjac <- nloptr(x0 = x0.hs100, + eval_f = fn.hs100, + eval_grad_f = gr.hs100, + eval_g_ineq = hin2.hs100, + eval_jac_g_ineq = hinjac2.hs100, + opts = list(algorithm = "NLOPT_LD_SLSQP", + xtol_rel = 1e-6, maxeval = 1000L)) + +expect_identical(slsqpTest$par, slsqpControlhinjac$solution) +expect_identical(slsqpTest$value, slsqpControlhinjac$objective) +expect_identical(slsqpTest$iter, slsqpControlhinjac$iterations) +expect_identical(slsqpTest$convergence, slsqpControlhinjac$status) +expect_identical(slsqpTest$message, slsqpControlhinjac$message) # Not passing equality Jacobian slsqpTest <- suppressMessages(slsqp(x0.hs100, fn = fn.hs100, heq = hin.hs100)) @@ -141,3 +138,21 @@ expect_identical(slsqpTest$value, slsqpControl$objective) expect_identical(slsqpTest$iter, slsqpControl$iterations) expect_identical(slsqpTest$convergence, slsqpControl$status) expect_identical(slsqpTest$message, slsqpControl$message) + +# Test deprecated behavor message; remove when old behavior made defucnt. +expect_message(slsqp(x0.hs100, fn = fn.hs100, hin = hin.hs100), depMess) + +# Test new behavior. Adjust tests above when old behavior made defucnt. +hinx <- function(x) -hin.hs100(x) +hinjacx <- function(x) -hinjac.hs100(x) +expect_silent(slsqp(x0.hs100, fn = fn.hs100, hin = hinx, hinjac = hinjacx, + deprecatedBehavior = FALSE)) + +slsqpTest <- slsqp(x0.hs100, fn = fn.hs100, hin = hinx, hinjac = hinjacx, + deprecatedBehavior = FALSE) + +expect_identical(slsqpTest$par, slsqpControlhinjac$solution) +expect_identical(slsqpTest$value, slsqpControlhinjac$objective) +expect_identical(slsqpTest$iter, slsqpControlhinjac$iterations) +expect_identical(slsqpTest$convergence, slsqpControlhinjac$status) +expect_identical(slsqpTest$message, slsqpControlhinjac$message) diff --git a/man/slsqp.Rd b/man/slsqp.Rd index 18fac402..ca0c239f 100644 --- a/man/slsqp.Rd +++ b/man/slsqp.Rd @@ -16,6 +16,7 @@ slsqp( heqjac = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ... 
) } @@ -30,12 +31,14 @@ not specified.} \item{lower, upper}{lower and upper bound constraints.} \item{hin}{function defining the inequality constraints, that is -\code{hin>=0} for all components.} +\code{hin <= 0} for all components. This is new behavior in line with the +rest of the \code{nloptr} arguments. To use the old behavior, please set +\code{deprecatedBehavior} to \code{TRUE}.} \item{hinjac}{Jacobian of function \code{hin}; will be calculated numerically if not specified.} -\item{heq}{function defining the equality constraints, that is \code{heq==0} +\item{heq}{function defining the equality constraints, that is \code{heq = 0} for all components.} \item{heqjac}{Jacobian of function \code{heq}; will be calculated @@ -45,6 +48,11 @@ numerically if not specified.} \item{control}{list of options, see \code{nl.opts} for help.} +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} + \item{...}{additional arguments passed to the function.} } \value{ @@ -81,17 +89,19 @@ fn.hs100 <- function(x) { } hin.hs100 <- function(x) { h <- numeric(4) - h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] - h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] - h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] - h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] + h[1] <- -127 + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] + h[2] <- -282 + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] + h[3] <- -196 + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] + h[4] <- 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + + 5 * x[6] - 11 * x[7] return(h) } S <- slsqp(x0.hs100, fn = fn.hs100, # no gradients and jacobians provided hin = hin.hs100, nl.info = TRUE, - control = list(xtol_rel = 1e-8, check_derivatives = TRUE)) + control = list(xtol_rel = 1e-8, check_derivatives = TRUE), + deprecatedBehavior = FALSE) S ## Optimal value of objective function: 680.630057375943 ## Optimal value of controls: 2.3305 1.951372 -0.4775407 4.365726 From eb1f058e597ce782570d1966d5edf89319991228 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 14:35:23 -0400 Subject: [PATCH 02/15] AUGLAG: 1) Change desired direction of inequality to <= (see issue 148). 2) Keep old behavior as default through new parameter. 3) Update documentation, including examples, and unit tests. 4) Correct Powell example to match that in Rsolnp --- R/auglag.R | 91 ++++++++++++++++++----------- inst/tinytest/test-wrapper-auglag.R | 78 +++++++++++++++---------- man/auglag.Rd | 58 ++++++++++++------ 3 files changed, 144 insertions(+), 83 deletions(-) diff --git a/R/auglag.R b/R/auglag.R index 68430b35..0e9d31eb 100644 --- a/R/auglag.R +++ b/R/auglag.R @@ -7,10 +7,14 @@ # # Wrapper to solve optimization problem using Augmented Lagrangian. # -# Changelog: -# 2017-09-26: Fixed bug, BOBYQA is allowed as local solver -# (thanks to Leo Belzile). -# 2023-02-08: Tweaks for efficiency and readability (Avraham Adler) +# CHANGELOG +# +# 2017-09-26: Fixed bug, BOBYQA is allowed as local solver +# (thanks to Leo Belzile). +# 2023-02-08: Tweaks for efficiency and readability (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now (Avraham Adler). 
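The augmented-Lagrangian approach that this wrapper exposes can be sketched, very roughly, as an outer loop that folds the constraints into a penalized objective and updates multiplier estimates between inner solves. The sketch below is illustrative only: it handles a single equality constraint, uses stats::optim() as a stand-in inner solver, and is not how auglag() is implemented internally.

f   <- function(x) (x[1] - 2)^2 + (x[2] - 1)^2   # objective from the example below
heq <- function(x) x[1] - 2 * x[2] + 1           # want heq(x) == 0
lam <- 0      # multiplier estimate
rho <- 10     # penalty parameter
x   <- c(1, 1)
for (k in 1:25) {
  # augmented Lagrangian for the current multiplier estimate
  La  <- function(x) f(x) - lam * heq(x) + 0.5 * rho * heq(x)^2
  x   <- optim(x, La, method = "BFGS")$par       # inner unconstrained solve
  lam <- lam - rho * heq(x)                      # first-order multiplier update
}
x   # approaches the equality-constrained minimum, roughly c(1.8, 1.4)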
+# #' Augmented Lagrangian Algorithm #' @@ -54,6 +58,10 @@ #' the local solver?; not possible at the moment. #' @param nl.info logical; shall the original NLopt info been shown. #' @param control list of options, see \code{nl.opts} for help. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -89,50 +97,65 @@ #' @examples #' #' x0 <- c(1, 1) -#' fn <- function(x) (x[1]-2)^2 + (x[2]-1)^2 -#' hin <- function(x) -0.25*x[1]^2 - x[2]^2 + 1 # hin >= 0 -#' heq <- function(x) x[1] - 2*x[2] + 1 # heq == 0 +#' fn <- function(x) (x[1] - 2) ^ 2 + (x[2] - 1) ^ 2 +#' hin <- function(x) 0.25 * x[1]^2 + x[2] ^ 2 - 1 # hin <= 0 +#' heq <- function(x) x[1] - 2 * x[2] + 1 # heq = 0 #' gr <- function(x) nl.grad(x, fn) #' hinjac <- function(x) nl.jacobian(x, hin) #' heqjac <- function(x) nl.jacobian(x, heq) #' -#' auglag(x0, fn, gr = NULL, hin = hin, heq = heq) # with COBYLA +#' # with COBYLA +#' auglag(x0, fn, gr = NULL, hin = hin, heq = heq, deprecatedBehavior = FALSE) +#' #' # $par: 0.8228761 0.9114382 #' # $value: 1.393464 #' # $iter: 1001 #' -#' auglag(x0, fn, gr = NULL, hin = hin, heq = heq, localsolver = "SLSQP") +#' auglag(x0, fn, gr = NULL, hin = hin, heq = heq, localsolver = "SLSQP", +#' deprecatedBehavior = FALSE) +#' #' # $par: 0.8228757 0.9114378 #' # $value: 1.393465 -#' # $iter 173 +#' # $iter 184 #' #' ## Example from the alabama::auglag help page -#' fn <- function(x) (x[1] + 3*x[2] + x[3])^2 + 4 * (x[1] - x[2])^2 +#' ## Parameters should be roughly (0, 0, 1) with an objective value of 1. +#' +#' fn <- function(x) (x[1] + 3 * x[2] + x[3]) ^ 2 + 4 * (x[1] - x[2]) ^ 2 #' heq <- function(x) x[1] + x[2] + x[3] - 1 -#' hin <- function(x) c(6*x[2] + 4*x[3] - x[1]^3 - 3, x[1], x[2], x[3]) +#' # hin restated from alabama example to be <= 0. +#' hin <- function(x) c(-6 * x[2] - 4 * x[3] + x[1] ^ 3 + 3, -x[1], -x[2], -x[3]) +#' +#' set.seed(12) +#' auglag(runif(3), fn, hin = hin, heq = heq, localsolver= "lbfgs", +#' deprecatedBehavior = FALSE) #' -#' auglag(runif(3), fn, hin = hin, heq = heq, localsolver="lbfgs") -#' # $par: 2.380000e-09 1.086082e-14 1.000000e+00 +#' # $par: 4.861756e-08 4.732373e-08 9.999999e-01 #' # $value: 1 -#' # $iter: 289 +#' # $iter: 145 #' #' ## Powell problem from the Rsolnp::solnp help page +#' ## Parameters should be roughly (-1.7171, 1.5957, 1.8272, -0.7636, -0.7636) +#' ## with an objective value of 0.0539498478. 
+#' #' x0 <- c(-2, 2, 2, -1, -1) -#' fn1 <- function(x) exp(x[1]*x[2]*x[3]*x[4]*x[5]) +#' fn1 <- function(x) exp(x[1] * x[2] * x[3] * x[4] * x[5]) #' eqn1 <-function(x) -#' c(x[1]*x[1]+x[2]*x[2]+x[3]*x[3]+x[4]*x[4]+x[5]*x[5], -#' x[2]*x[3]-5*x[4]*x[5], -#' x[1]*x[1]*x[1]+x[2]*x[2]*x[2]) +#' c(x[1] * x[1] + x[2] * x[2] + x[3] * x[3] + x[4] * x[4] + x[5] * x[5] - 10, +#' x[2] * x[3] - 5 * x[4] * x[5], +#' x[1] * x[1] * x[1] + x[2] * x[2] * x[2] + 1) #' -#' auglag(x0, fn1, heq = eqn1, localsolver = "mma") -#' # $par: -3.988458e-10 -1.654201e-08 -3.752028e-10 8.904445e-10 8.926336e-10 -#' # $value: 1 -#' # $iter: 1001 +#' auglag(x0, fn1, heq = eqn1, localsolver = "mma", deprecatedBehavior = FALSE) +#' +#' # $par: -1.7173645 1.5959655 1.8268352 -0.7636185 -0.7636185 +#' # $value: 0.05394987 +#' # $iter: 916 #' auglag <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, hinjac = NULL, heq = NULL, heqjac = NULL, localsolver = "COBYLA", localtol = 1e-6, ineq2local = FALSE, - nl.info = FALSE, control = list(), ...) { + nl.info = FALSE, control = list(), deprecatedBehavior = TRUE, + ...) { if (ineq2local) { # gsolver <- "NLOPT_LN_AUGLAG_EQ" stop("Inequalities to local solver: feature not yet implemented.") @@ -167,23 +190,25 @@ auglag <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, # Inequality constraints if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - - .hin <- match.fun(hin) - hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } if (!dfree) { if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) - } else { + } else if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) + } } - } # Equality constraints if (!is.null(heq)) { diff --git a/inst/tinytest/test-wrapper-auglag.R b/inst/tinytest/test-wrapper-auglag.R index 2ea565cd..10d86bbf 100644 --- a/inst/tinytest/test-wrapper-auglag.R +++ b/inst/tinytest/test-wrapper-auglag.R @@ -13,20 +13,24 @@ library(nloptr) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. 
The ability to use the old", + "behavior will be removed in a future release.") # Taken from example x0 <- c(1, 1) fn <- function(x) (x[1L] - 2) ^ 2 + (x[2L] - 1) ^ 2 -hin <- function(x) -0.25 * x[1L] ^ 2 - x[2L] ^ 2 + 1 # hin >= 0 +hin <- function(x) 0.25 * x[1L] ^ 2 + x[2L] ^ 2 - 1 # hin <= 0 heq <- function(x) x[1L] - 2 * x[2L] + 1 # heq == 0 gr <- function(x) nl.grad(x, fn) hinjac <- function(x) nl.jacobian(x, hin) heqjac <- function(x) nl.jacobian(x, heq) -hin2 <- function(x) -hin(x) # hin2 <= 0 needed for nloptr -hinjac2 <- function(x) nl.jacobian(x, hin2) +hin2 <- function(x) -hin(x) # Needed to test old behavior +hinjac2 <- function(x) nl.jacobian(x, hin2) # Needed to test old behavior + +# Test silence on proper behavior +expect_silent(auglag(x0, fn)) +expect_silent(auglag(x0, fn, hin = hin, deprecatedBehavior = FALSE)) # Test errors expect_error(auglag(x0, fn, ineq2local = TRUE), @@ -34,21 +38,16 @@ expect_error(auglag(x0, fn, ineq2local = TRUE), expect_error(auglag(x0, fn, localsolver = "NLOPT_LN_NELDERMEAD"), "Only local solvers allowed: BOBYQA, COBYLA, LBFGS, MMA, SLSQP.") -# Test messages -expect_message(auglag(x0, fn, hin = hin), ineqMess) - # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. expect_stdout(auglag(x0, fn, nl.info = TRUE), "Call:", fixed = TRUE) -expect_silent(auglag(x0, fn)) - # Test COBYLA version -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq)) +augTest <- auglag(x0, fn, hin = hin, heq = heq, deprecatedBehavior = FALSE) augControl <- nloptr(x0 = x0, eval_f = fn, - eval_g_ineq = hin2, + eval_g_ineq = hin, eval_g_eq = heq, opts = list(algorithm = "NLOPT_LN_AUGLAG", xtol_rel = 1e-6, @@ -66,12 +65,12 @@ expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) # Test BOBYQA version -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq, - localsolver = "BOBYQA")) +augTest <- auglag(x0, fn, hin = hin, heq = heq, localsolver = "BOBYQA", + deprecatedBehavior = FALSE) augControl <- nloptr(x0 = x0, eval_f = fn, - eval_g_ineq = hin2, + eval_g_ineq = hin, eval_g_eq = heq, opts = list(algorithm = "NLOPT_LN_AUGLAG", xtol_rel = 1e-6, @@ -90,13 +89,14 @@ expect_identical(augTest$message, augControl$message) # Test SLSQP version # No passed hin/heq Jacobian -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq, - localsolver = "SLSQP")) +augTest <- auglag(x0, fn, hin = hin, heq = heq, localsolver = "SLSQP", + deprecatedBehavior = FALSE) + augControl <- nloptr(x0 = x0, eval_f = fn, eval_grad_f = gr, - eval_g_ineq = hin2, - eval_jac_g_ineq = hinjac2, + eval_g_ineq = hin, + eval_jac_g_ineq = hinjac, eval_g_eq = heq, eval_jac_g_eq = heqjac, opts = list(algorithm = "NLOPT_LD_AUGLAG", @@ -114,9 +114,9 @@ expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) # Passed hin/heq Jacobian -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq, - hinjac = hinjac, heqjac = heqjac, - localsolver = "SLSQP")) +augTest <- auglag(x0, fn, hin = hin, heq = heq, hinjac = hinjac, + heqjac = heqjac, localsolver = "SLSQP", + deprecatedBehavior = FALSE) expect_identical(augTest$par, augControl$solution) expect_identical(augTest$value, augControl$objective) @@ -126,13 +126,14 @@ expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) # Test LBFGS version -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq, - localsolver = 
"LBFGS")) +augTest <- auglag(x0, fn, hin = hin, heq = heq, localsolver = "LBFGS", + deprecatedBehavior = FALSE) + augControl <- nloptr(x0 = x0, eval_f = fn, eval_grad_f = gr, - eval_g_ineq = hin2, - eval_jac_g_ineq = hinjac2, + eval_g_ineq = hin, + eval_jac_g_ineq = hinjac, eval_g_eq = heq, eval_jac_g_eq = heqjac, opts = list(algorithm = "NLOPT_LD_AUGLAG", @@ -150,13 +151,14 @@ expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) # Test MMA version -augTest <- suppressMessages(auglag(x0, fn, hin = hin, heq = heq, - localsolver = "MMA")) +augTest <- auglag(x0, fn, hin = hin, heq = heq, localsolver = "MMA", + deprecatedBehavior = FALSE) + augControl <- nloptr(x0 = x0, eval_f = fn, eval_grad_f = gr, - eval_g_ineq = hin2, - eval_jac_g_ineq = hinjac2, + eval_g_ineq = hin, + eval_jac_g_ineq = hinjac, eval_g_eq = heq, eval_jac_g_eq = heqjac, opts = list(algorithm = "NLOPT_LD_AUGLAG", @@ -172,3 +174,17 @@ expect_identical(augTest$global_solver, augControl$options$algorithm) expect_identical(augTest$local_solver, augControl$local_options$algorithm) expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) + +# Test deprecated message +expect_message(auglag(x0, fn, hin = hin2), depMess) + +# Test old behavior still works +augTest <- suppressMessages(auglag(x0, fn, hin = hin2, heq = heq, + localsolver = "MMA")) + +expect_identical(augTest$par, augControl$solution) +expect_identical(augTest$value, augControl$objective) +expect_identical(augTest$global_solver, augControl$options$algorithm) +expect_identical(augTest$local_solver, augControl$local_options$algorithm) +expect_identical(augTest$convergence, augControl$status) +expect_identical(augTest$message, augControl$message) diff --git a/man/auglag.Rd b/man/auglag.Rd index 06c6aa2f..ae56ddb3 100644 --- a/man/auglag.Rd +++ b/man/auglag.Rd @@ -19,6 +19,7 @@ auglag( ineq2local = FALSE, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ... ) } @@ -47,6 +48,11 @@ the local solver?; not possible at the moment.} \item{control}{list of options, see \code{nl.opts} for help.} +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} + \item{...}{additional arguments passed to the function.} } \value{ @@ -97,45 +103,59 @@ semi-free packages like LANCELOT. 
\examples{ x0 <- c(1, 1) -fn <- function(x) (x[1]-2)^2 + (x[2]-1)^2 -hin <- function(x) -0.25*x[1]^2 - x[2]^2 + 1 # hin >= 0 -heq <- function(x) x[1] - 2*x[2] + 1 # heq == 0 +fn <- function(x) (x[1] - 2) ^ 2 + (x[2] - 1) ^ 2 +hin <- function(x) 0.25 * x[1]^2 + x[2] ^ 2 - 1 # hin <= 0 +heq <- function(x) x[1] - 2 * x[2] + 1 # heq = 0 gr <- function(x) nl.grad(x, fn) hinjac <- function(x) nl.jacobian(x, hin) heqjac <- function(x) nl.jacobian(x, heq) -auglag(x0, fn, gr = NULL, hin = hin, heq = heq) # with COBYLA +# with COBYLA +auglag(x0, fn, gr = NULL, hin = hin, heq = heq, deprecatedBehavior = FALSE) + # $par: 0.8228761 0.9114382 # $value: 1.393464 # $iter: 1001 -auglag(x0, fn, gr = NULL, hin = hin, heq = heq, localsolver = "SLSQP") +auglag(x0, fn, gr = NULL, hin = hin, heq = heq, localsolver = "SLSQP", + deprecatedBehavior = FALSE) + # $par: 0.8228757 0.9114378 # $value: 1.393465 -# $iter 173 +# $iter 184 ## Example from the alabama::auglag help page -fn <- function(x) (x[1] + 3*x[2] + x[3])^2 + 4 * (x[1] - x[2])^2 +## Parameters should be roughly (0, 0, 1) with an objective value of 1. + +fn <- function(x) (x[1] + 3 * x[2] + x[3]) ^ 2 + 4 * (x[1] - x[2]) ^ 2 heq <- function(x) x[1] + x[2] + x[3] - 1 -hin <- function(x) c(6*x[2] + 4*x[3] - x[1]^3 - 3, x[1], x[2], x[3]) +# hin restated from alabama example to be <= 0. +hin <- function(x) c(-6 * x[2] - 4 * x[3] + x[1] ^ 3 + 3, -x[1], -x[2], -x[3]) + +set.seed(12) +auglag(runif(3), fn, hin = hin, heq = heq, localsolver= "lbfgs", + deprecatedBehavior = FALSE) -auglag(runif(3), fn, hin = hin, heq = heq, localsolver="lbfgs") -# $par: 2.380000e-09 1.086082e-14 1.000000e+00 +# $par: 4.861756e-08 4.732373e-08 9.999999e-01 # $value: 1 -# $iter: 289 +# $iter: 145 ## Powell problem from the Rsolnp::solnp help page +## Parameters should be roughly (-1.7171, 1.5957, 1.8272, -0.7636, -0.7636) +## with an objective value of 0.0539498478. + x0 <- c(-2, 2, 2, -1, -1) -fn1 <- function(x) exp(x[1]*x[2]*x[3]*x[4]*x[5]) +fn1 <- function(x) exp(x[1] * x[2] * x[3] * x[4] * x[5]) eqn1 <-function(x) - c(x[1]*x[1]+x[2]*x[2]+x[3]*x[3]+x[4]*x[4]+x[5]*x[5], - x[2]*x[3]-5*x[4]*x[5], - x[1]*x[1]*x[1]+x[2]*x[2]*x[2]) + c(x[1] * x[1] + x[2] * x[2] + x[3] * x[3] + x[4] * x[4] + x[5] * x[5] - 10, + x[2] * x[3] - 5 * x[4] * x[5], + x[1] * x[1] * x[1] + x[2] * x[2] * x[2] + 1) -auglag(x0, fn1, heq = eqn1, localsolver = "mma") -# $par: -3.988458e-10 -1.654201e-08 -3.752028e-10 8.904445e-10 8.926336e-10 -# $value: 1 -# $iter: 1001 +auglag(x0, fn1, heq = eqn1, localsolver = "mma", deprecatedBehavior = FALSE) + +# $par: -1.7173645 1.5959655 1.8268352 -0.7636185 -0.7636185 +# $value: 0.05394987 +# $iter: 916 } \references{ From 5dcb1c47025f3f5268cd0d16af44fa9f4644af48 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 14:43:38 -0400 Subject: [PATCH 03/15] SLSQP: Combine else with if into one branch; should be more efficient. --- R/slsqp.R | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/R/slsqp.R b/R/slsqp.R index 6f34fde3..9393f695 100644 --- a/R/slsqp.R +++ b/R/slsqp.R @@ -129,14 +129,12 @@ slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) - } else { - if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", - "restate the inequality to be <=0. 
The ability to use the old", - "behavior will be removed in a future release.") - .hinjac <- match.fun(hinjac) - hinjac <- function(x) -.hinjac(x) - } + } else if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") + .hinjac <- match.fun(hinjac) + hinjac <- function(x) -.hinjac(x) } } From 25540a9a78df8d43a0676b7874589fa6c453475d Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 15:33:06 -0400 Subject: [PATCH 04/15] SLSQP: Clean up Hock-Schittkowski 100 problem. --- R/slsqp.R | 39 +++++++++++++++++++++------------------ man/slsqp.Rd | 35 ++++++++++++++++++----------------- 2 files changed, 39 insertions(+), 35 deletions(-) diff --git a/R/slsqp.R b/R/slsqp.R index 9393f695..423d5594 100644 --- a/R/slsqp.R +++ b/R/slsqp.R @@ -13,7 +13,8 @@ # confusing commentary in example. Also Cleanup and tweaks for safety and # efficiency (Avraham Adler) # 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving -# the old behavior as the default for now (Avraham Adler). +# the old behavior as the default for now. Also cleaned up the HS100 +# example (Avraham Adler). # #' Sequential Quadratic Programming (SQP) @@ -74,20 +75,20 @@ #' #' @examples #' -#' ## Solve the Hock-Schittkowski problem no. 100 +#' ## Solve the Hock-Schittkowski problem no. 100 with analytic gradients +#' ## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm +#' #' x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -#' fn.hs100 <- function(x) { -#' (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + -#' 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -#' } -#' hin.hs100 <- function(x) { -#' h <- numeric(4) -#' h[1] <- -127 + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] -#' h[2] <- -282 + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] -#' h[3] <- -196 + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] -#' h[4] <- 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + -#' 5 * x[6] - 11 * x[7] -#' return(h) +#' fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + +#' 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + +#' x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} +#' +#' hin.hs100 <- function(x) {c( +#' 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +#' 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +#' 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +#' 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - +#' 11 * x[7]) #' } #' #' S <- slsqp(x0.hs100, fn = fn.hs100, # no gradients and jacobians provided @@ -95,12 +96,14 @@ #' nl.info = TRUE, #' control = list(xtol_rel = 1e-8, check_derivatives = TRUE), #' deprecatedBehavior = FALSE) -#' S -#' ## Optimal value of objective function: 680.630057375943 -#' ## Optimal value of controls: 2.3305 1.951372 -0.4775407 4.365726 -#' ## -0.6244871 1.038131 1.594227 #' +#' ## The optimum value of the objective function should be 680.6300573 +#' ## A suitable parameter vector is roughly +#' ## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) #' +#' S +#' + slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, hinjac = NULL, heq = NULL, heqjac = NULL, nl.info = FALSE, control = list(), deprecatedBehavior = TRUE, ...) 
{ diff --git a/man/slsqp.Rd b/man/slsqp.Rd index ca0c239f..71178a4f 100644 --- a/man/slsqp.Rd +++ b/man/slsqp.Rd @@ -81,20 +81,20 @@ See more infos at } \examples{ -## Solve the Hock-Schittkowski problem no. 100 +## Solve the Hock-Schittkowski problem no. 100 with analytic gradients +## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm + x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + - 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -} -hin.hs100 <- function(x) { - h <- numeric(4) - h[1] <- -127 + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - h[2] <- -282 + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - h[3] <- -196 + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - h[4] <- 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + - 5 * x[6] - 11 * x[7] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( +2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } S <- slsqp(x0.hs100, fn = fn.hs100, # no gradients and jacobians provided @@ -102,11 +102,12 @@ S <- slsqp(x0.hs100, fn = fn.hs100, # no gradients and jacobians provided nl.info = TRUE, control = list(xtol_rel = 1e-8, check_derivatives = TRUE), deprecatedBehavior = FALSE) -S -## Optimal value of objective function: 680.630057375943 -## Optimal value of controls: 2.3305 1.951372 -0.4775407 4.365726 -## -0.6244871 1.038131 1.594227 +## The optimum value of the objective function should be 680.6300573 +## A suitable parameter vector is roughly +## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) + +S } \references{ From c7a23e5b823eff152bfb2db3578cc61a0e4a94d9 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 17:14:31 -0400 Subject: [PATCH 05/15] CCSAQ: 1) Change desired direction of inequalities to <= 2) Maintain old behavior as default with new parameter 3) Clean up HS100 example; analytic gradient works now - as it should. 3) Update documentation and unit tests --- R/ccsaq.R | 91 +++++++++------ inst/tinytest/test-wrapper-ccsaq.R | 170 +++++++++++++++++------------ man/ccsaq.Rd | 62 +++++++---- 3 files changed, 193 insertions(+), 130 deletions(-) diff --git a/R/ccsaq.R b/R/ccsaq.R index 684c7371..80d9e08c 100644 --- a/R/ccsaq.R +++ b/R/ccsaq.R @@ -6,7 +6,10 @@ # Wrapper to solve optimization problem using CCSAQ. # # CHANGELOG: -# 2023-02-11: Tweaks for efficiency and readability (Avraham Adler) +# 2023-02-11: Tweaks for efficiency and readability (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the HS100 +# example (Avraham Adler). # #' Conservative Convex Separable Approximation with Affine Approximation plus @@ -28,6 +31,10 @@ #' numerically if not specified. #' @param nl.info logical; shall the original NLopt info been shown. #' @param control list of options, see \code{nl.opts} for help. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. 
This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -54,48 +61,58 @@ #' @examples #' #' ## Solve the Hock-Schittkowski problem no. 100 with analytic gradients +#' ## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm +#' #' x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -#' fn.hs100 <- function(x) { -#' (x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + 3 * (x[4] - 11) ^ 2 + -#' 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + x[7] ^ 4 - 4 * x[6] * x[7] - -#' 10 * x[6] - 8 * x[7] -#' } -#' hin.hs100 <- function(x) { -#' h <- numeric(4) -#' h[1] <- 127 - 2 * x[1] ^ 2 - 3 * x[2] ^ 4 - x[3] - 4 * x[4] ^ 2 - 5 * x[5] -#' h[2] <- 282 - 7 * x[1] - 3 * x[2] - 10 * x[3] ^ 2 - x[4] + x[5] -#' h[3] <- 196 - 23 * x[1] - x[2] ^ 2 - 6 * x[6] ^ 2 + 8 * x[7] -#' h[4] <- -4 * x[1] ^ 2 - x[2] ^ 2 + 3 * x[1] * x[2] -2 * x[3] ^ 2 - -#' 5 * x[6] + 11 * x[7] -#' return(h) +#' fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + +#' 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + +#' x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} +#' +#' hin.hs100 <- function(x) {c( +#' 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +#' 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +#' 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +#' 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - +#' 11 * x[7]) #' } +#' #' gr.hs100 <- function(x) { -#' c( 2 * x[1] - 20, +#' c( 2 * x[1] - 20, #' 10 * x[2] - 120, #' 4 * x[3] ^ 3, #' 6 * x[4] - 66, #' 60 * x[5] ^ 5, #' 14 * x[6] - 4 * x[7] - 10, -#' 4 * x[7] ^ 3 - 4 * x[6] - 8) +#' 4 * x[7] ^ 3 - 4 * x[6] - 8) #' } +#' #' hinjac.hs100 <- function(x) { -#' matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, 7, 3, 20 * x[3], -#' 1, -1, 0, 0, 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, -#' 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], -#' 0, 0, 5, -11), 4, 7, byrow = TRUE) +#' matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, +#' 7, 3, 20 * x[3], 1, -1, 0, 0, +#' 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, +#' 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), +#' nrow = 4, byrow = TRUE) #' } #' -#' # incorrect result with exact jacobian +#' ## The optimum value of the objective function should be 680.6300573 +#' ## A suitable parameter vector is roughly +#' ## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) +#' +#' # Results with exact Jacobian #' S <- ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, #' hin = hin.hs100, hinjac = hinjac.hs100, -#' nl.info = TRUE, control = list(xtol_rel = 1e-8)) +#' nl.info = TRUE, control = list(xtol_rel = 1e-8), +#' deprecatedBehavior = FALSE) #' -#' \donttest{ +#' # Results without Jacobian #' S <- ccsaq(x0.hs100, fn.hs100, hin = hin.hs100, -#' nl.info = TRUE, control = list(xtol_rel = 1e-8)) -#' } +#' nl.info = TRUE, control = list(xtol_rel = 1e-8), +#' deprecatedBehavior = FALSE) +#' + ccsaq <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, - hinjac = NULL, nl.info = FALSE, control = list(), ...) { + hinjac = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ...) 
{ opts <- nl.opts(control) opts["algorithm"] <- "NLOPT_LD_CCSAQ" @@ -111,19 +128,21 @@ ccsaq <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, } if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - - .hin <- match.fun(hin) - hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) - } else { - .hinjac <- match.fun(hinjac) - hinjac <- function(x) -.hinjac(x) + } else if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") + .hinjac <- match.fun(hinjac) + hinjac <- function(x) -.hinjac(x) } } diff --git a/inst/tinytest/test-wrapper-ccsaq.R b/inst/tinytest/test-wrapper-ccsaq.R index 2012ca61..60034325 100644 --- a/inst/tinytest/test-wrapper-ccsaq.R +++ b/inst/tinytest/test-wrapper-ccsaq.R @@ -13,99 +13,129 @@ library(nloptr) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") + # Taken from example x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1L] - 10) ^ 2 + 5 * (x[2L] - 12) ^ 2 + x[3L] ^ 4 + 3 * (x[4L] - 11) ^ 2 + - 10 * x[5L] ^ 6 + 7 * x[6L] ^ 2 + x[7L] ^ 4 - 4 * x[6L] * x[7L] - - 10 * x[6L] - 8 * x[7L] -} - -hin.hs100 <- function(x) { - h <- double(4L) - h[1L] <- 127 - 2 * x[1L] ^ 2 - 3 * x[2L] ^ 4 - x[3L] - 4 * x[4L] ^ 2 - 5 * - x[5L] - h[2L] <- 282 - 7 * x[1L] - 3 * x[2L] - 10 * x[3L] ^ 2 - x[4L] + x[5L] - h[3L] <- 196 - 23 * x[1L] - x[2L] ^ 2 - 6 * x[6L] ^ 2 + 8 * x[7L] - h[4L] <- -4 * x[1L] ^ 2 - x[2L] ^ 2 + 3 * x[1L] * x[2L] - 2 * x[3L] ^ 2 - - 5 * x[6L] + 11 * x[7L] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, + 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } gr.hs100 <- function(x) { - c(2 * x[1L] - 20, - 10 * x[2L] - 120, - 4 * x[3L] ^ 3, - 6 * x[4L] - 66, - 60 * x[5L] ^ 5, - 14 * x[6L] - 4 * x[7L] - 10, - 4 * x[7L] ^ 3 - 4 * x[6L] - 8) + c( 2 * x[1] - 20, + 10 * x[2] - 120, + 4 * x[3] ^ 3, + 6 * x[4] - 66, + 60 * x[5] ^ 5, + 14 * x[6] - 4 * x[7] - 10, + 4 * x[7] ^ 3 - 4 * x[6] - 8) } hinjac.hs100 <- function(x) { - matrix(c(4 * x[1L], 12 * x[2L] ^ 3, 1, 8 * x[4L], 5, 0, 0, 7, 3, 20 * x[3L], - 1, -1, 0, 0, 23, 2 * x[2L], 0, 0, 0, 12 * x[6L], -8, - 8 * x[1L] - 3 * x[2L], 2 * x[2L] - 3 * x[1L], 4 * x[3L], 0, 0, 5, - -11), 4, 7, byrow = TRUE) + matrix(c(4 * 
x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, + 7, 3, 20 * x[3], 1, -1, 0, 0, + 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, + 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), + nrow = 4, byrow = TRUE) } -gr.hs100.proper <- function(x) nl.grad(x, fn.hs100) # See example +# In older version, the HS100 was not properly copied, so the gradients caused +# an issue. This has since been corrected, but leaving the calls in to test the +# gradient/Jacobian creation routines. -hin2.hs100 <- function(x) -hin.hs100(x) # Needed for nloptr call -hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed for nloptr call -hinjac.hs100.proper <- function(x) nl.jacobian(x, hin.hs100) # See example -hinjac2.hs100.proper <- function(x) nl.jacobian(x, hin2.hs100) # See example +gr.hs100.computed <- function(x) nl.grad(x, fn.hs100) -ctl <- list(xtol_rel = 1e-8) +hin2.hs100 <- function(x) -hin.hs100(x) # Needed to test old behavior +hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed to test old behavior +hinjac.hs100.computed <- function(x) nl.jacobian(x, hin.hs100) # See example +hinjac2.hs100.computed <- function(x) nl.jacobian(x, hin2.hs100) # See example -# Test messages -expect_message(ccsaq(x0.hs100, fn.hs100, hin = hin.hs100), ineqMess) +ctl <- list(xtol_rel = 1e-8, maxeval = 1000L) + +# Test normal silent running +expect_silent(ccsaq(x0.hs100, fn.hs100)) # Provides incorrect answer +expect_silent(ccsaq(x0.hs100, fn.hs100, hin = hin.hs100, hinjac = hinjac.hs100, + deprecatedBehavior = FALSE)) # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. -expect_stdout(ccsaq(x0.hs100, fn.hs100, nl.info = TRUE), "Call:", fixed = TRUE) - -expect_silent(ccsaq(x0.hs100, fn.hs100)) +expect_stdout(ccsaq(x0.hs100, fn.hs100, nl.info = TRUE, + deprecatedBehavior = FALSE), "Call:", fixed = TRUE) -# Test no passed gradient or Jacobian -ccsaqTest <- suppressMessages(ccsaq(x0.hs100, fn.hs100, hin = hin.hs100, - control = ctl)) +# Control for CCSAQ HS100 +## Exact +ccsaqControlE <- nloptr( + x0 = x0.hs100, + eval_f = fn.hs100, + eval_grad_f = gr.hs100, + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = hinjac.hs100, + opts = list(algorithm = "NLOPT_LD_CCSAQ", xtol_rel = 1e-8, maxeval = 1000L) +) -ccsaqControl <- nloptr( +## Computed +ccsaqControlC <- nloptr( x0 = x0.hs100, eval_f = fn.hs100, - eval_grad_f = gr.hs100.proper, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2.hs100.proper, - opts = list(algorithm = "NLOPT_LD_CCSAQ", xtol_rel = 1e-8) + eval_grad_f = gr.hs100.computed, + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = hinjac.hs100.computed, + opts = list(algorithm = "NLOPT_LD_CCSAQ", xtol_rel = 1e-8, maxeval = 1000L) ) -expect_identical(ccsaqTest$par, ccsaqControl$solution) -expect_identical(ccsaqTest$value, ccsaqControl$objective) -expect_identical(ccsaqTest$iter, ccsaqControl$iterations) -expect_identical(ccsaqTest$convergence, ccsaqControl$status) -expect_identical(ccsaqTest$message, ccsaqControl$message) +# Test no passed gradient or Jacobian (so algorithm computes). 
+ccsaqTest <- ccsaq(x0.hs100, fn.hs100, hin = hin.hs100, control = ctl, + deprecatedBehavior = FALSE) + +expect_identical(ccsaqTest$par, ccsaqControlC$solution) +expect_identical(ccsaqTest$value, ccsaqControlC$objective) +expect_identical(ccsaqTest$iter, ccsaqControlC$iterations) +expect_identical(ccsaqTest$convergence, ccsaqControlC$status) +expect_identical(ccsaqTest$message, ccsaqControlC$message) # Test passed gradient and Jacobian -ccsaqTest <- suppressMessages(ccsaq(x0.hs100, fn.hs100, gr = gr.hs100.proper, - hin = hin.hs100, - hinjac = hinjac.hs100.proper, +## Exact +ccsaqTest <- ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin.hs100, + hinjac = hinjac.hs100, control = ctl, + deprecatedBehavior = FALSE) + +expect_identical(ccsaqTest$par, ccsaqControlE$solution) +expect_identical(ccsaqTest$value, ccsaqControlE$objective) +expect_identical(ccsaqTest$iter, ccsaqControlE$iterations) +expect_identical(ccsaqTest$convergence, ccsaqControlE$status) +expect_identical(ccsaqTest$message, ccsaqControlE$message) + +## Computed +ccsaqTest <- ccsaq(x0.hs100, fn.hs100, gr = gr.hs100.computed, hin = hin.hs100, + hinjac = hinjac.hs100.computed, control = ctl, + deprecatedBehavior = FALSE) + +expect_identical(ccsaqTest$par, ccsaqControlC$solution) +expect_identical(ccsaqTest$value, ccsaqControlC$objective) +expect_identical(ccsaqTest$iter, ccsaqControlC$iterations) +expect_identical(ccsaqTest$convergence, ccsaqControlC$status) +expect_identical(ccsaqTest$message, ccsaqControlC$message) + +# Test deprecated behavior message +expect_message(ccsaq(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) + +# Test deprecated behavior +ccsaqTest <- suppressMessages(ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, + hin = hin2.hs100, hinjac = hinjac2.hs100, control = ctl)) -ccsaqControl <- nloptr(x0 = x0.hs100, - eval_f = fn.hs100, - eval_grad_f = gr.hs100.proper, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2.hs100.proper, - opts = list(algorithm = "NLOPT_LD_CCSAQ", - xtol_rel = 1e-8) -) - -expect_identical(ccsaqTest$par, ccsaqControl$solution) -expect_identical(ccsaqTest$value, ccsaqControl$objective) -expect_identical(ccsaqTest$iter, ccsaqControl$iterations) -expect_identical(ccsaqTest$convergence, ccsaqControl$status) -expect_identical(ccsaqTest$message, ccsaqControl$message) +expect_identical(ccsaqTest$par, ccsaqControlE$solution) +expect_identical(ccsaqTest$value, ccsaqControlE$objective) +expect_identical(ccsaqTest$iter, ccsaqControlE$iterations) +expect_identical(ccsaqTest$convergence, ccsaqControlE$status) +expect_identical(ccsaqTest$message, ccsaqControlE$message) diff --git a/man/ccsaq.Rd b/man/ccsaq.Rd index 1996d12d..112934d9 100644 --- a/man/ccsaq.Rd +++ b/man/ccsaq.Rd @@ -15,6 +15,7 @@ ccsaq( hinjac = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ... ) } @@ -38,6 +39,11 @@ numerically if not specified.} \item{control}{list of options, see \code{nl.opts} for help.} +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} + \item{...}{additional arguments passed to the function.} } \value{ @@ -64,46 +70,54 @@ minimum from any feasible starting point. \examples{ ## Solve the Hock-Schittkowski problem no. 
100 with analytic gradients +## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm + x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + 3 * (x[4] - 11) ^ 2 + - 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + x[7] ^ 4 - 4 * x[6] * x[7] - - 10 * x[6] - 8 * x[7] -} -hin.hs100 <- function(x) { - h <- numeric(4) - h[1] <- 127 - 2 * x[1] ^ 2 - 3 * x[2] ^ 4 - x[3] - 4 * x[4] ^ 2 - 5 * x[5] - h[2] <- 282 - 7 * x[1] - 3 * x[2] - 10 * x[3] ^ 2 - x[4] + x[5] - h[3] <- 196 - 23 * x[1] - x[2] ^ 2 - 6 * x[6] ^ 2 + 8 * x[7] - h[4] <- -4 * x[1] ^ 2 - x[2] ^ 2 + 3 * x[1] * x[2] -2 * x[3] ^ 2 - - 5 * x[6] + 11 * x[7] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( +2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } + gr.hs100 <- function(x) { - c( 2 * x[1] - 20, + c( 2 * x[1] - 20, 10 * x[2] - 120, 4 * x[3] ^ 3, 6 * x[4] - 66, 60 * x[5] ^ 5, 14 * x[6] - 4 * x[7] - 10, - 4 * x[7] ^ 3 - 4 * x[6] - 8) + 4 * x[7] ^ 3 - 4 * x[6] - 8) } + hinjac.hs100 <- function(x) { - matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, 7, 3, 20 * x[3], - 1, -1, 0, 0, 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, - 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], - 0, 0, 5, -11), 4, 7, byrow = TRUE) + matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, + 7, 3, 20 * x[3], 1, -1, 0, 0, + 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, + 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), + nrow = 4, byrow = TRUE) } -# incorrect result with exact jacobian +## The optimum value of the objective function should be 680.6300573 +## A suitable parameter vector is roughly +## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) + +# Results with exact Jacobian S <- ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin.hs100, hinjac = hinjac.hs100, - nl.info = TRUE, control = list(xtol_rel = 1e-8)) + nl.info = TRUE, control = list(xtol_rel = 1e-8), + deprecatedBehavior = FALSE) -\donttest{ +# Results without Jacobian S <- ccsaq(x0.hs100, fn.hs100, hin = hin.hs100, - nl.info = TRUE, control = list(xtol_rel = 1e-8)) -} + nl.info = TRUE, control = list(xtol_rel = 1e-8), + deprecatedBehavior = FALSE) + } \references{ Krister Svanberg, ``A class of globally convergent optimization From 6a5f73534211b79c280990f09cf52680acefb9c6 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 18:10:51 -0400 Subject: [PATCH 06/15] COBYLA (includes BOBYQA and NEWUOA): 1) Change desired direction of inequalities to <= 2) Maintain old behavior as default with new parameter 3) Clean up HS100 example 4) Update documentation and unit tests --- R/cobyla.R | 153 +++++++++++++++++----------- inst/tinytest/test-wrapper-cobyla.R | 79 +++++++------- man/bobyqa.Rd | 40 +++++--- man/cobyla.Rd | 56 ++++++---- man/newuoa.Rd | 30 +++--- 5 files changed, 219 insertions(+), 139 deletions(-) diff --git a/R/cobyla.R b/R/cobyla.R index 9aac5f23..0f9c71a9 100644 --- a/R/cobyla.R +++ b/R/cobyla.R @@ -8,29 +8,37 @@ # Wrapper to solve optimization problem using COBYLA, BOBYQA, and NEWUOA. 
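As the documentation further below notes, the cobyla() wrapper does not expose equality constraints even though the underlying algorithm supports them; for that case the full nloptr() interface has to be called directly. A hedged sketch of such a call, using a small illustrative problem that is not part of the package:

nloptr(x0 = c(1, 1),
       eval_f    = function(x) (x[1] - 2)^2 + (x[2] - 1)^2,
       eval_g_eq = function(x) x[1] - 2 * x[2] + 1,   # equality constraint == 0
       opts = list(algorithm = "NLOPT_LN_COBYLA",
                   xtol_rel = 1e-8, maxeval = 2000))
# The solution should approach c(1.8, 1.4).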
# # CHANGELOG -# 2023-02-10: Tweaks for efficiency and readability (Avraham Adler) - +# 2023-02-10: Tweaks for efficiency and readability (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the HS100 +# example (Avraham Adler). +# #' Constrained Optimization by Linear Approximations #' -#' COBYLA is an algorithm for derivative-free optimization with nonlinear -#' inequality and equality constraints (but see below). +#' \acronym{COBYLA} is an algorithm for derivative-free optimization with +#' nonlinear inequality and equality constraints (but see below). #' #' It constructs successive linear approximations of the objective function and #' constraints via a simplex of \eqn{n+1} points (in \eqn{n} dimensions), and #' optimizes these approximations in a trust region at each step. #' -#' COBYLA supports equality constraints by transforming them into two -#' inequality constraints. As this does not give full satisfaction with the -#' implementation in NLOPT, it has not been made available here. +#' \acronym{COBYLA} supports equality constraints by transforming them into two +#' inequality constraints. This functionality has not been added to the wrapper. +#' To use \acronym{COBYLA} with equality constraints, please use the full +#' \code{nloptr} invocation. #' #' @param x0 starting point for searching the optimum. #' @param fn objective function that is to be minimized. #' @param lower,upper lower and upper bound constraints. #' @param hin function defining the inequality constraints, that is #' \code{hin>=0} for all components. -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param control list of options, see \code{nl.opts} for help. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -45,7 +53,7 @@ #' @author Hans W. Borchers #' #' @note The original code, written in Fortran by Powell, was converted in C -#' for the Scipy project. +#' for the \acronym{SciPy} project. #' #' @seealso \code{\link{bobyqa}}, \code{\link{newuoa}} #' @@ -56,28 +64,38 @@ #' #' @examples #' -#' ### Solve Hock-Schittkowski no. 100 +#' ## Solve the Hock-Schittkowski problem no. 
100 with analytic gradients +#' ## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm +#' #' x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -#' fn.hs100 <- function(x) { -#' (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + -#' 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -#' } -#' hin.hs100 <- function(x) { -#' h <- numeric(4) -#' h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] -#' h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] -#' h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] -#' h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] -#' return(h) +#' fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + +#' 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + +#' x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} +#' +#' hin.hs100 <- function(x) {c( +#' 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +#' 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +#' 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +#' 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - +#' 11 * x[7]) #' } #' #' S <- cobyla(x0.hs100, fn.hs100, hin = hin.hs100, -#' nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 2000)) -#' ## Optimal value of objective function: 680.630057374431 +#' nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 2000), +#' deprecatedBehavior = FALSE) +#' +#' ## The optimum value of the objective function should be 680.6300573 +#' ## A suitable parameter vector is roughly +#' ## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) +#' +#' S #' #' @export cobyla +#' + cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, - nl.info = FALSE, control = list(), ...) { + nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ...) { opts <- nl.opts(control) opts["algorithm"] <- "NLOPT_LN_COBYLA" @@ -86,14 +104,13 @@ cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, fn <- function(x) f1(x, ...) if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - - f2 <- match.fun(hin) - hin <- function(x) -f2(x, ...) # NLOPT expects hin <= 0 } S0 <- nloptr(x0, @@ -109,19 +126,20 @@ cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, convergence = S0$status, message = S0$message) } - #' Bound Optimization by Quadratic Approximation #' -#' BOBYQA performs derivative-free bound-constrained optimization using an -#' iteratively constructed quadratic approximation for the objective function. +#' \acronym{BOBYQA} performs derivative-free bound-constrained optimization +#' using an iteratively constructed quadratic approximation for the objective +#' function. #' -#' This is an algorithm derived from the BOBYQA Fortran subroutine of Powell, -#' converted to C and modified for the NLOPT stopping criteria. +#' This is an algorithm derived from the \acronym{BOBYQA} Fortran subroutine of +#' Powell, converted to C and modified for the \acronym{NLopt} stopping +#' criteria. #' #' @param x0 starting point for searching the optimum. 
#' @param fn objective function that is to be minimized. #' @param lower,upper lower and upper bound constraints. -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param control list of options, see \code{nl.opts} for help. #' @param ... additional arguments passed to the function. #' @@ -131,13 +149,13 @@ cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, #' \item{iter}{number of (outer) iterations, see \code{maxeval}.} #' \item{convergence}{integer code indicating successful completion (> 0) #' or a possible error number (< 0).} -#' \item{message}{character string produced by NLopt and giving additional -#' information.} +#' \item{message}{character string produced by \acronym{NLopt} and giving +#' additional information.} #' #' @export bobyqa #' -#' @note Because BOBYQA constructs a quadratic approximation of the objective, -#' it may perform poorly for objective functions that are not +#' @note Because \acronym{BOBYQA} constructs a quadratic approximation of the +#' objective, it may perform poorly for objective functions that are not #' twice-differentiable. #' #' @seealso \code{\link{cobyla}}, \code{\link{newuoa}} @@ -148,11 +166,24 @@ cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, #' #' @examples #' -#' fr <- function(x) { ## Rosenbrock Banana function -#' 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2 -#' } -#' (S <- bobyqa(c(0, 0, 0), fr, lower = c(0, 0, 0), upper = c(0.5, 0.5, 0.5))) +#' ## Rosenbrock Banana function +#' +#' rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} +#' +#' ## The function as written above has a minimum of 0 at (1, 1) #' +#' S <- bobyqa(c(0, 0), rbf) +#' +#' +#' S +#' +#' ## Rosenbrock Banana function with both parameters constrained to [0, 0.5] +#' +#' S <- bobyqa(c(0, 0), rbf, lower = c(0, 0), upper = c(0.5, 0.5)) +#' +#' S +#' + bobyqa <- function(x0, fn, lower = NULL, upper = NULL, nl.info = FALSE, control = list(), ...) { @@ -170,20 +201,20 @@ bobyqa <- function(x0, fn, lower = NULL, upper = NULL, nl.info = FALSE, convergence = S0$status, message = S0$message) } - #' New Unconstrained Optimization with quadratic Approximation #' -#' NEWUOA solves quadratic subproblems in a spherical trust regionvia a -#' truncated conjugate-gradient algorithm. For bound-constrained problems, -#' BOBYQA should be used instead, as Powell developed it as an enhancement -#' thereof for bound constraints. +#' \acronym{NEWUOA} solves quadratic subproblems in a spherical trust region via +#' a truncated conjugate-gradient algorithm. For bound-constrained problems, +#' \acronym{BOBYQA} should be used instead, as Powell developed it as an +#' enhancement thereof for bound constraints. #' -#' This is an algorithm derived from the NEWUOA Fortran subroutine of Powell, -#' converted to C and modified for the NLOPT stopping criteria. +#' This is an algorithm derived from the \acronym{NEWUOA} Fortran subroutine of +#' Powell, converted to C and modified for the \acronym{NLopt} stopping +#' criteria. #' #' @param x0 starting point for searching the optimum. #' @param fn objective function that is to be minimized. -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param control list of options, see \code{nl.opts} for help. #' @param ... additional arguments passed to the function. 
#' @@ -200,7 +231,7 @@ bobyqa <- function(x0, fn, lower = NULL, upper = NULL, nl.info = FALSE, #' #' @author Hans W. Borchers #' -#' @note NEWUOA may be largely superseded by BOBYQA. +#' @note \acronym{NEWUOA} may be largely superseded by \acronym{BOBYQA}. #' #' @seealso \code{\link{bobyqa}}, \code{\link{cobyla}} #' @@ -210,11 +241,17 @@ bobyqa <- function(x0, fn, lower = NULL, upper = NULL, nl.info = FALSE, #' #' @examples #' -#' fr <- function(x) { ## Rosenbrock Banana function -#' 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2 -#' } -#' (S <- newuoa(c(1, 2), fr)) +#' ## Rosenbrock Banana function +#' +#' rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} +#' +#' S <- newuoa(c(1, 2), rbf) #' +#' ## The function as written above has a minimum of 0 at (1, 1) +#' +#' S +#' + newuoa <- function(x0, fn, nl.info = FALSE, control = list(), ...) { opts <- nl.opts(control) diff --git a/inst/tinytest/test-wrapper-cobyla.R b/inst/tinytest/test-wrapper-cobyla.R index f9485419..0aacc9f7 100644 --- a/inst/tinytest/test-wrapper-cobyla.R +++ b/inst/tinytest/test-wrapper-cobyla.R @@ -7,62 +7,58 @@ # # Test wrapper calls to cobyla, bobyqa, and newuoa algorithms # -# Changelog: -# 2023-08-23: Change _output to _stdout +# CHANGELOG +# +# 2023-08-23: Change _output to _stdout +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the HS100 +# example (Avraham Adler). # library(nloptr) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") ## Functions for COBYLA, BOBYQA, and NEWUOA x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1L] - 10) ^ 2 + 5 * (x[2L] - 12) ^ 2 + x[3L] ^ 4 + 3 * (x[4L] - 11) ^ 2 + - 10 * x[5L] ^ 6 + 7 * x[6L] ^ 2 + x[7L] ^ 4 - 4 * x[6L] * x[7L] - - 10 * x[6L] - 8 * x[7L] -} - -hin.hs100 <- function(x) { - h <- double(4L) - h[1L] <- 127 - 2 * x[1L] ^ 2 - 3 * x[2L] ^ 4 - x[3L] - 4 * x[4L] ^ 2 - 5 * - x[5L] - h[2L] <- 282 - 7 * x[1L] - 3 * x[2L] - 10 * x[3L] ^ 2 - x[4L] + x[5L] - h[3L] <- 196 - 23 * x[1L] - x[2L] ^ 2 - 6 * x[6L] ^ 2 + 8 * x[7L] - h[4L] <- -4 * x[1L] ^ 2 - x[2L] ^ 2 + 3 * x[1L] * x[2L] - 2 * x[3L] ^ 2 - - 5 * x[6L] + 11 * x[7L] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, + 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } -hin2.hs100 <- function(x) -hin.hs100(x) # Needed for nloptr call +hin2.hs100 <- function(x) -hin.hs100(x) # Needed to test old behavior -fr <- function(x) {100 * (x[2L] - x[1L] ^ 2) ^ 2 + (1 - x[1L]) ^ 2} +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} ctl <- list(xtol_rel = 1e-8) -# Test messages -expect_message(cobyla(x0.hs100, fn.hs100, hin = hin.hs100), ineqMess) - # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. 
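The definitions above carry both conventions (hin.hs100 for the new one, hin2.hs100 for the old). As a smaller illustration of the migration path, the sketch below states one constraint both ways; the helper names hin_old and hin_new are invented here, and the constraint mirrors the x[1] + x[2] <= 1.5 restriction used in the ISRES example later in this series.

library(nloptr)

fn <- function(x) (1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2  # Rosenbrock

## Written for the old convention: feasible where hin_old(x) >= 0
hin_old <- function(x) 1.5 - x[1] - x[2]

## The same region restated for the new convention: feasible where hin_new(x) <= 0
hin_new <- function(x) x[1] + x[2] - 1.5

x0 <- c(-1.2, 1)

## Old-style call: relies on the deprecated default and emits the deprecation message
S_old <- suppressMessages(cobyla(x0, fn, hin = hin_old))

## New-style call: restated constraint, deprecatedBehavior = FALSE, no message
S_new <- cobyla(x0, fn, hin = hin_new, deprecatedBehavior = FALSE)

## Same feasible region, so the solutions should agree to optimizer tolerance
all.equal(S_old$par, S_new$par, tolerance = 1e-6)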
-expect_stdout(cobyla(x0.hs100, fn.hs100, nl.info = TRUE), "Call:", fixed = TRUE) +expect_stdout(cobyla(x0.hs100, fn.hs100, nl.info = TRUE, + deprecatedBehavior = FALSE), "Call:", fixed = TRUE) expect_stdout(bobyqa(x0.hs100, fn.hs100, nl.info = TRUE), "Call:", fixed = TRUE) expect_stdout(newuoa(x0.hs100, fn.hs100, nl.info = TRUE), "Call:", fixed = TRUE) -expect_silent(cobyla(x0.hs100, fn.hs100)) +expect_silent(cobyla(x0.hs100, fn.hs100, deprecatedBehavior = FALSE)) expect_silent(bobyqa(x0.hs100, fn.hs100)) expect_silent(newuoa(x0.hs100, fn.hs100)) # Test COBYLA algorithm -cobylaTest <- suppressMessages( - cobyla(x0.hs100, fn.hs100, hin = hin.hs100, control = ctl) -) +cobylaTest <- cobyla(x0.hs100, fn.hs100, hin = hin.hs100, control = ctl, + deprecatedBehavior = FALSE) cobylaControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, - eval_g_ineq = hin2.hs100, + eval_g_ineq = hin.hs100, opts = list(algorithm = "NLOPT_LN_COBYLA", xtol_rel = 1e-8, maxeval = 1000L)) @@ -72,10 +68,23 @@ expect_identical(cobylaTest$iter, cobylaControl$iterations) expect_identical(cobylaTest$convergence, cobylaControl$status) expect_identical(cobylaTest$message, cobylaControl$message) +# Test deprecated message +expect_message(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) + +# Test deprecated behavior +cobylaTest <- suppressMessages(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100, + control = ctl)) + +expect_identical(cobylaTest$par, cobylaControl$solution) +expect_identical(cobylaTest$value, cobylaControl$objective) +expect_identical(cobylaTest$iter, cobylaControl$iterations) +expect_identical(cobylaTest$convergence, cobylaControl$status) +expect_identical(cobylaTest$message, cobylaControl$message) + # Test BOBYQA algorithm -bobyqaTest <- bobyqa(c(0, 0), fr, lower = c(0, 0), upper = c(0.5, 0.5)) +bobyqaTest <- bobyqa(c(0, 0), rbf, lower = c(0, 0), upper = c(0.5, 0.5)) -bobyqaControl <- nloptr(x0 = c(0, 0), eval_f = fr, lb = c(0, 0), +bobyqaControl <- nloptr(x0 = c(0, 0), eval_f = rbf, lb = c(0, 0), ub = c(0.5, 0.5), opts = list(algorithm = "NLOPT_LN_BOBYQA", xtol_rel = 1e-6, maxeval = 1000L)) @@ -87,9 +96,9 @@ expect_identical(bobyqaTest$convergence, bobyqaControl$status) expect_identical(bobyqaTest$message, bobyqaControl$message) # Test NEWUOA algorithm -newuoaTest <- newuoa(c(1, 2), fr) +newuoaTest <- newuoa(c(1, 2), rbf) -newuoaControl <- nloptr(x0 = c(1, 2), eval_f = fr, +newuoaControl <- nloptr(x0 = c(1, 2), eval_f = rbf, opts = list(algorithm = "NLOPT_LN_NEWUOA", xtol_rel = 1e-6, maxeval = 1000L)) diff --git a/man/bobyqa.Rd b/man/bobyqa.Rd index 8796aee2..b8d01fdc 100644 --- a/man/bobyqa.Rd +++ b/man/bobyqa.Rd @@ -21,7 +21,7 @@ bobyqa( \item{lower, upper}{lower and upper bound constraints.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{control}{list of options, see \code{nl.opts} for help.} @@ -34,28 +34,42 @@ List with components: \item{iter}{number of (outer) iterations, see \code{maxeval}.} \item{convergence}{integer code indicating successful completion (> 0) or a possible error number (< 0).} -\item{message}{character string produced by NLopt and giving additional -information.} +\item{message}{character string produced by \acronym{NLopt} and giving +additional information.} } \description{ -BOBYQA performs derivative-free bound-constrained optimization using an -iteratively constructed quadratic approximation for the objective function. 
+\acronym{BOBYQA} performs derivative-free bound-constrained optimization +using an iteratively constructed quadratic approximation for the objective +function. } \details{ -This is an algorithm derived from the BOBYQA Fortran subroutine of Powell, -converted to C and modified for the NLOPT stopping criteria. +This is an algorithm derived from the \acronym{BOBYQA} Fortran subroutine of +Powell, converted to C and modified for the \acronym{NLopt} stopping +criteria. } \note{ -Because BOBYQA constructs a quadratic approximation of the objective, -it may perform poorly for objective functions that are not +Because \acronym{BOBYQA} constructs a quadratic approximation of the +objective, it may perform poorly for objective functions that are not twice-differentiable. } \examples{ -fr <- function(x) { ## Rosenbrock Banana function - 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2 -} -(S <- bobyqa(c(0, 0, 0), fr, lower = c(0, 0, 0), upper = c(0.5, 0.5, 0.5))) +## Rosenbrock Banana function + +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} + +## The function as written above has a minimum of 0 at (1, 1) + +S <- bobyqa(c(0, 0), rbf) + + +S + +## Rosenbrock Banana function with both parameters constrained to [0, 0.5] + +S <- bobyqa(c(0, 0), rbf, lower = c(0, 0), upper = c(0.5, 0.5)) + +S } \references{ diff --git a/man/cobyla.Rd b/man/cobyla.Rd index c46a685d..c3e24892 100644 --- a/man/cobyla.Rd +++ b/man/cobyla.Rd @@ -12,6 +12,7 @@ cobyla( hin = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ... ) } @@ -25,10 +26,15 @@ cobyla( \item{hin}{function defining the inequality constraints, that is \code{hin>=0} for all components.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{control}{list of options, see \code{nl.opts} for help.} +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} + \item{...}{additional arguments passed to the function.} } \value{ @@ -42,42 +48,50 @@ or a possible error number (< 0).} information.} } \description{ -COBYLA is an algorithm for derivative-free optimization with nonlinear -inequality and equality constraints (but see below). +\acronym{COBYLA} is an algorithm for derivative-free optimization with +nonlinear inequality and equality constraints (but see below). } \details{ It constructs successive linear approximations of the objective function and constraints via a simplex of \eqn{n+1} points (in \eqn{n} dimensions), and optimizes these approximations in a trust region at each step. -COBYLA supports equality constraints by transforming them into two -inequality constraints. As this does not give full satisfaction with the -implementation in NLOPT, it has not been made available here. +\acronym{COBYLA} supports equality constraints by transforming them into two +inequality constraints. This functionality has not been added to the wrapper. +To use \acronym{COBYLA} with equality constraints, please use the full +\code{nloptr} invocation. } \note{ The original code, written in Fortran by Powell, was converted in C -for the Scipy project. +for the \acronym{SciPy} project. } \examples{ -### Solve Hock-Schittkowski no. 100 +## Solve the Hock-Schittkowski problem no. 
100 with analytic gradients +## See https://apmonitor.com/wiki/uploads/Apps/hs100.apm + x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + - 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -} -hin.hs100 <- function(x) { - h <- numeric(4) - h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] - h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] - h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] - h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( +2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } S <- cobyla(x0.hs100, fn.hs100, hin = hin.hs100, - nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 2000)) -## Optimal value of objective function: 680.630057374431 + nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 2000), + deprecatedBehavior = FALSE) + +## The optimum value of the objective function should be 680.6300573 +## A suitable parameter vector is roughly +## (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) + +S } \references{ diff --git a/man/newuoa.Rd b/man/newuoa.Rd index c944dc4c..6ea81325 100644 --- a/man/newuoa.Rd +++ b/man/newuoa.Rd @@ -11,7 +11,7 @@ newuoa(x0, fn, nl.info = FALSE, control = list(), ...) \item{fn}{objective function that is to be minimized.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{control}{list of options, see \code{nl.opts} for help.} @@ -28,24 +28,30 @@ or a possible error number (< 0).} information.} } \description{ -NEWUOA solves quadratic subproblems in a spherical trust regionvia a -truncated conjugate-gradient algorithm. For bound-constrained problems, -BOBYQA should be used instead, as Powell developed it as an enhancement -thereof for bound constraints. +\acronym{NEWUOA} solves quadratic subproblems in a spherical trust region via +a truncated conjugate-gradient algorithm. For bound-constrained problems, +\acronym{BOBYQA} should be used instead, as Powell developed it as an +enhancement thereof for bound constraints. } \details{ -This is an algorithm derived from the NEWUOA Fortran subroutine of Powell, -converted to C and modified for the NLOPT stopping criteria. +This is an algorithm derived from the \acronym{NEWUOA} Fortran subroutine of +Powell, converted to C and modified for the \acronym{NLopt} stopping +criteria. } \note{ -NEWUOA may be largely superseded by BOBYQA. +\acronym{NEWUOA} may be largely superseded by \acronym{BOBYQA}. 
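The optimum quoted in the example above can be verified directly by evaluating the objective and the restated constraints at the rounded solution. In the sketch below, x.doc is just a name for that rounded vector, and fn.hs100 and hin.hs100 are repeated from the example so the check stands on its own.

## fn.hs100 and hin.hs100 repeated from the example above
fn.hs100 <- function(x) {
  (x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + 3 * (x[4] - 11) ^ 2 +
    10 * x[5] ^ 6 + 7 * x[6] ^ 2 + x[7] ^ 4 - 4 * x[6] * x[7] -
    10 * x[6] - 8 * x[7]
}

hin.hs100 <- function(x) {
  c(2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127,
    7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282,
    23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196,
    4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 +
      5 * x[6] - 11 * x[7])
}

## Rounded solution quoted in the example
x.doc <- c(2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942)

fn.hs100(x.doc)             # roughly 680.63, matching the documented optimum
all(hin.hs100(x.doc) <= 0)  # every restated constraint should hold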
} \examples{ -fr <- function(x) { ## Rosenbrock Banana function - 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2 -} -(S <- newuoa(c(1, 2), fr)) +## Rosenbrock Banana function + +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} + +S <- newuoa(c(1, 2), rbf) + +## The function as written above has a minimum of 0 at (1, 1) + +S } \references{ From bb1b1daaaefb851f0991095d254677701a6c96a8 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 21:29:41 -0400 Subject: [PATCH 07/15] GLOBAL (includes STOGO, ISRES, and CRS2LM): 1) Change desired direction of inequalities to <= 2) Maintain old behavior as default with new parameter 3) Clean up Hartmann 6 example 4) Update documentation and unit tests. --- R/global.R | 212 +++++++++++++++------------- inst/tinytest/test-wrapper-global.R | 134 ++++++++++++------ man/crs2lm.Rd | 79 ++++++----- man/isres.Rd | 59 +++++--- man/stogo.Rd | 38 +++-- 5 files changed, 300 insertions(+), 222 deletions(-) diff --git a/R/global.R b/R/global.R index 0352ee4d..8eed0035 100644 --- a/R/global.R +++ b/R/global.R @@ -8,19 +8,20 @@ # Wrapper to solve optimization problem using StoGo. # # CHANGELOG -# 2023-02-08: Tweaks for efficiency and readability (Avraham Adler) - -#-- --------------------------------------------------------------------Stogo--- +# +# 2023-02-08: Tweaks for efficiency and readability. (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the Hartmann 6 +# example. (Avraham Adler) +# +#----------------------------------StoGo---------------------------------------- #' Stochastic Global Optimization #' -#' StoGO is a global optimization algorithm that works by systematically -#' dividing the search space into smaller hyper-rectangles. -#' -#' StoGO is a global optimization algorithm that works by systematically -#' dividing the search space (which must be bound-constrained) into smaller -#' hyper-rectangles via a branch-and-bound technique, and searching them by a -#' gradient-based local-search algorithm (a BFGS variant), optionally including -#' some randomness. +#' \acronym{StoGO} is a global optimization algorithm that works by +#' systematically dividing the search space---which must be +#' bound-constrained---into smaller hyper-rectangles via a branch-and-bound +#' technique, and searching them using a gradient-based local-search algorithm +#' (a \acronym{BFGS} variant), optionally including some randomness. #' #' @param x0 initial point for searching the optimum. #' @param fn objective function that is to be minimized. @@ -29,7 +30,7 @@ #' @param maxeval maximum number of function evaluations. #' @param xtol_rel stopping criterion for relative change reached. #' @param randomized logical; shall a randomizing variant be used? -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -38,14 +39,14 @@ #' \item{iter}{number of (outer) iterations, see \code{maxeval}.} #' \item{convergence}{integer code indicating successful completion (> 0) #' or a possible error number (< 0).} -#' \item{message}{character string produced by NLopt and giving additional -#' information.} +#' \item{message}{character string produced by \acronym{NLopt} and giving +#' additional information.} #' #' @export stogo #' #' @author Hans W. 
Borchers #' -#' @note Only bound-constrained problems are supported by this algorithm. +#' @note Only bounds-constrained problems are supported by this algorithm. #' #' @references S. Zertchaninov and K. Madsen, ``A C++ Programme for Global #' Optimization,'' IMM-REP-1998-04, Department of Mathematical Modelling, @@ -53,21 +54,23 @@ #' #' @examples #' -#' ### Rosenbrock Banana objective function -#' fn <- function(x) -#' return( 100 * (x[2] - x[1] * x[1])^2 + (1 - x[1])^2 ) +#' ## Rosenbrock Banana objective function +#' +#' rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} #' -#' x0 <- c( -1.2, 1 ) -#' lb <- c( -3, -3 ) -#' ub <- c( 3, 3 ) +#' x0 <- c(-1.2, 1) +#' lb <- c(-3, -3) +#' ub <- c(3, 3) #' -#' stogo(x0 = x0, fn = fn, lower = lb, upper = ub) +#' ## The function as written above has a minimum of 0 at (1, 1) #' +#' stogo(x0 = x0, fn = rbf, lower = lb, upper = ub) +#' + stogo <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, maxeval = 10000, xtol_rel = 1e-6, randomized = FALSE, - nl.info = FALSE, ...) { # control = list() + nl.info = FALSE, ...) { - # opts <- nl.opts(control) opts <- list() opts$maxeval <- maxeval opts$xtol_rel <- xtol_rel @@ -93,39 +96,42 @@ stogo <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, list(par = S0$solution, value = S0$objective, iter = S0$iterations, convergence = S0$status, message = S0$message) - } -#-- Supports nonlinear constraints: quite inaccurate! -------------- ISRES --- - +#---------------------------------ISRES----------------------------------------- +# ISRES supports nonlinear constraints but mat be quite inaccurate! #' Improved Stochastic Ranking Evolution Strategy #' -#' The Improved Stochastic Ranking Evolution Strategy (ISRES) algorithm for -#' nonlinearly constrained global optimization (or at least semi-global: -#' although it has heuristics to escape local optima. +#' The Improved Stochastic Ranking Evolution Strategy (\acronym{ISRES}) is an +#' algorithm for nonlinearly constrained global optimization, or at least +#' semi-global, although it has heuristics to escape local optima. #' -#' The evolution strategy is based on a combination of a mutation rule (with a -#' log-normal step-size update and exponential smoothing) and differential -#' variation (a Nelder-Mead-like update rule). The fitness ranking is simply +#' The evolution strategy is based on a combination of a mutation rule---with a +#' log-normal step-size update and exponential smoothing---and differential +#' variation---a Nelder-Mead-like update rule). The fitness ranking is simply #' via the objective function for problems without nonlinear constraints, but #' when nonlinear constraints are included the stochastic ranking proposed by #' Runarsson and Yao is employed. #' #' This method supports arbitrary nonlinear inequality and equality constraints -#' in addition to the bound constraints. +#' in addition to the bounds constraints. #' #' @param x0 initial point for searching the optimum. #' @param fn objective function that is to be minimized. #' @param lower,upper lower and upper bound constraints. #' @param hin function defining the inequality constraints, that is -#' \code{hin>=0} for all components. -#' @param heq function defining the equality constraints, that is \code{heq==0} +#' \code{hin <= 0} for all components. +#' @param heq function defining the equality constraints, that is \code{heq = 0} #' for all components. #' @param maxeval maximum number of function evaluations. #' @param pop.size population size. 
#' @param xtol_rel stopping criterion for relative change reached. -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -141,9 +147,9 @@ stogo <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, #' #' @author Hans W. Borchers #' -#' @note The initial population size for CRS defaults to \code{20x(n+1)} in -#' \code{n} dimensions, but this can be changed; the initial population must be -#' at least \code{n+1}. +#' @note The initial population size for CRS defaults to \eqn{20x(n+1)} in +#' \eqn{n} dimensions, but this can be changed. The initial population must be +#' at least \eqn{n+1}. #' #' @references Thomas Philip Runarsson and Xin Yao, ``Search biases in #' constrained evolutionary optimization,'' IEEE Trans. on Systems, Man, and @@ -152,22 +158,34 @@ stogo <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, #' #' @examples #' -#' ### Rosenbrock Banana objective function -#' fn <- function(x) -#' return( 100 * (x[2] - x[1] * x[1])^2 + (1 - x[1])^2 ) +#' ## Rosenbrock Banana objective function +#' +#' rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} +#' +#' x0 <- c(-1.2, 1) +#' lb <- c(-3, -3) +#' ub <- c(3, 3) #' -#' x0 <- c( -1.2, 1 ) -#' lb <- c( -3, -3 ) -#' ub <- c( 3, 3 ) +#' ## The function as written above has a minimum of 0 at (1, 1) #' -#' isres(x0 = x0, fn = fn, lower = lb, upper = ub) +#' isres(x0 = x0, fn = rbf, lower = lb, upper = ub) +#' +#' ## Now subject to the inequality that x[1] + x[2] <= 1.5 +#' +#' hin <- function(x) {x[1] + x[2] - 1.5} +#' +#' S <- isres(x0 = x0, fn = rbf, hin = hin, lower = lb, upper = ub, +#' maxeval = 2e5L, deprecatedBehavior = FALSE) +#' +#' S +#' +#' sum(S$par) #' isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, pop.size = 20 * (length(x0) + 1), xtol_rel = 1e-6, - nl.info = FALSE, ...) { + nl.info = FALSE, deprecatedBehavior = TRUE, ...) { - #opts <- nl.opts(control) opts <- list() opts$maxeval <- maxeval opts$xtol_rel <- xtol_rel @@ -178,14 +196,13 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, fn <- function(x) fun(x, ...) if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - - .hin <- match.fun(hin) - hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! 
} if (!is.null(heq)) { @@ -206,20 +223,19 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, convergence = S0$status, message = S0$message) } - -#-- ------------------------------------------------------------------ CRS --- +#- --------------------------------- CRS -------------------------------------- #' Controlled Random Search #' -#' The Controlled Random Search (CRS) algorithm (and in particular, the CRS2 -#' variant) with the `local mutation' modification. +#' The Controlled Random Search (\acronym{CRS}) algorithm (and in particular, +#' the \acronym{CRS2} variant) with the `local mutation' modification. #' -#' The CRS algorithms are sometimes compared to genetic algorithms, in that -#' they start with a random population of points, and randomly evolve these +#' The \acronym{CRS} algorithms are sometimes compared to genetic algorithms, in +#' that they start with a random population of points, and randomly evolve these #' points by heuristic rules. In this case, the evolution somewhat resembles a #' randomized Nelder-Mead algorithm. #' -#' The published results for CRS seem to be largely empirical. +#' The published results for \acronym{CRS} seem to be largely empirical. #' #' @param x0 initial point for searching the optimum. #' @param fn objective function that is to be minimized. @@ -228,7 +244,7 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, #' @param pop.size population size. #' @param ranseed prescribe seed for random number generator. #' @param xtol_rel stopping criterion for relative change reached. -#' @param nl.info logical; shall the original NLopt info been shown. +#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -237,14 +253,14 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, #' \item{iter}{number of (outer) iterations, see \code{maxeval}.} #' \item{convergence}{integer code indicating successful completion (> 0) #' or a possible error number (< 0).} -#' \item{message}{character string produced by NLopt and giving additional -#' information.} +#' \item{message}{character string produced by \acronym{NLopt} and giving +#' additional information.} #' #' @export crs2lm #' -#' @note The initial population size for CRS defaults to \code{10x(n+1)} in -#' \code{n} dimensions, but this can be changed; the initial population must be -#' at least \code{n+1}. +#' @note The initial population size for CRS defaults to \eqn{10x(n+1)} in +#' \eqn{n} dimensions, but this can be changed. The initial population must be +#' at least \eqn{n+1}. #' #' @references W. L. Price, ``Global optimization by controlled random #' search,'' J. Optim. Theory Appl. 40 (3), p. 333-348 (1983). 
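Because CRS2 is stochastic, the ranseed argument documented above is what makes runs repeatable; the revised example and the unit tests both fix it. A minimal sketch of that idea on the Rosenbrock function (run1 and run2 are illustrative names):

library(nloptr)

rbf <- function(x) (1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2

run1 <- crs2lm(x0 = c(-1.2, 1), fn = rbf, lower = c(-3, -3), upper = c(3, 3),
               ranseed = 10L, maxeval = 2000L)
run2 <- crs2lm(x0 = c(-1.2, 1), fn = rbf, lower = c(-3, -3), upper = c(3, 3),
               ranseed = 10L, maxeval = 2000L)

## With the seed fixed, repeated runs should return the same solution
identical(run1$par, run2$par)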
@@ -255,45 +271,47 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, #' #' @examples #' -#' ### Minimize the Hartmann6 function -#' hartmann6 <- function(x) { -#' n <- length(x) -#' a <- c(1.0, 1.2, 3.0, 3.2) -#' A <- matrix(c(10.0, 0.05, 3.0, 17.0, -#' 3.0, 10.0, 3.5, 8.0, -#' 17.0, 17.0, 1.7, 0.05, -#' 3.5, 0.1, 10.0, 10.0, -#' 1.7, 8.0, 17.0, 0.1, -#' 8.0, 14.0, 8.0, 14.0), nrow=4, ncol=6) -#' B <- matrix(c(.1312,.2329,.2348,.4047, -#' .1696,.4135,.1451,.8828, -#' .5569,.8307,.3522,.8732, -#' .0124,.3736,.2883,.5743, -#' .8283,.1004,.3047,.1091, -#' .5886,.9991,.6650,.0381), nrow=4, ncol=6) -#' fun <- 0.0 +#' ## Minimize the Hartmann 6-Dimensional function +#' ## See https://www.sfu.ca/~ssurjano/hart6.html +#' +#' a <- c(1.0, 1.2, 3.0, 3.2) +#' A <- matrix(c(10, 0.05, 3, 17, +#' 3, 10, 3.5, 8, +#' 17, 17, 1.7, 0.05, +#' 3.5, 0.1, 10, 10, +#' 1.7, 8, 17, 0.1, +#' 8, 14, 8, 14), nrow = 4) +#' +#' B <- matrix(c(.1312, .2329, .2348, .4047, +#' .1696, .4135, .1451, .8828, +#' .5569, .8307, .3522, .8732, +#' .0124, .3736, .2883, .5743, +#' .8283, .1004, .3047, .1091, +#' .5886, .9991, .6650, .0381), nrow = 4) +#' +#' hartmann6 <- function(x, a, A, B) { +#' fun <- 0 #' for (i in 1:4) { -#' fun <- fun - a[i] * exp(-sum(A[i,]*(x-B[i,])^2)) +#' fun <- fun - a[i] * exp(-sum(A[i, ] * (x - B[i, ]) ^ 2)) #' } -#' return(fun) +#' +#' fun #' } #' +#' ## The function has a global minimum of -3.32237 at +#' ## (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573) +#' #' S <- crs2lm(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), -#' nl.info = TRUE, xtol_rel=1e-8, maxeval = 10000) -#' ## Number of Iterations....: 5106 -#' ## Termination conditions: maxeval: 10000 xtol_rel: 1e-08 -#' ## Number of inequality constraints: 0 -#' ## Number of equality constraints: 0 -#' ## Optimal value of objective function: -3.32236801141551 -#' ## Optimal value of controls: 0.2016895 0.1500107 0.476874 0.2753324 -#' ## 0.3116516 0.6573005 +#' ranseed = 10L, nl.info = TRUE, xtol_rel=1e-8, maxeval = 10000, +#' a = a, A = A, B = B) +#' +#' S #' crs2lm <- function(x0, fn, lower, upper, maxeval = 10000, pop.size = 10 * (length(x0) + 1), ranseed = NULL, xtol_rel = 1e-6, nl.info = FALSE, ...) { - #opts <- nl.opts(control) opts <- list() opts$maxeval <- maxeval opts$xtol_rel <- xtol_rel diff --git a/inst/tinytest/test-wrapper-global.R b/inst/tinytest/test-wrapper-global.R index 6d13c5ea..8e438529 100644 --- a/inst/tinytest/test-wrapper-global.R +++ b/inst/tinytest/test-wrapper-global.R @@ -14,41 +14,45 @@ library(nloptr) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. 
The ability to use the old", + "behavior will be removed in a future release.") ## Functions for the algorithms -fr <- function(x) {100 * (x[2L] - x[1L] * x[1L]) ^ 2 + (1 - x[1L]) ^ 2} -gr <- function(x) nl.grad(x, fr) +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} +## Analytic gradient +gr <- function(x) {c(-2 * (1 - x[1]) - 400 * x[1] * (x[2] - x[1] ^ 2), + 200 * (x[2] - x[1] ^ 2))} -hin <- function(x) -0.25 * x[1L] ^ 2 - x[2L] ^ 2 + 1 # hin >= 0 -heq <- function(x) x[1L] - 2 * x[2L] + 1 # heq == 0 +hin <- function(x) 0.25 * x[1L] ^ 2 + x[2L] ^ 2 - 1 # hin <= 0 +heq <- function(x) x[1L] - 2 * x[2L] + 1 # heq = 0 hinjac <- function(x) nl.jacobian(x, hin) heqjac <- function(x) nl.jacobian(x, heq) -hin2 <- function(x) -hin(x) # hin2 <= 0 needed for nloptr -hinjac2 <- function(x) nl.jacobian(x, hin2) # hin2 <= 0 needed for nloptr - -hartmann6 <- function(x) { - a <- c(1, 1.2, 3, 3.2) - A <- matrix(c(10, 0.05, 3, 17, - 3, 10, 3.5, 8, - 17, 17, 1.7, 0.05, - 3.5, 0.1, 10, 10, - 1.7, 8, 17, 0.1, - 8, 14, 8, 14), - nrow = 4L, ncol = 6L) - B <- matrix(c(0.1312, 0.2329, 0.2348, 0.4047, - 0.1696, 0.4135, 0.1451, 0.8828, - 0.5569, 0.8307, 0.3522, 0.8732, - 0.0124, 0.3736, 0.2883, 0.5743, - 0.8283, 0.1004, 0.3047, 0.1091, - 0.5886, 0.9991, 0.6650, 0.0381), - nrow = 4L, ncol = 6L) +hin2 <- function(x) -hin(x) # Needed to test old behavior +hinjac2 <- function(x) nl.jacobian(x, hin2) # Needed to test old behavior + +# Take these outside the function since they are unchanging; just pass them! +a <- c(1.0, 1.2, 3.0, 3.2) +A <- matrix(c(10, 0.05, 3, 17, + 3, 10, 3.5, 8, + 17, 17, 1.7, 0.05, + 3.5, 0.1, 10, 10, + 1.7, 8, 17, 0.1, + 8, 14, 8, 14), nrow = 4) + +B <- matrix(c(.1312, .2329, .2348, .4047, + .1696, .4135, .1451, .8828, + .5569, .8307, .3522, .8732, + .0124, .3736, .2883, .5743, + .8283, .1004, .3047, .1091, + .5886, .9991, .6650, .0381), nrow = 4) + +hartmann6 <- function(x, a, A, B) { fun <- 0 for (i in 1:4) { fun <- fun - a[i] * exp(-sum(A[i, ] * (x - B[i, ]) ^ 2)) } + fun } @@ -59,17 +63,17 @@ ub <- c(3, 3) ## StoGo # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. 
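The heq defined above is used in the tests only through comparisons against a direct nloptr call. As a stand-alone sketch of the same equality-constrained usage, outside the test harness:

library(nloptr)

rbf <- function(x) (1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2
heq <- function(x) x[1] - 2 * x[2] + 1   # equality constraint, heq(x) = 0

S <- isres(x0 = c(-1.2, 1), fn = rbf, lower = c(-3, -3), upper = c(3, 3),
           heq = heq, maxeval = 2e4L)

S$par       # constrained optimum, located to ISRES's stochastic accuracy
heq(S$par)  # should be near zero if the equality constraint is respected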
-expect_stdout(stogo(x0, fr, lower = lb, upper = ub, nl.info = TRUE), +expect_stdout(stogo(x0, rbf, lower = lb, upper = ub, nl.info = TRUE), "Call:", fixed = TRUE) -expect_silent(stogo(x0, fr, lower = lb, upper = ub)) +expect_silent(stogo(x0, rbf, lower = lb, upper = ub)) # No passed gradient; Randomized: FALSE -stogoTest <- stogo(x0, fr, lower = lb, upper = ub) +stogoTest <- stogo(x0, rbf, lower = lb, upper = ub) stogoControl <- nloptr(x0 = x0, - eval_f = fr, - eval_grad_f = function(x) nl.grad(x, fr), + eval_f = rbf, + eval_grad_f = function(x) nl.grad(x, rbf), lb = lb, ub = ub, opts = list(algorithm = "NLOPT_GD_STOGO", @@ -82,10 +86,10 @@ expect_identical(stogoTest$convergence, stogoControl$status) expect_identical(stogoTest$message, stogoControl$message) # Passed gradient; Randomized: FALSE -stogoTest <- stogo(x0, fr, gr, lb, ub) +stogoTest <- stogo(x0, rbf, gr, lb, ub) stogoControl <- nloptr(x0 = x0, - eval_f = fr, + eval_f = rbf, eval_grad_f = gr, lb = lb, ub = ub, @@ -99,10 +103,10 @@ expect_identical(stogoTest$convergence, stogoControl$status) expect_identical(stogoTest$message, stogoControl$message) # Passed gradient; Randomized: TRUE -stogoTest <- stogo(x0, fr, gr, lb, ub, randomized = TRUE) +stogoTest <- stogo(x0, rbf, gr, lb, ub, randomized = TRUE) stogoControl <- nloptr(x0 = x0, - eval_f = fr, + eval_f = rbf, eval_grad_f = gr, lb = lb, ub = ub, @@ -116,23 +120,20 @@ expect_identical(stogoTest$convergence, stogoControl$status) expect_identical(stogoTest$message, stogoControl$message) ## ISRES -# Test message -expect_message(isres(x0, fr, lower = lb, upper = ub, hin = hin), ineqMess) - # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. -expect_stdout(isres(x0, fr, lb, ub, nl.info = TRUE), "Call:", fixed = TRUE) +expect_stdout(isres(x0, rbf, lb, ub, nl.info = TRUE), "Call:", fixed = TRUE) -expect_silent(isres(x0, fr, lb, ub)) +expect_silent(isres(x0, rbf, lb, ub)) # As ISRES is stochastic, more iterations and a much looser tolerance is needed. # Also, iteration count will almost surely not be equal. # No passed hin or heq -isresTest <- isres(x0, fr, lb, ub, maxeval = 2e4L) +isresTest <- isres(x0, rbf, lb, ub, maxeval = 2e4L) isresControl <- nloptr(x0 = x0, - eval_f = fr, + eval_f = rbf, lb = lb, ub = ub, opts = list(algorithm = "NLOPT_GN_ISRES", @@ -147,10 +148,10 @@ expect_identical(stogoTest$message, stogoControl$message) # Passing heq # Need a ridiculously loose tolerance on ISRES now. 
# (AA: 2023-02-06) -isresTest <- isres(x0, fr, lb, ub, heq = heq, maxeval = 2e4L) +isresTest <- isres(x0, rbf, lb, ub, heq = heq, maxeval = 2e4L) isresControl <- nloptr(x0 = x0, - eval_f = fr, + eval_f = rbf, eval_g_eq = heq, lb = lb, ub = ub, @@ -163,6 +164,40 @@ isresControl <- nloptr(x0 = x0, expect_identical(stogoTest$convergence, stogoControl$status) expect_identical(stogoTest$message, stogoControl$message) +# Passing hin +isresControl <- nloptr(x0 = x0, + eval_f = rbf, + eval_g_ineq = hin, + lb = lb, + ub = ub, + opts = list(algorithm = "NLOPT_GN_ISRES", + maxeval = 2e4L, xtol_rel = 1e-6, + population = 60)) + +expect_silent(isres(x0, rbf, lb, ub, hin = hin, maxeval = 2e4L, + deprecatedBehavior = FALSE)) + +isresTest <- isres(x0, rbf, lb, ub, hin = hin, maxeval = 2e4L, + deprecatedBehavior = FALSE) + +expect_equal(isresTest$par, isresControl$solution, tolerance = 1e-4) +expect_equal(isresTest$value, isresControl$objective, tolerance = 1e-4) +expect_identical(stogoTest$convergence, stogoControl$status) +expect_identical(stogoTest$message, stogoControl$message) + +# Test deprecated message +expect_message(isres(x0, rbf, lower = lb, upper = ub, hin = hin2, + maxeval = 2e4L), depMess) + +# Test deprecated behavior +isresTest <- suppressMessages(isres(x0, rbf, lb, ub, hin = hin2, + maxeval = 2e4L)) + +expect_equal(isresTest$par, isresControl$solution, tolerance = 1e-4) +expect_equal(isresTest$value, isresControl$objective, tolerance = 1e-4) +expect_identical(stogoTest$convergence, stogoControl$status) +expect_identical(stogoTest$message, stogoControl$message) + ## CRS2LM # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. @@ -170,17 +205,22 @@ x0 <- lb <- rep(0, 6L) ub <- rep(1, 6L) expect_stdout(crs2lm(x0 = x0, hartmann6, lower = lb, upper = ub, - nl.info = TRUE), "Call:", fixed = TRUE) + nl.info = TRUE, a = a, A = A, B = B), + "Call:", fixed = TRUE) -expect_silent(crs2lm(x0 = x0, hartmann6, lower = lb, upper = ub)) +expect_silent(crs2lm(x0 = x0, hartmann6, lower = lb, upper = ub, + a = a, A = A, B = B)) crs2lmTest <- crs2lm(x0 = x0, hartmann6, lower = lb, upper = ub, ranseed = 43L, - xtol_rel = 1e-8, maxeval = 100L) + xtol_rel = 1e-8, maxeval = 100L, a = a, A = A, B = B) crs2lmControl <- nloptr(x0 = x0, eval_f = hartmann6, lb = lb, ub = ub, + a = a, + A = A, + B = B, opts = list(algorithm = "NLOPT_GN_CRS2_LM", xtol_rel = 1e-8, maxeval = 100L, diff --git a/man/crs2lm.Rd b/man/crs2lm.Rd index 95514ad0..c33c6db0 100644 --- a/man/crs2lm.Rd +++ b/man/crs2lm.Rd @@ -32,7 +32,7 @@ crs2lm( \item{xtol_rel}{stopping criterion for relative change reached.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{...}{additional arguments passed to the function.} } @@ -43,60 +43,63 @@ List with components: \item{iter}{number of (outer) iterations, see \code{maxeval}.} \item{convergence}{integer code indicating successful completion (> 0) or a possible error number (< 0).} -\item{message}{character string produced by NLopt and giving additional -information.} +\item{message}{character string produced by \acronym{NLopt} and giving +additional information.} } \description{ -The Controlled Random Search (CRS) algorithm (and in particular, the CRS2 -variant) with the `local mutation' modification. +The Controlled Random Search (\acronym{CRS}) algorithm (and in particular, +the \acronym{CRS2} variant) with the `local mutation' modification. 
} \details{ -The CRS algorithms are sometimes compared to genetic algorithms, in that -they start with a random population of points, and randomly evolve these +The \acronym{CRS} algorithms are sometimes compared to genetic algorithms, in +that they start with a random population of points, and randomly evolve these points by heuristic rules. In this case, the evolution somewhat resembles a randomized Nelder-Mead algorithm. -The published results for CRS seem to be largely empirical. +The published results for \acronym{CRS} seem to be largely empirical. } \note{ -The initial population size for CRS defaults to \code{10x(n+1)} in -\code{n} dimensions, but this can be changed; the initial population must be -at least \code{n+1}. +The initial population size for CRS defaults to \eqn{10x(n+1)} in +\eqn{n} dimensions, but this can be changed. The initial population must be +at least \eqn{n+1}. } \examples{ -### Minimize the Hartmann6 function -hartmann6 <- function(x) { - n <- length(x) - a <- c(1.0, 1.2, 3.0, 3.2) - A <- matrix(c(10.0, 0.05, 3.0, 17.0, - 3.0, 10.0, 3.5, 8.0, - 17.0, 17.0, 1.7, 0.05, - 3.5, 0.1, 10.0, 10.0, - 1.7, 8.0, 17.0, 0.1, - 8.0, 14.0, 8.0, 14.0), nrow=4, ncol=6) - B <- matrix(c(.1312,.2329,.2348,.4047, - .1696,.4135,.1451,.8828, - .5569,.8307,.3522,.8732, - .0124,.3736,.2883,.5743, - .8283,.1004,.3047,.1091, - .5886,.9991,.6650,.0381), nrow=4, ncol=6) - fun <- 0.0 +## Minimize the Hartmann 6-Dimensional function +## See https://www.sfu.ca/~ssurjano/hart6.html + +a <- c(1.0, 1.2, 3.0, 3.2) +A <- matrix(c(10, 0.05, 3, 17, + 3, 10, 3.5, 8, + 17, 17, 1.7, 0.05, + 3.5, 0.1, 10, 10, + 1.7, 8, 17, 0.1, + 8, 14, 8, 14), nrow = 4) + +B <- matrix(c(.1312, .2329, .2348, .4047, + .1696, .4135, .1451, .8828, + .5569, .8307, .3522, .8732, + .0124, .3736, .2883, .5743, + .8283, .1004, .3047, .1091, + .5886, .9991, .6650, .0381), nrow = 4) + +hartmann6 <- function(x, a, A, B) { + fun <- 0 for (i in 1:4) { - fun <- fun - a[i] * exp(-sum(A[i,]*(x-B[i,])^2)) + fun <- fun - a[i] * exp(-sum(A[i, ] * (x - B[i, ]) ^ 2)) } - return(fun) + + fun } +## The function has a global minimum of -3.32237 at +## (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573) + S <- crs2lm(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), - nl.info = TRUE, xtol_rel=1e-8, maxeval = 10000) -## Number of Iterations....: 5106 -## Termination conditions: maxeval: 10000 xtol_rel: 1e-08 -## Number of inequality constraints: 0 -## Number of equality constraints: 0 -## Optimal value of objective function: -3.32236801141551 -## Optimal value of controls: 0.2016895 0.1500107 0.476874 0.2753324 -## 0.3116516 0.6573005 + ranseed = 10L, nl.info = TRUE, xtol_rel=1e-8, maxeval = 10000, + a = a, A = A, B = B) + +S } \references{ diff --git a/man/isres.Rd b/man/isres.Rd index 454d5847..84f24892 100644 --- a/man/isres.Rd +++ b/man/isres.Rd @@ -15,6 +15,7 @@ isres( pop.size = 20 * (length(x0) + 1), xtol_rel = 1e-06, nl.info = FALSE, + deprecatedBehavior = TRUE, ... 
) } @@ -26,9 +27,9 @@ isres( \item{lower, upper}{lower and upper bound constraints.} \item{hin}{function defining the inequality constraints, that is -\code{hin>=0} for all components.} +\code{hin <= 0} for all components.} -\item{heq}{function defining the equality constraints, that is \code{heq==0} +\item{heq}{function defining the equality constraints, that is \code{heq = 0} for all components.} \item{maxeval}{maximum number of function evaluations.} @@ -37,7 +38,12 @@ for all components.} \item{xtol_rel}{stopping criterion for relative change reached.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} + +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} \item{...}{additional arguments passed to the function.} } @@ -52,37 +58,50 @@ or a possible error number (< 0).} information.} } \description{ -The Improved Stochastic Ranking Evolution Strategy (ISRES) algorithm for -nonlinearly constrained global optimization (or at least semi-global: -although it has heuristics to escape local optima. +The Improved Stochastic Ranking Evolution Strategy (\acronym{ISRES}) is an +algorithm for nonlinearly constrained global optimization, or at least +semi-global, although it has heuristics to escape local optima. } \details{ -The evolution strategy is based on a combination of a mutation rule (with a -log-normal step-size update and exponential smoothing) and differential -variation (a Nelder-Mead-like update rule). The fitness ranking is simply +The evolution strategy is based on a combination of a mutation rule---with a +log-normal step-size update and exponential smoothing---and differential +variation---a Nelder-Mead-like update rule). The fitness ranking is simply via the objective function for problems without nonlinear constraints, but when nonlinear constraints are included the stochastic ranking proposed by Runarsson and Yao is employed. This method supports arbitrary nonlinear inequality and equality constraints -in addition to the bound constraints. +in addition to the bounds constraints. } \note{ -The initial population size for CRS defaults to \code{20x(n+1)} in -\code{n} dimensions, but this can be changed; the initial population must be -at least \code{n+1}. +The initial population size for CRS defaults to \eqn{20x(n+1)} in +\eqn{n} dimensions, but this can be changed. The initial population must be +at least \eqn{n+1}. 
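The population note above can be exercised directly. In the sketch below, the population size of 30 is an arbitrary choice for a two-dimensional problem, where the default would be 20 * (n + 1) = 60 and anything of at least n + 1 = 3 is admissible:

library(nloptr)

rbf <- function(x) (1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2

## n = 2 here, so the default population would be 20 * (n + 1) = 60;
## any value of at least n + 1 = 3 is admissible.
S <- isres(x0 = c(-1.2, 1), fn = rbf, lower = c(-3, -3), upper = c(3, 3),
           pop.size = 30, maxeval = 2e4L)

S$par  # should land near (1, 1), within the method's stochastic accuracy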
} \examples{ -### Rosenbrock Banana objective function -fn <- function(x) - return( 100 * (x[2] - x[1] * x[1])^2 + (1 - x[1])^2 ) +## Rosenbrock Banana objective function + +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} + +x0 <- c(-1.2, 1) +lb <- c(-3, -3) +ub <- c(3, 3) + +## The function as written above has a minimum of 0 at (1, 1) + +isres(x0 = x0, fn = rbf, lower = lb, upper = ub) + +## Now subject to the inequality that x[1] + x[2] <= 1.5 + +hin <- function(x) {x[1] + x[2] - 1.5} + +S <- isres(x0 = x0, fn = rbf, hin = hin, lower = lb, upper = ub, + maxeval = 2e5L, deprecatedBehavior = FALSE) -x0 <- c( -1.2, 1 ) -lb <- c( -3, -3 ) -ub <- c( 3, 3 ) +S -isres(x0 = x0, fn = fn, lower = lb, upper = ub) +sum(S$par) } \references{ diff --git a/man/stogo.Rd b/man/stogo.Rd index 02e5e3c0..3727337e 100644 --- a/man/stogo.Rd +++ b/man/stogo.Rd @@ -32,7 +32,7 @@ stogo( \item{randomized}{logical; shall a randomizing variant be used?} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{...}{additional arguments passed to the function.} } @@ -43,34 +43,32 @@ List with components: \item{iter}{number of (outer) iterations, see \code{maxeval}.} \item{convergence}{integer code indicating successful completion (> 0) or a possible error number (< 0).} -\item{message}{character string produced by NLopt and giving additional -information.} +\item{message}{character string produced by \acronym{NLopt} and giving +additional information.} } \description{ -StoGO is a global optimization algorithm that works by systematically -dividing the search space into smaller hyper-rectangles. -} -\details{ -StoGO is a global optimization algorithm that works by systematically -dividing the search space (which must be bound-constrained) into smaller -hyper-rectangles via a branch-and-bound technique, and searching them by a -gradient-based local-search algorithm (a BFGS variant), optionally including -some randomness. +\acronym{StoGO} is a global optimization algorithm that works by +systematically dividing the search space---which must be +bound-constrained---into smaller hyper-rectangles via a branch-and-bound +technique, and searching them using a gradient-based local-search algorithm +(a \acronym{BFGS} variant), optionally including some randomness. } \note{ -Only bound-constrained problems are supported by this algorithm. +Only bounds-constrained problems are supported by this algorithm. } \examples{ -### Rosenbrock Banana objective function -fn <- function(x) - return( 100 * (x[2] - x[1] * x[1])^2 + (1 - x[1])^2 ) +## Rosenbrock Banana objective function + +rbf <- function(x) {(1 - x[1]) ^ 2 + 100 * (x[2] - x[1] ^ 2) ^ 2} + +x0 <- c(-1.2, 1) +lb <- c(-3, -3) +ub <- c(3, 3) -x0 <- c( -1.2, 1 ) -lb <- c( -3, -3 ) -ub <- c( 3, 3 ) +## The function as written above has a minimum of 0 at (1, 1) -stogo(x0 = x0, fn = fn, lower = lb, upper = ub) +stogo(x0 = x0, fn = rbf, lower = lb, upper = ub) } \references{ From 739f7c8585d5b02997bde48d4b93be6b362015d5 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 21:40:58 -0400 Subject: [PATCH 08/15] Clean up Hartmann 6. 
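The point of the Hartmann 6 clean-up in this and the preceding commit is that the coefficients a, A, and B are no longer rebuilt inside the objective; they are forwarded to it through the wrapper's ... argument. A stripped-down sketch of that calling pattern follows; the toy objective qf and its center and scale arguments are invented here and simply stand in for a, A, and B.

library(nloptr)

## Toy objective whose extra parameters arrive through '...'
qf <- function(x, center, scale) scale * sum((x - center) ^ 2)

S <- mlsl(x0 = c(0, 0), fn = qf, lower = c(-5, -5), upper = c(5, 5),
          control = list(xtol_rel = 1e-8, maxeval = 1000),
          center = c(1, 2), scale = 3)   # forwarded to qf via '...'

S$par    # should be close to (1, 2)
S$value  # should be close to 0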
--- R/mlsl.R | 116 +++++++++++++++++++++++++++------------------------- man/mlsl.Rd | 104 ++++++++++++++++++++++++---------------------- 2 files changed, 114 insertions(+), 106 deletions(-) diff --git a/R/mlsl.R b/R/mlsl.R index 678a88a4..124d9f72 100644 --- a/R/mlsl.R +++ b/R/mlsl.R @@ -8,40 +8,43 @@ # Wrapper to solve optimization problem using Multi-Level Single-Linkage. # # CHANGELOG: -# 2014-05-05: Replaced cat by warning. -# 2023-02-09: Cleanup and tweaks for safety and efficiency (Avraham Adler) -# Question, should passing a non-Gradient solver fail directly? -# It will anyway (Avraham Adler) +# +# 2014-05-05: Replaced cat by warning. +# 2023-02-09: Cleanup and tweaks for safety and efficiency. (Avraham Adler) +# Question, should passing a non-Gradient solver fail directly? It will +# anyway. (Avraham Adler) +# 2024-06-04: Cleaned up the Hartmann 6 example. (Avraham Adler) # #' Multi-level Single-linkage #' -#' The ``Multi-Level Single-Linkage'' (MLSL) algorithm for global optimization -#' searches by a sequence of local optimizations from random starting points. -#' A modification of MLSL is included using a low-discrepancy sequence (LDS) -#' instead of pseudorandom numbers. +#' The \dQuote{Multi-Level Single-Linkage} (\acronym{MLSL}) algorithm for global +#' optimization searches by a sequence of local optimizations from random +#' starting points. A modification of \acronym{MLSL} is included using a +#' low-discrepancy sequence (\acronym{LDS}) instead of pseudorandom numbers. #' -#' MLSL is a `multistart' algorithm: it works by doing a sequence of local -#' optimizations---using some other local optimization algorithm---from random -#' or low-discrepancy starting points. MLSL is distinguished, however, by a -#' `clustering' heuristic that helps it to avoid repeated searches of the same -#' local optima and also has some theoretical guarantees of finding all local -#' optima in a finite number of local minimizations. +#' \acronym{MLSL} is a \sQuote{multistart} algorithm: it works by doing a +#' sequence of local optimizations---using some other local optimization +#' algorithm---from random or low-discrepancy starting points. MLSL is +#' distinguished, however, by a `clustering' heuristic that helps it to avoid +#' repeated searches of the same local optima and also has some theoretical +#' guarantees of finding all local optima in a finite number of local +#' minimizations. #' -#' The local-search portion of MLSL can use any of the other algorithms in -#' NLopt, and, in particular, can use either gradient-based or derivative-free -#' algorithms. For this wrapper only gradient-based \code{L-BFGS} is available -#' as local method. +#' The local-search portion of \acronym{MLSL} can use any of the other +#' algorithms in \acronym{NLopt}, and, in particular, can use either +#' gradient-based or derivative-free algorithms. For this wrapper only +#' gradient-based \acronym{LBFGS} is available as local method. #' #' @param x0 initial point for searching the optimum. #' @param fn objective function that is to be minimized. #' @param gr gradient of function \code{fn}; will be calculated numerically if #' not specified. -#' @param lower,upper lower and upper bound constraints. +#' @param lower, upper lower and upper bound constraints. #' @param local.method only \code{BFGS} for the moment. #' @param low.discrepancy logical; shall a low discrepancy variation be used. -#' @param nl.info logical; shall the original NLopt info been shown. 
+#' @param nl.info logical; shall the original \acronym{NLopt} info be shown. #' @param control list of options, see \code{nl.opts} for help. #' @param ... additional arguments passed to the function. #' @@ -51,62 +54,63 @@ #' \item{iter}{number of (outer) iterations, see \code{maxeval}.} #' \item{convergence}{integer code indicating successful completion (> 0) #' or a possible error number (< 0).} -#' \item{message}{character string produced by NLopt and giving additional -#' information.} +#' \item{message}{character string produced by \acronym{NLopt} and giving +#' additional information.} #' #' @export mlsl #' #' @author Hans W. Borchers #' #' @note If you don't set a stopping tolerance for your local-optimization -#' algorithm, MLSL defaults to \code{ftol_rel = 1e-15} and +#' algorithm, \acronym{MLSL} defaults to \code{ftol_rel = 1e-15} and #' \code{xtol_rel = 1e-7} for the local searches. #' #' @seealso \code{\link{direct}} #' -#' @references A. H. G. Rinnooy Kan and G. T. Timmer, ``Stochastic global -#' optimization methods'' Mathematical Programming, vol. 39, p. 27-78 (1987). +#' @references A. H. G. Rinnooy Kan and G. T. Timmer, \dQuote{Stochastic global +#' optimization methods} Mathematical Programming, vol. 39, p. 27-78 (1987). #' -#' Sergei Kucherenko and Yury Sytsko, ``Application of deterministic -#' low-discrepancy sequences in global optimization,'' Computational +#' Sergei Kucherenko and Yury Sytsko, \dQuote{Application of deterministic +#' low-discrepancy sequences in global optimization}, Computational #' Optimization and Applications, vol. 30, p. 297-318 (2005). #' #' @examples #' -#' ### Minimize the Hartmann6 function -#' hartmann6 <- function(x) { -#' n <- length(x) -#' a <- c(1.0, 1.2, 3.0, 3.2) -#' A <- matrix(c(10.0, 0.05, 3.0, 17.0, -#' 3.0, 10.0, 3.5, 8.0, -#' 17.0, 17.0, 1.7, 0.05, -#' 3.5, 0.1, 10.0, 10.0, -#' 1.7, 8.0, 17.0, 0.1, -#' 8.0, 14.0, 8.0, 14.0), nrow=4, ncol=6) -#' B <- matrix(c(.1312,.2329,.2348,.4047, -#' .1696,.4135,.1451,.8828, -#' .5569,.8307,.3522,.8732, -#' .0124,.3736,.2883,.5743, -#' .8283,.1004,.3047,.1091, -#' .5886,.9991,.6650,.0381), nrow=4, ncol=6) -#' fun <- 0.0 +#' ## Minimize the Hartmann 6-Dimensional function +#' ## See https://www.sfu.ca/~ssurjano/hart6.html +#' +#' a <- c(1.0, 1.2, 3.0, 3.2) +#' A <- matrix(c(10, 0.05, 3, 17, +#' 3, 10, 3.5, 8, +#' 17, 17, 1.7, 0.05, +#' 3.5, 0.1, 10, 10, +#' 1.7, 8, 17, 0.1, +#' 8, 14, 8, 14), nrow = 4) +#' +#' B <- matrix(c(.1312, .2329, .2348, .4047, +#' .1696, .4135, .1451, .8828, +#' .5569, .8307, .3522, .8732, +#' .0124, .3736, .2883, .5743, +#' .8283, .1004, .3047, .1091, +#' .5886, .9991, .6650, .0381), nrow = 4) +#' +#' hartmann6 <- function(x, a, A, B) { +#' fun <- 0 #' for (i in 1:4) { -#' fun <- fun - a[i] * exp(-sum(A[i,]*(x-B[i,])^2)) +#' fun <- fun - a[i] * exp(-sum(A[i, ] * (x - B[i, ]) ^ 2)) #' } -#' return(fun) +#' +#' fun #' } -#' S <- mlsl(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), -#' nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 1000)) #' -#' ## Number of Iterations....: 1000 -#' ## Termination conditions: -#' ## stopval: -Inf, xtol_rel: 1e-08, maxeval: 1000, ftol_rel: 0, ftol_abs: 0 -#' ## Number of inequality constraints: 0 -#' ## Number of equality constraints: 0 -#' ## Current value of objective function: -3.32236801141552 -#' ## Current value of controls: -#' ## 0.2016895 0.1500107 0.476874 0.2753324 0.3116516 0.6573005 +#' ## The function has a global minimum of -3.32237 at +#' ## (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 
0.6573) +#' +#' S <- mlsl(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), +#' nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 1000), +#' a = a, A = A, B = B) #' + mlsl <- function(x0, fn, gr = NULL, lower, upper, local.method = "LBFGS", low.discrepancy = TRUE, nl.info = FALSE, control = list(), ...) { diff --git a/man/mlsl.Rd b/man/mlsl.Rd index 0d3af965..42c6e8a0 100644 --- a/man/mlsl.Rd +++ b/man/mlsl.Rd @@ -25,13 +25,13 @@ mlsl( \item{gr}{gradient of function \code{fn}; will be calculated numerically if not specified.} -\item{lower, upper}{lower and upper bound constraints.} +\item{lower, }{upper lower and upper bound constraints.} \item{local.method}{only \code{BFGS} for the moment.} \item{low.discrepancy}{logical; shall a low discrepancy variation be used.} -\item{nl.info}{logical; shall the original NLopt info been shown.} +\item{nl.info}{logical; shall the original \acronym{NLopt} info be shown.} \item{control}{list of options, see \code{nl.opts} for help.} @@ -44,73 +44,77 @@ List with components: \item{iter}{number of (outer) iterations, see \code{maxeval}.} \item{convergence}{integer code indicating successful completion (> 0) or a possible error number (< 0).} -\item{message}{character string produced by NLopt and giving additional -information.} +\item{message}{character string produced by \acronym{NLopt} and giving +additional information.} } \description{ -The ``Multi-Level Single-Linkage'' (MLSL) algorithm for global optimization -searches by a sequence of local optimizations from random starting points. -A modification of MLSL is included using a low-discrepancy sequence (LDS) -instead of pseudorandom numbers. +The \dQuote{Multi-Level Single-Linkage} (\acronym{MLSL}) algorithm for global +optimization searches by a sequence of local optimizations from random +starting points. A modification of \acronym{MLSL} is included using a +low-discrepancy sequence (\acronym{LDS}) instead of pseudorandom numbers. } \details{ -MLSL is a \verb{multistart' algorithm: it works by doing a sequence of local optimizations---using some other local optimization algorithm---from random or low-discrepancy starting points. MLSL is distinguished, however, by a }clustering' heuristic that helps it to avoid repeated searches of the same -local optima and also has some theoretical guarantees of finding all local -optima in a finite number of local minimizations. - -The local-search portion of MLSL can use any of the other algorithms in -NLopt, and, in particular, can use either gradient-based or derivative-free -algorithms. For this wrapper only gradient-based \code{L-BFGS} is available -as local method. +\acronym{MLSL} is a \sQuote{multistart} algorithm: it works by doing a +sequence of local optimizations---using some other local optimization +algorithm---from random or low-discrepancy starting points. MLSL is +distinguished, however, by a `clustering' heuristic that helps it to avoid +repeated searches of the same local optima and also has some theoretical +guarantees of finding all local optima in a finite number of local +minimizations. + +The local-search portion of \acronym{MLSL} can use any of the other +algorithms in \acronym{NLopt}, and, in particular, can use either +gradient-based or derivative-free algorithms. For this wrapper only +gradient-based \acronym{LBFGS} is available as local method. 
} \note{ If you don't set a stopping tolerance for your local-optimization -algorithm, MLSL defaults to \code{ftol_rel = 1e-15} and +algorithm, \acronym{MLSL} defaults to \code{ftol_rel = 1e-15} and \code{xtol_rel = 1e-7} for the local searches. } \examples{ -### Minimize the Hartmann6 function -hartmann6 <- function(x) { - n <- length(x) - a <- c(1.0, 1.2, 3.0, 3.2) - A <- matrix(c(10.0, 0.05, 3.0, 17.0, - 3.0, 10.0, 3.5, 8.0, - 17.0, 17.0, 1.7, 0.05, - 3.5, 0.1, 10.0, 10.0, - 1.7, 8.0, 17.0, 0.1, - 8.0, 14.0, 8.0, 14.0), nrow=4, ncol=6) - B <- matrix(c(.1312,.2329,.2348,.4047, - .1696,.4135,.1451,.8828, - .5569,.8307,.3522,.8732, - .0124,.3736,.2883,.5743, - .8283,.1004,.3047,.1091, - .5886,.9991,.6650,.0381), nrow=4, ncol=6) - fun <- 0.0 +## Minimize the Hartmann 6-Dimensional function +## See https://www.sfu.ca/~ssurjano/hart6.html + +a <- c(1.0, 1.2, 3.0, 3.2) +A <- matrix(c(10, 0.05, 3, 17, + 3, 10, 3.5, 8, + 17, 17, 1.7, 0.05, + 3.5, 0.1, 10, 10, + 1.7, 8, 17, 0.1, + 8, 14, 8, 14), nrow = 4) + +B <- matrix(c(.1312, .2329, .2348, .4047, + .1696, .4135, .1451, .8828, + .5569, .8307, .3522, .8732, + .0124, .3736, .2883, .5743, + .8283, .1004, .3047, .1091, + .5886, .9991, .6650, .0381), nrow = 4) + +hartmann6 <- function(x, a, A, B) { + fun <- 0 for (i in 1:4) { - fun <- fun - a[i] * exp(-sum(A[i,]*(x-B[i,])^2)) + fun <- fun - a[i] * exp(-sum(A[i, ] * (x - B[i, ]) ^ 2)) } - return(fun) + + fun } -S <- mlsl(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), - nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 1000)) -## Number of Iterations....: 1000 -## Termination conditions: -## stopval: -Inf, xtol_rel: 1e-08, maxeval: 1000, ftol_rel: 0, ftol_abs: 0 -## Number of inequality constraints: 0 -## Number of equality constraints: 0 -## Current value of objective function: -3.32236801141552 -## Current value of controls: -## 0.2016895 0.1500107 0.476874 0.2753324 0.3116516 0.6573005 +## The function has a global minimum of -3.32237 at +## (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573) + +S <- mlsl(x0 = rep(0, 6), hartmann6, lower = rep(0, 6), upper = rep(1, 6), + nl.info = TRUE, control = list(xtol_rel = 1e-8, maxeval = 1000), + a = a, A = A, B = B) } \references{ -A. H. G. Rinnooy Kan and G. T. Timmer, ``Stochastic global -optimization methods'' Mathematical Programming, vol. 39, p. 27-78 (1987). +A. H. G. Rinnooy Kan and G. T. Timmer, \dQuote{Stochastic global +optimization methods} Mathematical Programming, vol. 39, p. 27-78 (1987). -Sergei Kucherenko and Yury Sytsko, ``Application of deterministic -low-discrepancy sequences in global optimization,'' Computational +Sergei Kucherenko and Yury Sytsko, \dQuote{Application of deterministic +low-discrepancy sequences in global optimization}, Computational Optimization and Applications, vol. 30, p. 297-318 (2005). } \seealso{ From f566837e23ca06f224943f3e12d6f5c7f5e7ac94 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:04:49 -0400 Subject: [PATCH 09/15] MMA: 1) Changed direction of hin/hinjac inequality to <= 2) Updated HS100 example so it is now accurate with analytic gradient 3) Updated documentation and unit tests. 
--- R/mma.R | 131 +++++++++++++++++-------------- inst/tinytest/test-wrapper-mma.R | 119 +++++++++++++++------------- man/mma.Rd | 104 +++++++++++++----------- 3 files changed, 194 insertions(+), 160 deletions(-) diff --git a/R/mma.R b/R/mma.R index 2f01217a..df58b5ec 100644 --- a/R/mma.R +++ b/R/mma.R @@ -6,21 +6,24 @@ # Date: 27 January 2014 # # Wrapper to solve optimization problem using MMA. +# # CHANGELOG # -# 2023-02-09: Cleanup and tweaks for safety and efficiency (Avraham Adler) +# 2023-02-09: Cleanup and tweaks for safety and efficiency. (Avraham Adler) +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the HS100 +# example. (Avraham Adler) # - #' Method of Moving Asymptotes #' -#' Globally-convergent method-of-moving-asymptotes (MMA) algorithm for +#' Globally-convergent method-of-moving-asymptotes (\acronym{MMA}) algorithm for #' gradient-based local optimization, including nonlinear inequality #' constraints (but not equality constraints). #' -#' This is an improved CCSA ("conservative convex separable approximation") -#' variant of the original MMA algorithm published by Svanberg in 1987, which -#' has become popular for topology optimization. Note: +#' This is an improved \acronym{CCSA} ("conservative convex separable +#' approximation") variant of the original \acronym{MMA} algorithm published by +#' Svanberg in 1987, which has become popular for topology optimization. #' #' @param x0 starting point for searching the optimum. #' @param fn objective function that is to be minimized. @@ -28,11 +31,15 @@ #' not specified. #' @param lower,upper lower and upper bound constraints. #' @param hin function defining the inequality constraints, that is -#' \code{hin>=0} for all components. +#' \code{hin <= 0} for all components. #' @param hinjac Jacobian of function \code{hin}; will be calculated #' numerically if not specified. #' @param nl.info logical; shall the original NLopt info been shown. #' @param control list of options, see \code{nl.opts} for help. +#' @param deprecatedBehavior logical; if \code{TRUE} (default for now), the old +#' behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +#' instead of \eqn{\le 0}. This will be reversed in a future release and +#' eventually removed. #' @param ... additional arguments passed to the function. #' #' @return List with components: @@ -48,66 +55,72 @@ #' #' @author Hans W. Borchers #' -#' @note ``Globally convergent'' does not mean that this algorithm converges to -#' the global optimum; it means that it is guaranteed to converge to some local -#' minimum from any feasible starting point. +#' @note \dQuote{Globally convergent} does not mean that this algorithm +#' converges to the global optimum; rather, it means that the algorithm is +#' guaranteed to converge to some local minimum from any feasible starting +#' point. #' #' @seealso \code{\link{slsqp}} #' -#' @references Krister Svanberg, ``A class of globally convergent optimization -#' methods based on conservative convex separable approximations,'' SIAM J. -#' Optim. 12 (2), p. 555-573 (2002). +#' @references Krister Svanberg, \dQuote{A class of globally convergent +#' optimization methods based on conservative convex separable +#' approximations}, SIAM J. Optim. 12 (2), p. 555-573 (2002). #' #' @examples #' -#' ## Solve the Hock-Schittkowski problem no. 100 with analytic gradients +#' # Solve the Hock-Schittkowski problem no. 
100 with analytic gradients +#' # See https://apmonitor.com/wiki/uploads/Apps/hs100.apm +#' #' x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -#' fn.hs100 <- function(x) { -#' (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + -#' 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -#' } -#' hin.hs100 <- function(x) { -#' h <- numeric(4) -#' h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] -#' h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] -#' h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] -#' h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] -#' return(h) +#' fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + +#' 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + +#' x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} +#' +#' hin.hs100 <- function(x) {c( +#' 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +#' 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +#' 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +#' 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - +#' 11 * x[7]) #' } +#' #' gr.hs100 <- function(x) { -#' c( 2 * x[1] - 20, -#' 10 * x[2] - 120, -#' 4 * x[3]^3, -#' 6 * x[4] - 66, -#' 60 * x[5]^5, -#' 14 * x[6] - 4 * x[7] - 10, -#' 4 * x[7]^3 - 4 * x[6] - 8 )} +#' c( 2 * x[1] - 20, +#' 10 * x[2] - 120, +#' 4 * x[3] ^ 3, +#' 6 * x[4] - 66, +#' 60 * x[5] ^ 5, +#' 14 * x[6] - 4 * x[7] - 10, +#' 4 * x[7] ^ 3 - 4 * x[6] - 8) +#' } +#' #' hinjac.hs100 <- function(x) { -#' matrix(c(4*x[1], 12*x[2]^3, 1, 8*x[4], 5, 0, 0, -#' 7, 3, 20*x[3], 1, -1, 0, 0, -#' 23, 2*x[2], 0, 0, 0, 12*x[6], -8, -#' 8*x[1]-3*x[2], 2*x[2]-3*x[1], 4*x[3], 0, 0, 5, -11), 4, 7, byrow=TRUE) +#' matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, +#' 7, 3, 20 * x[3], 1, -1, 0, 0, +#' 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, +#' 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), +#' nrow = 4, byrow = TRUE) #' } #' -#' # incorrect result with exact jacobian +#' # The optimum value of the objective function should be 680.6300573 +#' # A suitable parameter vector is roughly +#' # (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) +#' +#' # Using analytic Jacobian #' S <- mma(x0.hs100, fn.hs100, gr = gr.hs100, #' hin = hin.hs100, hinjac = hinjac.hs100, -#' nl.info = TRUE, control = list(xtol_rel = 1e-8)) -#' -#' \donttest{ -#' # This example is put in donttest because it runs for more than -#' # 40 seconds under 32-bit Windows. The difference in time needed -#' # to execute the code between 32-bit Windows and 64-bit Windows -#' # can probably be explained by differences in rounding/truncation -#' # on the different systems. On Windows 32-bit more iterations -#' # are needed resulting in a longer runtime. -#' # correct result with inexact jacobian +#' nl.info = TRUE, control = list(xtol_rel = 1e-8), +#' deprecatedBehavior = FALSE) +#' +#' # Using computed Jacobian #' S <- mma(x0.hs100, fn.hs100, hin = hin.hs100, -#' nl.info = TRUE, control = list(xtol_rel = 1e-8)) -#' } +#' nl.info = TRUE, control = list(xtol_rel = 1e-8), +#' deprecatedBehavior = FALSE) #' + mma <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, - hinjac = NULL, nl.info = FALSE, control = list(), ...) { + hinjac = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ...) 
{ opts <- nl.opts(control) opts["algorithm"] <- "NLOPT_LD_MMA" @@ -124,17 +137,19 @@ mma <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (!is.null(hin)) { - if (getOption("nloptr.show.inequality.warning")) { - message("For consistency with the rest of the package the ", - "inequality sign may be switched from >= to <= in a ", - "future nloptr version.") + if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", + "behavior will be removed in a future release.") + .hin <- match.fun(hin) + hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! } - - .hin <- match.fun(hin) - hin <- function(x) -.hin(x) # change hin >= 0 to hin <= 0 ! if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) - } else { + } else if (deprecatedBehavior) { + message("The old behavior for hin >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old", + "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) } diff --git a/inst/tinytest/test-wrapper-mma.R b/inst/tinytest/test-wrapper-mma.R index bdb7cf6b..78b7c863 100644 --- a/inst/tinytest/test-wrapper-mma.R +++ b/inst/tinytest/test-wrapper-mma.R @@ -7,79 +7,74 @@ # # Test wrapper calls to MMA algorithm # -# Changelog: -# 2023-08-23: Change _output to _stdout +# CHANGELOG # +# 2023-08-23: Change _output to _stdout +# 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving +# the old behavior as the default for now. Also cleaned up the HS100 +# example. (Avraham Adler) library(nloptr) -ineqMess <- paste("For consistency with the rest of the package the inequality", - "sign may be switched from >= to <= in a future nloptr", - "version.") +depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", + "restate the inequality to be <=0. 
The ability to use the old", + "behavior will be removed in a future release.") -## Functions for COBYLA, BOBYQA, and NEWUOA +# Taken from example x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1L] - 10) ^ 2 + 5 * (x[2L] - 12) ^ 2 + x[3L] ^ 4 + 3 * (x[4L] - 11) ^ 2 + - 10 * x[5L] ^ 6 + 7 * x[6L] ^ 2 + x[7L] ^ 4 - 4 * x[6L] * x[7L] - - 10 * x[6L] - 8 * x[7L] -} - -hin.hs100 <- function(x) { - h <- double(4L) - h[1L] <- 127 - 2 * x[1L] ^ 2 - 3 * x[2L] ^ 4 - x[3L] - 4 * x[4L] ^ 2 - 5 * - x[5L] - h[2L] <- 282 - 7 * x[1L] - 3 * x[2L] - 10 * x[3L] ^ 2 - x[4L] + x[5L] - h[3L] <- 196 - 23 * x[1L] - x[2L] ^ 2 - 6 * x[6L] ^ 2 + 8 * x[7L] - h[4L] <- -4 * x[1L] ^ 2 - x[2L] ^ 2 + 3 * x[1L] * x[2L] - 2 * x[3L] ^ 2 - - 5 * x[6L] + 11 * x[7L] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, + 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } gr.hs100 <- function(x) { - c(2 * x[1L] - 20, - 10 * x[2L] - 120, - 4 * x[3L] ^ 3, - 6 * x[4L] - 66, - 60 * x[5L] ^ 5, - 14 * x[6L] - 4 * x[7L] - 10, - 4 * x[7L] ^ 3 - 4 * x[6L] - 8) + c( 2 * x[1] - 20, + 10 * x[2] - 120, + 4 * x[3] ^ 3, + 6 * x[4] - 66, + 60 * x[5] ^ 5, + 14 * x[6] - 4 * x[7] - 10, + 4 * x[7] ^ 3 - 4 * x[6] - 8) } hinjac.hs100 <- function(x) { - matrix(c(4 * x[1L], 12 * x[2L] ^ 3, 1, 8 * x[4L], 5, 0, 0, 7, 3, 20 * x[3L], - 1, -1, 0, 0, 23, 2 * x[2L], 0, 0, 0, 12 * x[6L], -8, - 8 * x[1L] - 3 * x[2L], 2 * x[2L] - 3 * x[1L], 4 * x[3L], 0, 0, 5, - -11), 4L, 7L, byrow = TRUE) + matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, + 7, 3, 20 * x[3], 1, -1, 0, 0, + 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, + 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), + nrow = 4, byrow = TRUE) } -hin2.hs100 <- function(x) -hin.hs100(x) # Needed for nloptr call -hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed for nloptr call +hin2.hs100 <- function(x) -hin.hs100(x) # Needed to test old behavior +hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed to test old behavior ctl <- list(xtol_rel = 1e-8) -# Test messages -expect_message(mma(x0.hs100, fn.hs100, hin = hin.hs100), ineqMess) - # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. expect_stdout(suppressMessages(mma(x0.hs100, fn.hs100, nl.info = TRUE)), "Call:", fixed = TRUE) -expect_silent(suppressMessages(mma(x0.hs100, fn.hs100))) +# Expect unconstrained call to work +expect_silent(mma(x0.hs100, fn.hs100)) -# Test MMA algorithm passing both gradient and Jacobian. Yes, this is the -# incorrect answer but it properly tests the functionality. -# (AA: 2023-02-06) -mmaTest <- suppressMessages(mma(x0.hs100, fn.hs100, gr = gr.hs100, - hin = hin.hs100, hinjac = hinjac.hs100, - control = ctl)) +# Test MMA algorithm with inequality but passing neither gradient nor Jacobian. +# As everyone is on Windows64, timing should be less of an issue. 
+mmaTest <- mma(x0.hs100, fn.hs100, hin = hin.hs100, control = ctl, + deprecatedBehavior = FALSE) mmaControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, - eval_grad_f = gr.hs100, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2.hs100, + eval_grad_f = function(x) nl.grad(x, fn.hs100), + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = function(x) nl.jacobian(x, hin.hs100), opts = list(algorithm = "NLOPT_LD_MMA", xtol_rel = 1e-8, maxeval = 1000L)) @@ -89,19 +84,33 @@ expect_identical(mmaTest$iter, mmaControl$iterations) expect_identical(mmaTest$convergence, mmaControl$status) expect_identical(mmaTest$message, mmaControl$message) -# Test MMA algorithm passing neither gradient nor Jacobian. As everyone is on -# Windows64, timing should be less of an issue. -mmaTest <- suppressMessages(mma(x0.hs100, fn.hs100, hin = hin.hs100, - control = ctl)) - +# Test MMA algorithm passing both gradient and Jacobian. Now correct. +# (AA: 2024-06-04) mmaControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, - eval_grad_f = function(x) nl.grad(x, fn.hs100), - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = function(x) nl.jacobian(x, hin2.hs100), + eval_grad_f = gr.hs100, + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = hinjac.hs100, opts = list(algorithm = "NLOPT_LD_MMA", xtol_rel = 1e-8, maxeval = 1000L)) +mmaTest <- mma(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin.hs100, + hinjac = hinjac.hs100, control = ctl, deprecatedBehavior = FALSE) + +expect_identical(mmaTest$par, mmaControl$solution) +expect_identical(mmaTest$value, mmaControl$objective) +expect_identical(mmaTest$iter, mmaControl$iterations) +expect_identical(mmaTest$convergence, mmaControl$status) +expect_identical(mmaTest$message, mmaControl$message) + +# Test deprecated message +expect_message(mma(x0.hs100, fn.hs100, hin = hin.hs100), depMess) + +# Test deprecated behavior +mmaTest <- suppressMessages(mma(x0.hs100, fn.hs100, gr = gr.hs100, + hin = hin2.hs100, hinjac = hinjac2.hs100, + control = ctl)) + expect_identical(mmaTest$par, mmaControl$solution) expect_identical(mmaTest$value, mmaControl$objective) expect_identical(mmaTest$iter, mmaControl$iterations) diff --git a/man/mma.Rd b/man/mma.Rd index 28263070..8301f837 100644 --- a/man/mma.Rd +++ b/man/mma.Rd @@ -14,6 +14,7 @@ mma( hinjac = NULL, nl.info = FALSE, control = list(), + deprecatedBehavior = TRUE, ... ) } @@ -28,7 +29,7 @@ not specified.} \item{lower, upper}{lower and upper bound constraints.} \item{hin}{function defining the inequality constraints, that is -\code{hin>=0} for all components.} +\code{hin <= 0} for all components.} \item{hinjac}{Jacobian of function \code{hin}; will be calculated numerically if not specified.} @@ -37,6 +38,11 @@ numerically if not specified.} \item{control}{list of options, see \code{nl.opts} for help.} +\item{deprecatedBehavior}{logical; if \code{TRUE} (default for now), the old +behavior of the Jacobian function is used, where the equality is \eqn{\ge 0} +instead of \eqn{\le 0}. This will be reversed in a future release and +eventually removed.} + \item{...}{additional arguments passed to the function.} } \value{ @@ -50,73 +56,77 @@ or a possible error number (< 0).} information.} } \description{ -Globally-convergent method-of-moving-asymptotes (MMA) algorithm for +Globally-convergent method-of-moving-asymptotes (\acronym{MMA}) algorithm for gradient-based local optimization, including nonlinear inequality constraints (but not equality constraints). 
} \details{ -This is an improved CCSA ("conservative convex separable approximation") -variant of the original MMA algorithm published by Svanberg in 1987, which -has become popular for topology optimization. Note: +This is an improved \acronym{CCSA} ("conservative convex separable +approximation") variant of the original \acronym{MMA} algorithm published by +Svanberg in 1987, which has become popular for topology optimization. } \note{ -``Globally convergent'' does not mean that this algorithm converges to -the global optimum; it means that it is guaranteed to converge to some local -minimum from any feasible starting point. +\dQuote{Globally convergent} does not mean that this algorithm +converges to the global optimum; rather, it means that the algorithm is +guaranteed to converge to some local minimum from any feasible starting +point. } \examples{ -## Solve the Hock-Schittkowski problem no. 100 with analytic gradients +# Solve the Hock-Schittkowski problem no. 100 with analytic gradients +# See https://apmonitor.com/wiki/uploads/Apps/hs100.apm + x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1]-10)^2 + 5*(x[2]-12)^2 + x[3]^4 + 3*(x[4]-11)^2 + 10*x[5]^6 + - 7*x[6]^2 + x[7]^4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7] -} -hin.hs100 <- function(x) { - h <- numeric(4) - h[1] <- 127 - 2*x[1]^2 - 3*x[2]^4 - x[3] - 4*x[4]^2 - 5*x[5] - h[2] <- 282 - 7*x[1] - 3*x[2] - 10*x[3]^2 - x[4] + x[5] - h[3] <- 196 - 23*x[1] - x[2]^2 - 6*x[6]^2 + 8*x[7] - h[4] <- -4*x[1]^2 - x[2]^2 + 3*x[1]*x[2] -2*x[3]^2 - 5*x[6] +11*x[7] - return(h) +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( +2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, +7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, +23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, +4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } + gr.hs100 <- function(x) { - c( 2 * x[1] - 20, - 10 * x[2] - 120, - 4 * x[3]^3, - 6 * x[4] - 66, - 60 * x[5]^5, - 14 * x[6] - 4 * x[7] - 10, - 4 * x[7]^3 - 4 * x[6] - 8 )} + c( 2 * x[1] - 20, + 10 * x[2] - 120, + 4 * x[3] ^ 3, + 6 * x[4] - 66, + 60 * x[5] ^ 5, + 14 * x[6] - 4 * x[7] - 10, + 4 * x[7] ^ 3 - 4 * x[6] - 8) +} + hinjac.hs100 <- function(x) { - matrix(c(4*x[1], 12*x[2]^3, 1, 8*x[4], 5, 0, 0, - 7, 3, 20*x[3], 1, -1, 0, 0, - 23, 2*x[2], 0, 0, 0, 12*x[6], -8, - 8*x[1]-3*x[2], 2*x[2]-3*x[1], 4*x[3], 0, 0, 5, -11), 4, 7, byrow=TRUE) + matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, + 7, 3, 20 * x[3], 1, -1, 0, 0, + 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, + 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), + nrow = 4, byrow = TRUE) } -# incorrect result with exact jacobian +# The optimum value of the objective function should be 680.6300573 +# A suitable parameter vector is roughly +# (2.330, 1.9514, -0.4775, 4.3657, -0.6245, 1.0381, 1.5942) + +# Using analytic Jacobian S <- mma(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin.hs100, hinjac = hinjac.hs100, - nl.info = TRUE, control = list(xtol_rel = 1e-8)) - -\donttest{ -# This example is put in donttest because it runs for more than -# 40 seconds under 32-bit Windows. The difference in time needed -# to execute the code between 32-bit Windows and 64-bit Windows -# can probably be explained by differences in rounding/truncation -# on the different systems. 
On Windows 32-bit more iterations -# are needed resulting in a longer runtime. -# correct result with inexact jacobian + nl.info = TRUE, control = list(xtol_rel = 1e-8), + deprecatedBehavior = FALSE) + +# Using computed Jacobian S <- mma(x0.hs100, fn.hs100, hin = hin.hs100, - nl.info = TRUE, control = list(xtol_rel = 1e-8)) -} + nl.info = TRUE, control = list(xtol_rel = 1e-8), + deprecatedBehavior = FALSE) } \references{ -Krister Svanberg, ``A class of globally convergent optimization -methods based on conservative convex separable approximations,'' SIAM J. -Optim. 12 (2), p. 555-573 (2002). +Krister Svanberg, \dQuote{A class of globally convergent +optimization methods based on conservative convex separable +approximations}, SIAM J. Optim. 12 (2), p. 555-573 (2002). } \seealso{ \code{\link{slsqp}} From 165b4f7799d961f3183d89b6dd85f977c44c034a Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:11:42 -0400 Subject: [PATCH 10/15] Call the hinjac parameter by its proper name. --- R/auglag.R | 4 ++-- R/ccsaq.R | 4 ++-- R/mma.R | 4 ++-- R/slsqp.R | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/R/auglag.R b/R/auglag.R index 0e9d31eb..9d055d96 100644 --- a/R/auglag.R +++ b/R/auglag.R @@ -202,8 +202,8 @@ auglag <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", - "restate the inequality to be <=0. The ability to use the old", + message("The old behavior for hinjac >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) diff --git a/R/ccsaq.R b/R/ccsaq.R index 80d9e08c..fe11cab9 100644 --- a/R/ccsaq.R +++ b/R/ccsaq.R @@ -138,8 +138,8 @@ ccsaq <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", - "restate the inequality to be <=0. The ability to use the old", + message("The old behavior for hinjac >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) diff --git a/R/mma.R b/R/mma.R index df58b5ec..34ff96d5 100644 --- a/R/mma.R +++ b/R/mma.R @@ -147,8 +147,8 @@ mma <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", - "restate the inequality to be <=0. The ability to use the old", + message("The old behavior for hinjac >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) diff --git a/R/slsqp.R b/R/slsqp.R index 423d5594..647aa738 100644 --- a/R/slsqp.R +++ b/R/slsqp.R @@ -133,8 +133,8 @@ slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. 
Please ", - "restate the inequality to be <=0. The ability to use the old", + message("The old behavior for hinjac >= 0 has been deprecated. Please ", + "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) hinjac <- function(x) -.hinjac(x) From f10da7560be341a890e090b71e21f2a8f854d5f8 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:16:30 -0400 Subject: [PATCH 11/15] Bring auglag coverage back up to 100% --- inst/tinytest/test-wrapper-auglag.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inst/tinytest/test-wrapper-auglag.R b/inst/tinytest/test-wrapper-auglag.R index 10d86bbf..54ce10db 100644 --- a/inst/tinytest/test-wrapper-auglag.R +++ b/inst/tinytest/test-wrapper-auglag.R @@ -179,8 +179,8 @@ expect_identical(augTest$message, augControl$message) expect_message(auglag(x0, fn, hin = hin2), depMess) # Test old behavior still works -augTest <- suppressMessages(auglag(x0, fn, hin = hin2, heq = heq, - localsolver = "MMA")) +augTest <- suppressMessages(auglag(x0, fn, hin = hin2, hinjac = hinjac2, + heq = heq, localsolver = "MMA")) expect_identical(augTest$par, augControl$solution) expect_identical(augTest$value, augControl$objective) From e4feaad8db40694bdb33df85eb1fe50c39d3d81d Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:30:19 -0400 Subject: [PATCH 12/15] Combine all changes not yet sent to CRAN into one release in NEWS.md and DESCRIPTION. Release version is minor, since deprecating the direction is a significant change. It is not major, since the API remains the same, currently. --- DESCRIPTION | 2 +- NEWS.md | 46 ++++++++++++++++++++++++---------------------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 85ae857b..ca3ad4fb 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: nloptr Type: Package Title: R Interface to NLopt -Version: 2.0.4 +Version: 2.1.0 Authors@R: c(person("Jelmer", "Ypma", role = "aut", email = "uctpjyy@ucl.ac.uk"), person(c("Steven", "G."), "Johnson", role = "aut", diff --git a/NEWS.md b/NEWS.md index 70b8b7ba..f61a8ab6 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,14 +1,26 @@ -# nloptr 2.0.4 - -* Updated roxygen version; -* Updated maintainer email; +# nloptr 2.1.0 +This release deprecates the default behavior of the inequality equations in any +wrapper function which uses them. Currently, they are calibrated to be >= 0. +This version allows for the equations to be consistent with the main `nloptr` +function, which requires <= 0. In a future release, the default behavior will +switch to assuming the calibration is <= 0, and eventually, the >= 0 behavior +will be removed. It also includes a large number of safety and efficiency +changes, and an expansion of the unit tests to 100% coverage for all files but +one. The major changes include: + +* Reversed the direction of the inequality equations `hin` and `hinjac` in the +wrapper functions which use them, bringing them into compliance with the main +`nloptr` call. This addresses +[Issue #148](https://github.com/astamm/nloptr/issues/148). +* Cleaned the Hock-Schittkowski problem no. 100 and Hartmann 6-dimensional +examples. This also addresses +[Issue #152](https://github.com/astamm/nloptr/issues/152). +* Updated roxygen version. +* Updated maintainer email. 
* Deal with NA returns from detectCords (contributed by @jeroen in PR #150); -* Setup rhub v2 checks; -* Update cmake installation instructions on Mac with brew (#146); -* Allow use of equality constraints with COBYLA (#135); - -# nloptr 2.0.3.9100 - +* Setup rhub v2 checks. +* Update cmake installation instructions on Mac with brew (#146). +* Allow use of equality constraints with COBYLA (#135). * Replaced the unit testing framework of `testthat` with `tinytest` (See [Issue #136](https://github.com/astamm/nloptr/issues/136)). * Brought coverage of `is.nloptr` to 100%. The only file not completely covered @@ -17,18 +29,6 @@ trapped by tests in R before the call gets to C. * Linted package for code correctness and consistency. * Updated vignette, DESCRIPTION, and NEWS. * Updated package website to use bootstrap 5. - -# nloptr 2.0.3.9000 - -This is a patch version update which should make the code safer, more efficient, -and easier to follow. Please see commit logs for -[#128](https://github.com/astamm/nloptr/pull/128), -[#129](https://github.com/astamm/nloptr/pull/129), -[#131](https://github.com/astamm/nloptr/pull/131), -[#132](https://github.com/astamm/nloptr/pull/132), -and [#133](https://github.com/astamm/nloptr/pull/133) for the full -description of the changes which include: - * Expanded unit tests: coverage now over 97% with no file below 90% * Removed forcing `C++11` * Added safety checks to C code @@ -38,6 +38,8 @@ description of the changes which include: * Updated Github actions * Some bugfixes (e.g. in `isres` or the warning in `nl.grad`.) +Please see the commit logs for more detailed descriptions of the changes. + # nloptr 2.0.3 * Improved compatibility on RHEL/CentOS by first searching for a `cmake3` binary From fa55b74ab5e622aeab4dd7a79d0213bb9526ffd6 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:36:05 -0400 Subject: [PATCH 13/15] Fix mlsl documentation. Stupid mistake with comma. --- R/mlsl.R | 2 +- man/mlsl.Rd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/R/mlsl.R b/R/mlsl.R index 124d9f72..d193c355 100644 --- a/R/mlsl.R +++ b/R/mlsl.R @@ -41,7 +41,7 @@ #' @param fn objective function that is to be minimized. #' @param gr gradient of function \code{fn}; will be calculated numerically if #' not specified. -#' @param lower, upper lower and upper bound constraints. +#' @param lower,upper lower and upper bound constraints. #' @param local.method only \code{BFGS} for the moment. #' @param low.discrepancy logical; shall a low discrepancy variation be used. #' @param nl.info logical; shall the original \acronym{NLopt} info be shown. diff --git a/man/mlsl.Rd b/man/mlsl.Rd index 42c6e8a0..d002088f 100644 --- a/man/mlsl.Rd +++ b/man/mlsl.Rd @@ -25,7 +25,7 @@ mlsl( \item{gr}{gradient of function \code{fn}; will be calculated numerically if not specified.} -\item{lower, }{upper lower and upper bound constraints.} +\item{lower, upper}{lower and upper bound constraints.} \item{local.method}{only \code{BFGS} for the moment.} From bc1ed922da03144b8007f6d32ea1bce551088778 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Tue, 4 Jun 2024 22:57:54 -0400 Subject: [PATCH 14/15] Missed one in description. --- NEWS.md | 7 ++++--- R/auglag.R | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/NEWS.md b/NEWS.md index f61a8ab6..a97c25b6 100644 --- a/NEWS.md +++ b/NEWS.md @@ -12,9 +12,10 @@ one. The major changes include: wrapper functions which use them, bringing them into compliance with the main `nloptr` call. 
This addresses [Issue #148](https://github.com/astamm/nloptr/issues/148). -* Cleaned the Hock-Schittkowski problem no. 100 and Hartmann 6-dimensional -examples. This also addresses -[Issue #152](https://github.com/astamm/nloptr/issues/152). +* Cleaned the Hock-Schittkowski problem no. 100, Hartmann 6-dimensional, and +Powell exponential examples. This addresses +[Issue #152](https://github.com/astamm/nloptr/issues/152) and +[Issue #156](https://github.com/astamm/nloptr/issues/156). * Updated roxygen version. * Updated maintainer email. * Deal with NA returns from detectCords (contributed by @jeroen in PR #150); diff --git a/R/auglag.R b/R/auglag.R index 9d055d96..b45cd613 100644 --- a/R/auglag.R +++ b/R/auglag.R @@ -13,7 +13,8 @@ # (thanks to Leo Belzile). # 2023-02-08: Tweaks for efficiency and readability (Avraham Adler) # 2024-06-04: Switched desired direction of the hin/hinjac inequalities, leaving -# the old behavior as the default for now (Avraham Adler). +# the old behavior as the default for now. Also corrected Powell example. +# (Avraham Adler) # #' Augmented Lagrangian Algorithm From fe63b95aac8f30a19f257ee3be2cef6a0538db97 Mon Sep 17 00:00:00 2001 From: Avraham Adler Date: Thu, 6 Jun 2024 11:21:39 -0400 Subject: [PATCH 15/15] 1) Upgrade deprecated message to a warning. 2) Bring SLSQP testing in line with others. --- R/auglag.R | 4 +- R/ccsaq.R | 4 +- R/cobyla.R | 2 +- R/global.R | 2 +- R/mma.R | 4 +- R/slsqp.R | 4 +- inst/tinytest/test-wrapper-auglag.R | 8 +- inst/tinytest/test-wrapper-ccsaq.R | 4 +- inst/tinytest/test-wrapper-cobyla.R | 4 +- inst/tinytest/test-wrapper-global.R | 4 +- inst/tinytest/test-wrapper-mma.R | 4 +- inst/tinytest/test-wrapper-slsqp.R | 110 +++++++++++++--------------- 12 files changed, 74 insertions(+), 80 deletions(-) diff --git a/R/auglag.R b/R/auglag.R index b45cd613..b0ace12f 100644 --- a/R/auglag.R +++ b/R/auglag.R @@ -192,7 +192,7 @@ auglag <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, # Inequality constraints if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) @@ -203,7 +203,7 @@ auglag <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hinjac >= 0 has been deprecated. Please ", + warning("The old behavior for hinjac >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) diff --git a/R/ccsaq.R b/R/ccsaq.R index fe11cab9..454ab985 100644 --- a/R/ccsaq.R +++ b/R/ccsaq.R @@ -129,7 +129,7 @@ ccsaq <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. 
The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) @@ -138,7 +138,7 @@ ccsaq <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hinjac >= 0 has been deprecated. Please ", + warning("The old behavior for hinjac >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) diff --git a/R/cobyla.R b/R/cobyla.R index 0f9c71a9..e04a59e5 100644 --- a/R/cobyla.R +++ b/R/cobyla.R @@ -105,7 +105,7 @@ cobyla <- function(x0, fn, lower = NULL, upper = NULL, hin = NULL, if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) diff --git a/R/global.R b/R/global.R index 8eed0035..e3371f7d 100644 --- a/R/global.R +++ b/R/global.R @@ -197,7 +197,7 @@ isres <- function(x0, fn, lower, upper, hin = NULL, heq = NULL, maxeval = 10000, if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) diff --git a/R/mma.R b/R/mma.R index 34ff96d5..96401cf9 100644 --- a/R/mma.R +++ b/R/mma.R @@ -138,7 +138,7 @@ mma <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) @@ -147,7 +147,7 @@ mma <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hinjac >= 0 has been deprecated. Please ", + warning("The old behavior for hinjac >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) diff --git a/R/slsqp.R b/R/slsqp.R index 647aa738..60027f75 100644 --- a/R/slsqp.R +++ b/R/slsqp.R @@ -123,7 +123,7 @@ slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (!is.null(hin)) { if (deprecatedBehavior) { - message("The old behavior for hin >= 0 has been deprecated. Please ", + warning("The old behavior for hin >= 0 has been deprecated. Please ", "restate the inequality to be <=0. The ability to use the old ", "behavior will be removed in a future release.") .hin <- match.fun(hin) @@ -133,7 +133,7 @@ slsqp <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL, hin = NULL, if (is.null(hinjac)) { hinjac <- function(x) nl.jacobian(x, hin) } else if (deprecatedBehavior) { - message("The old behavior for hinjac >= 0 has been deprecated. Please ", + warning("The old behavior for hinjac >= 0 has been deprecated. Please ", "restate the inequality to be <=0. 
The ability to use the old ", "behavior will be removed in a future release.") .hinjac <- match.fun(hinjac) diff --git a/inst/tinytest/test-wrapper-auglag.R b/inst/tinytest/test-wrapper-auglag.R index 54ce10db..04379064 100644 --- a/inst/tinytest/test-wrapper-auglag.R +++ b/inst/tinytest/test-wrapper-auglag.R @@ -20,8 +20,8 @@ depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", # Taken from example x0 <- c(1, 1) fn <- function(x) (x[1L] - 2) ^ 2 + (x[2L] - 1) ^ 2 -hin <- function(x) 0.25 * x[1L] ^ 2 + x[2L] ^ 2 - 1 # hin <= 0 -heq <- function(x) x[1L] - 2 * x[2L] + 1 # heq == 0 +hin <- function(x) 0.25 * x[1L] ^ 2 + x[2L] ^ 2 - 1 # hin <= 0 +heq <- function(x) x[1L] - 2 * x[2L] + 1 # heq = 0 gr <- function(x) nl.grad(x, fn) hinjac <- function(x) nl.jacobian(x, hin) heqjac <- function(x) nl.jacobian(x, heq) @@ -176,10 +176,10 @@ expect_identical(augTest$convergence, augControl$status) expect_identical(augTest$message, augControl$message) # Test deprecated message -expect_message(auglag(x0, fn, hin = hin2), depMess) +expect_warning(auglag(x0, fn, hin = hin2), depMess) # Test old behavior still works -augTest <- suppressMessages(auglag(x0, fn, hin = hin2, hinjac = hinjac2, +augTest <- suppressWarnings(auglag(x0, fn, hin = hin2, hinjac = hinjac2, heq = heq, localsolver = "MMA")) expect_identical(augTest$par, augControl$solution) diff --git a/inst/tinytest/test-wrapper-ccsaq.R b/inst/tinytest/test-wrapper-ccsaq.R index 60034325..4dd5a670 100644 --- a/inst/tinytest/test-wrapper-ccsaq.R +++ b/inst/tinytest/test-wrapper-ccsaq.R @@ -127,10 +127,10 @@ expect_identical(ccsaqTest$convergence, ccsaqControlC$status) expect_identical(ccsaqTest$message, ccsaqControlC$message) # Test deprecated behavior message -expect_message(ccsaq(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) +expect_warning(ccsaq(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) # Test deprecated behavior -ccsaqTest <- suppressMessages(ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, +ccsaqTest <- suppressWarnings(ccsaq(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin2.hs100, hinjac = hinjac2.hs100, control = ctl)) diff --git a/inst/tinytest/test-wrapper-cobyla.R b/inst/tinytest/test-wrapper-cobyla.R index 0aacc9f7..7c972526 100644 --- a/inst/tinytest/test-wrapper-cobyla.R +++ b/inst/tinytest/test-wrapper-cobyla.R @@ -69,10 +69,10 @@ expect_identical(cobylaTest$convergence, cobylaControl$status) expect_identical(cobylaTest$message, cobylaControl$message) # Test deprecated message -expect_message(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) +expect_warning(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100), depMess) # Test deprecated behavior -cobylaTest <- suppressMessages(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100, +cobylaTest <- suppressWarnings(cobyla(x0.hs100, fn.hs100, hin = hin2.hs100, control = ctl)) expect_identical(cobylaTest$par, cobylaControl$solution) diff --git a/inst/tinytest/test-wrapper-global.R b/inst/tinytest/test-wrapper-global.R index 8e438529..ed3baf9a 100644 --- a/inst/tinytest/test-wrapper-global.R +++ b/inst/tinytest/test-wrapper-global.R @@ -186,11 +186,11 @@ expect_identical(stogoTest$convergence, stogoControl$status) expect_identical(stogoTest$message, stogoControl$message) # Test deprecated message -expect_message(isres(x0, rbf, lower = lb, upper = ub, hin = hin2, +expect_warning(isres(x0, rbf, lower = lb, upper = ub, hin = hin2, maxeval = 2e4L), depMess) # Test deprecated behavior -isresTest <- suppressMessages(isres(x0, rbf, lb, ub, hin = hin2, +isresTest <- suppressWarnings(isres(x0, 
rbf, lb, ub, hin = hin2, maxeval = 2e4L)) expect_equal(isresTest$par, isresControl$solution, tolerance = 1e-4) diff --git a/inst/tinytest/test-wrapper-mma.R b/inst/tinytest/test-wrapper-mma.R index 78b7c863..99c8aec2 100644 --- a/inst/tinytest/test-wrapper-mma.R +++ b/inst/tinytest/test-wrapper-mma.R @@ -104,10 +104,10 @@ expect_identical(mmaTest$convergence, mmaControl$status) expect_identical(mmaTest$message, mmaControl$message) # Test deprecated message -expect_message(mma(x0.hs100, fn.hs100, hin = hin.hs100), depMess) +expect_warning(mma(x0.hs100, fn.hs100, hin = hin.hs100), depMess) # Test deprecated behavior -mmaTest <- suppressMessages(mma(x0.hs100, fn.hs100, gr = gr.hs100, +mmaTest <- suppressWarnings(mma(x0.hs100, fn.hs100, gr = gr.hs100, hin = hin2.hs100, hinjac = hinjac2.hs100, control = ctl)) diff --git a/inst/tinytest/test-wrapper-slsqp.R b/inst/tinytest/test-wrapper-slsqp.R index 3049b91e..0a359f65 100644 --- a/inst/tinytest/test-wrapper-slsqp.R +++ b/inst/tinytest/test-wrapper-slsqp.R @@ -19,47 +19,44 @@ depMess <- paste("The old behavior for hin >= 0 has been deprecated. Please", "restate the inequality to be <=0. The ability to use the old", "behavior will be removed in a future release.") -## Functions for SLSQP +# Taken from example x0.hs100 <- c(1, 2, 0, 4, 0, 1, 1) -fn.hs100 <- function(x) { - (x[1L] - 10) ^ 2 + 5 * (x[2L] - 12) ^ 2 + x[3L] ^ 4 + 3 * (x[4L] - 11) ^ 2 + - 10 * x[5L] ^ 6 + 7 * x[6L] ^ 2 + x[7L] ^ 4 - 4 * x[6L] * x[7L] - - 10 * x[6L] - 8 * x[7L] +fn.hs100 <- function(x) {(x[1] - 10) ^ 2 + 5 * (x[2] - 12) ^ 2 + x[3] ^ 4 + + 3 * (x[4] - 11) ^ 2 + 10 * x[5] ^ 6 + 7 * x[6] ^ 2 + + x[7] ^ 4 - 4 * x[6] * x[7] - 10 * x[6] - 8 * x[7]} + +hin.hs100 <- function(x) {c( + 2 * x[1] ^ 2 + 3 * x[2] ^ 4 + x[3] + 4 * x[4] ^ 2 + 5 * x[5] - 127, + 7 * x[1] + 3 * x[2] + 10 * x[3] ^ 2 + x[4] - x[5] - 282, + 23 * x[1] + x[2] ^ 2 + 6 * x[6] ^ 2 - 8 * x[7] - 196, + 4 * x[1] ^ 2 + x[2] ^ 2 - 3 * x[1] * x[2] + 2 * x[3] ^ 2 + 5 * x[6] - + 11 * x[7]) } gr.hs100 <- function(x) { - c(2 * x[1L] - 20, - 10 * x[2L] - 120, - 4 * x[3L] ^ 3, - 6 * x[4L] - 66, - 60 * x[5L] ^ 5, - 14 * x[6L] - 4 * x[7L] - 10, - 4 * x[7L] ^ 3 - 4 * x[6L] - 8) -} - -gr <- function(x) nl.grad(x, fn.hs100) - -hin.hs100 <- function(x) { - h <- double(4L) - h[1L] <- 127 - 2 * x[1L] ^ 2 - 3 * x[2L] ^ 4 - x[3L] - 4 * x[4L] ^ 2 - 5 * - x[5L] - h[2L] <- 282 - 7 * x[1L] - 3 * x[2L] - 10 * x[3L] ^ 2 - x[4L] + x[5L] - h[3L] <- 196 - 23 * x[1L] - x[2L] ^ 2 - 6 * x[6L] ^ 2 + 8 * x[7L] - h[4L] <- -4 * x[1L] ^ 2 - x[2L] ^ 2 + 3 * x[1L] * x[2L] - 2 * x[3L] ^ 2 - - 5 * x[6L] + 11 * x[7L] - return(h) + c( 2 * x[1] - 20, + 10 * x[2] - 120, + 4 * x[3] ^ 3, + 6 * x[4] - 66, + 60 * x[5] ^ 5, + 14 * x[6] - 4 * x[7] - 10, + 4 * x[7] ^ 3 - 4 * x[6] - 8) } hinjac.hs100 <- function(x) { - matrix(c(4 * x[1L], 12 * x[2L] ^ 3, 1, 8 * x[4L], 5, 0, 0, 7, 3, 20 * x[3L], - 1, -1, 0, 0, 23, 2 * x[2L], 0, 0, 0, 12 * x[6L], -8, - 8 * x[1L] - 3 * x[2L], 2 * x[2L] - 3 * x[1L], 4 * x[3L], 0, 0, 5, - -11), 4L, 7L, byrow = TRUE) + matrix(c(4 * x[1], 12 * x[2] ^ 3, 1, 8 * x[4], 5, 0, 0, + 7, 3, 20 * x[3], 1, -1, 0, 0, + 23, 2 * x[2], 0, 0, 0, 12 * x[6], -8, + 8 * x[1] - 3 * x[2], 2 * x[2] - 3 * x[1], 4 * x[3], 0, 0, 5, -11), + nrow = 4, byrow = TRUE) } -hin2.hs100 <- function(x) -hin.hs100(x) # Needed for nloptr call -hinjac2.hs100 <- function(x) -hinjac.hs100(x) # Needed for nloptr call -hinjac2b.hs100 <- function(x) nl.jacobian(x, hin2.hs100)# Needed for nloptr call +hin2.hs100 <- function(x) -hin.hs100(x) # Needed to test old behavior +hinjac2.hs100 
<- function(x) -hinjac.hs100(x) # Needed to test old behavior + +gr.hs100.computed <- function(x) nl.grad(x, fn.hs100) +hinjac.hs100.computed <- function(x) nl.jacobian(x, hin.hs100) +hinjac2.hs100.computed <- function(x) nl.jacobian(x, hin2.hs100) # Test printout if nl.info passed. The word "Call:" should be in output if # passed and not if not passed. @@ -69,13 +66,14 @@ expect_stdout(slsqp(x0.hs100, fn = fn.hs100, nl.info = TRUE), expect_silent(slsqp(x0.hs100, fn = fn.hs100)) # No passed gradient or Inequality Jacobians -slsqpTest <- suppressMessages(slsqp(x0.hs100, fn.hs100, hin = hin.hs100)) +slsqpTest <- slsqp(x0.hs100, fn.hs100, hin = hin.hs100, + deprecatedBehavior = FALSE) slsqpControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, - eval_grad_f = gr, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2b.hs100, + eval_grad_f = gr.hs100.computed, + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = hinjac.hs100.computed, opts = list(algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1e-6, maxeval = 1000L)) @@ -86,15 +84,15 @@ expect_identical(slsqpTest$convergence, slsqpControl$status) expect_identical(slsqpTest$message, slsqpControl$message) # Passed gradient or Inequality Jacobians -slsqpTest <- suppressMessages(slsqp(x0.hs100, fn = fn.hs100, gr = gr.hs100, - hin = hin.hs100, hinjac = hinjac.hs100)) +slsqpTest <- slsqp(x0.hs100, fn = fn.hs100, gr = gr.hs100, hin = hin.hs100, + hinjac = hinjac.hs100, deprecatedBehavior = FALSE) # Going to be reused below in new behavior test. slsqpControlhinjac <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, eval_grad_f = gr.hs100, - eval_g_ineq = hin2.hs100, - eval_jac_g_ineq = hinjac2.hs100, + eval_g_ineq = hin.hs100, + eval_jac_g_ineq = hinjac.hs100, opts = list(algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1e-6, maxeval = 1000L)) @@ -105,13 +103,14 @@ expect_identical(slsqpTest$convergence, slsqpControlhinjac$status) expect_identical(slsqpTest$message, slsqpControlhinjac$message) # Not passing equality Jacobian -slsqpTest <- suppressMessages(slsqp(x0.hs100, fn = fn.hs100, heq = hin.hs100)) +slsqpTest <- slsqp(x0.hs100, fn = fn.hs100, heq = hin.hs100, + deprecatedBehavior = FALSE) slsqpControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, - eval_grad_f = gr.hs100, - eval_g_eq = hin2.hs100, - eval_jac_g_eq = hinjac2b.hs100, + eval_grad_f = gr.hs100.computed, + eval_g_eq = hin.hs100, + eval_jac_g_eq = hinjac.hs100.computed, opts = list(algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1e-6, maxeval = 1000L)) @@ -122,14 +121,14 @@ expect_identical(slsqpTest$convergence, slsqpControl$status) expect_identical(slsqpTest$message, slsqpControl$message) # Passing equality Jacobian -slsqpTest <- suppressMessages(slsqp(x0.hs100, fn = fn.hs100, heq = hin.hs100, - heqjac = hinjac.hs100)) +slsqpTest <- slsqp(x0.hs100, fn = fn.hs100, gr = gr.hs100, heq = hin.hs100, + heqjac = hinjac.hs100, deprecatedBehavior = FALSE) slsqpControl <- nloptr(x0 = x0.hs100, eval_f = fn.hs100, eval_grad_f = gr.hs100, - eval_g_eq = hin2.hs100, - eval_jac_g_eq = hinjac2.hs100, + eval_g_eq = hin.hs100, + eval_jac_g_eq = hinjac.hs100, opts = list(algorithm = "NLOPT_LD_SLSQP", xtol_rel = 1e-6, maxeval = 1000L)) @@ -139,17 +138,12 @@ expect_identical(slsqpTest$iter, slsqpControl$iterations) expect_identical(slsqpTest$convergence, slsqpControl$status) expect_identical(slsqpTest$message, slsqpControl$message) -# Test deprecated behavor message; remove when old behavior made defucnt. 
-expect_message(slsqp(x0.hs100, fn = fn.hs100, hin = hin.hs100), depMess) +# Test deprecated message +expect_warning(slsqp(x0.hs100, fn = fn.hs100, hin = hin2.hs100), depMess) -# Test new behavior. Adjust tests above when old behavior made defucnt. -hinx <- function(x) -hin.hs100(x) -hinjacx <- function(x) -hinjac.hs100(x) -expect_silent(slsqp(x0.hs100, fn = fn.hs100, hin = hinx, hinjac = hinjacx, - deprecatedBehavior = FALSE)) - -slsqpTest <- slsqp(x0.hs100, fn = fn.hs100, hin = hinx, hinjac = hinjacx, - deprecatedBehavior = FALSE) +# Test deprecated behavior Adjust tests above when old behavior made defunct. +slsqpTest <- suppressWarnings(slsqp(x0.hs100, fn = fn.hs100, gr = gr.hs100, + hin = hin2.hs100, hinjac = hinjac2.hs100)) expect_identical(slsqpTest$par, slsqpControlhinjac$solution) expect_identical(slsqpTest$value, slsqpControlhinjac$objective)
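
The central change running through this patch series is the flip of the `hin`/`hinjac` convention in the wrapper functions (now `hin <= 0`, matching the main `nloptr()` interface) together with the `deprecatedBehavior` argument and the deprecation warning. The following sketch is illustrative only and is not part of any patch above; it reuses the constraint from the auglag test file in this series and assumes a build of nloptr that already contains these patches (i.e. the `deprecatedBehavior` argument exists).

```r
## Illustrative sketch only -- not part of the patches above.
## Shows how a caller migrates from the old hin >= 0 convention to the
## new hin <= 0 convention introduced by this series.
library(nloptr)

fn <- function(x) (x[1] - 2) ^ 2 + (x[2] - 1) ^ 2

## Old convention: feasibility written as hin(x) >= 0
hin_old <- function(x) 1 - 0.25 * x[1] ^ 2 - x[2] ^ 2

## New convention: the same region restated as hin(x) <= 0
hin_new <- function(x) 0.25 * x[1] ^ 2 + x[2] ^ 2 - 1

## Old-style call: still accepted for now, but emits the deprecation warning
## and negates hin internally.
S_old <- slsqp(c(1, 1), fn, hin = hin_old)

## New-style call: no warning, consistent with nloptr()'s eval_g_ineq <= 0.
S_new <- slsqp(c(1, 1), fn, hin = hin_new, deprecatedBehavior = FALSE)

## Both formulations describe the same feasible set, so the solutions
## should agree up to solver tolerance.
all.equal(S_old$par, S_new$par)
```

The same migration applies to the other wrappers touched by this series (`mma`, `ccsaq`, `cobyla`, `isres`, `auglag`): restate the inequality so that feasible points satisfy `hin(x) <= 0` (and negate any analytic `hinjac` accordingly), then pass `deprecatedBehavior = FALSE` until that becomes the default.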