Support re-opt for IPOPT
metab0t committed Nov 22, 2024
1 parent fdf0d8b commit ea13b4e
Showing 12 changed files with 188 additions and 90 deletions.
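This commit lets the same IPOPT model be solved more than once ("re-opt"): `optimize()` now re-derives the Jacobian/Hessian sparsity structure on every call via the new `analyze_structure()`, and nonlinear function evaluators that were already JIT-compiled are detected with `_has_function_evaluator` and reused instead of being compiled again. A minimal usage sketch of the resulting workflow follows; only `ipopt.Model(jit="LLVM")` and `optimize()` are taken from this diff, while the `add_variable`/`set_objective` calls are assumed to match the existing pyoptinterface API.

```python
from pyoptinterface import ipopt

# jit="LLVM" is the default declared in this commit's ipopt.py
model = ipopt.Model(jit="LLVM")

x = model.add_variable(lb=0.0)        # assumed pyoptinterface API
model.set_objective((x - 1.0) ** 2)   # assumed pyoptinterface API

model.optimize()  # first solve: analyzes structure and JIT-compiles evaluators

# ... modify the model here (e.g. adjust bounds or parameters) ...

model.optimize()  # re-opt: structure is re-analyzed, compiled evaluators are reused
```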
2 changes: 2 additions & 0 deletions include/pyoptinterface/ipopt_model.hpp
@@ -117,6 +117,7 @@ struct IpoptModel

FunctionIndex _register_function(const AutodiffSymbolicStructure &structure);
void _set_function_evaluator(const FunctionIndex &k, const AutodiffEvaluator &evaluator);
bool _has_function_evaluator(const FunctionIndex &k);

NLConstraintIndex _add_nl_constraint_bounds(const FunctionIndex &k,
const std::vector<VariableIndex> &xs,
@@ -134,6 +135,7 @@ struct IpoptModel

void clear_nl_objective();

void analyze_structure();
void optimize();

// set options
3 changes: 2 additions & 1 deletion include/pyoptinterface/nleval.hpp
@@ -209,7 +209,7 @@ struct LinearQuadraticEvaluator
struct NonlinearFunctionEvaluator
{
std::vector<AutodiffSymbolicStructure> nl_function_structures;
std::vector<AutodiffEvaluator> nl_function_evaluators;
std::vector<std::optional<AutodiffEvaluator>> nl_function_evaluators;
std::vector<FunctionInstances> constraint_function_instances;
std::vector<size_t> active_constraint_function_indices;
std::vector<FunctionInstances> objective_function_instances;
@@ -222,6 +222,7 @@ struct NonlinearFunctionEvaluator

FunctionIndex register_function(const AutodiffSymbolicStructure &structure);
void set_function_evaluator(const FunctionIndex &k, const AutodiffEvaluator &evaluator);
bool has_function_evaluator(const FunctionIndex &k);

NLConstraintIndex add_nl_constraint(const FunctionIndex &k,
const std::vector<VariableIndex> &xs,
21 changes: 20 additions & 1 deletion lib/ipopt_model.cpp
@@ -337,6 +337,11 @@ void IpoptModel::_set_function_evaluator(const FunctionIndex &k, const AutodiffE
m_function_model.set_function_evaluator(k, evaluator);
}

bool IpoptModel::_has_function_evaluator(const FunctionIndex &k)
{
return m_function_model.has_function_evaluator(k);
}

NLConstraintIndex IpoptModel::_add_nl_constraint_bounds(const FunctionIndex &k,
const std::vector<VariableIndex> &xs,
const std::vector<ParameterIndex> &ps,
@@ -444,8 +449,17 @@ static bool eval_h(ipindex n, ipnumber *x, bool new_x, ipnumber obj_factor, ipin
return true;
}

void IpoptModel::optimize()
void IpoptModel::analyze_structure()
{
// init variables
m_jacobian_nnz = 0;
m_jacobian_rows.clear();
m_jacobian_cols.clear();
m_hessian_nnz = 0;
m_hessian_rows.clear();
m_hessian_cols.clear();
m_hessian_index_map.clear();

// analyze structure
m_function_model.analyze_active_functions();
m_function_model.analyze_dense_gradient_structure();
@@ -465,6 +479,11 @@ void IpoptModel::optimize()
fmt::print("Hessian has {} nonzeros\n", m_hessian_nnz);
fmt::print("Hessian rows : {}\n", m_hessian_rows);
fmt::print("Hessian cols : {}\n", m_hessian_cols);*/
}

void IpoptModel::optimize()
{
analyze_structure();

auto problem_ptr =
ipopt::CreateIpoptProblem(n_variables, m_var_lb.data(), m_var_ub.data(), n_constraints,
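The structure analysis that used to sit at the top of `optimize()` is now a separate `analyze_structure()` method that first resets the Jacobian/Hessian nonzero counts, row/column vectors, and index map before rebuilding them, so a second solve does not accumulate stale entries. A small illustrative sketch of that clear-then-rebuild idiom in Python (not the library's code) is shown here:

```python
class SparsityPattern:
    """Illustrative clear-then-rebuild idiom, as in analyze_structure()."""

    def __init__(self) -> None:
        self.nnz = 0
        self.rows: list[int] = []
        self.cols: list[int] = []

    def analyze(self, entries: list[tuple[int, int]]) -> int:
        # Reset everything first so the method can be re-run on every solve.
        self.nnz = 0
        self.rows.clear()
        self.cols.clear()
        for r, c in entries:
            self.rows.append(r)
            self.cols.append(c)
            self.nnz += 1
        return self.nnz


pattern = SparsityPattern()
entries = [(0, 0), (1, 0), (1, 1)]
assert pattern.analyze(entries) == 3
assert pattern.analyze(entries) == 3  # no duplicated nonzeros after a re-run
```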
1 change: 1 addition & 0 deletions lib/ipopt_model_ext.cpp
@@ -147,6 +147,7 @@ NB_MODULE(ipopt_model_ext, m)

.def("_register_function", &IpoptModel::_register_function)
.def("_set_function_evaluator", &IpoptModel::_set_function_evaluator)
.def("_has_function_evaluator", &IpoptModel::_has_function_evaluator)

.def("_add_nl_constraint_bounds", &IpoptModel::_add_nl_constraint_bounds)
.def("_add_nl_constraint_eq", &IpoptModel::_add_nl_constraint_eq)
19 changes: 12 additions & 7 deletions lib/nleval.cpp
@@ -377,7 +377,7 @@ FunctionIndex NonlinearFunctionEvaluator::register_function(
idx.index = nl_function_structures.size();

nl_function_structures.push_back(structure);
nl_function_evaluators.emplace_back();
nl_function_evaluators.emplace_back(std::nullopt);
constraint_function_instances.emplace_back();
objective_function_instances.emplace_back();

@@ -390,6 +390,11 @@ void NonlinearFunctionEvaluator::set_function_evaluator(const FunctionIndex &k,
nl_function_evaluators[k.index] = evaluator;
}

bool NonlinearFunctionEvaluator::has_function_evaluator(const FunctionIndex &k)
{
return nl_function_evaluators[k.index].has_value();
}

NLConstraintIndex NonlinearFunctionEvaluator::add_nl_constraint(
const FunctionIndex &k, const std::vector<VariableIndex> &xs,
const std::vector<ParameterIndex> &ps, size_t y)
@@ -693,7 +698,7 @@ void NonlinearFunctionEvaluator::eval_objective(const double *x, double *y)
for (auto k : active_objective_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
bool has_parameter = kernel.has_parameter;
auto &inst_vec = objective_function_instances[k];

@@ -728,7 +733,7 @@ void NonlinearFunctionEvaluator::eval_objective_gradient(const double *x, double
for (auto k : active_objective_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
if (!kernel.has_jacobian)
continue;

@@ -766,7 +771,7 @@ void NonlinearFunctionEvaluator::eval_constraint(const double *x, double *con)
for (auto k : active_constraint_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
bool has_parameter = kernel.has_parameter;
auto &inst_vec = constraint_function_instances[k];
if (has_parameter)
@@ -799,7 +804,7 @@ void NonlinearFunctionEvaluator::eval_constraint_jacobian(const double *x, doubl
for (auto k : active_constraint_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
if (!kernel.has_jacobian)
continue;

@@ -837,7 +842,7 @@ void NonlinearFunctionEvaluator::eval_lagrangian_hessian(const double *x, const
for (auto k : active_constraint_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
if (!kernel.has_hessian)
continue;

@@ -874,7 +879,7 @@ void NonlinearFunctionEvaluator::eval_lagrangian_hessian(const double *x, const
for (auto k : active_objective_function_indices)
{
auto &kernel = nl_function_structures[k];
auto &evaluator = nl_function_evaluators[k];
auto &evaluator = nl_function_evaluators[k].value();
if (!kernel.has_hessian)
continue;

65 changes: 36 additions & 29 deletions src/pyoptinterface/_src/ipopt.py
@@ -2,7 +2,7 @@
import types
import logging
import platform
from typing import Optional, Dict
from typing import Optional, Dict, Set

from llvmlite import ir

@@ -70,16 +70,22 @@ def autoload_library():


def compile_functions_c(model: "Model", jit_compiler: TCCJITCompiler):
needs_compile_function_indices = []
for function_index in model.function_indices:
if not model._has_function_evaluator(function_index):
needs_compile_function_indices.append(function_index)

if len(needs_compile_function_indices) == 0:
return

io = StringIO()

generate_csrc_prelude(io)

for (
function_index,
cppad_autodiff_graph,
) in model.function_cppad_autodiff_graphs.items():
for function_index in needs_compile_function_indices:
name = model.function_names[function_index]
autodiff_structure = model.function_autodiff_structures[function_index]
cppad_autodiff_graph = model.function_cppad_autodiff_graphs[function_index]

np = autodiff_structure.np
ny = autodiff_structure.ny
@@ -131,14 +137,10 @@ def compile_functions_c(model: "Model", jit_compiler: TCCJITCompiler):

csrc = io.getvalue()

jit_compiler.source_code = csrc
state = jit_compiler.create_state()
jit_compiler.compile_string(state, csrc)

jit_compiler.compile_string(csrc.encode())

for (
function_index,
cppad_autodiff_graph,
) in model.function_cppad_autodiff_graphs.items():
for function_index in needs_compile_function_indices:
name = model.function_names[function_index]
autodiff_structure = model.function_autodiff_structures[function_index]

@@ -147,13 +149,13 @@ def compile_functions_c(model: "Model", jit_compiler: TCCJITCompiler):
gradient_name = name + "_gradient"
hessian_name = name + "_hessian"

f_ptr = jit_compiler.get_symbol(f_name.encode())
f_ptr = jit_compiler.get_symbol(state, f_name)
jacobian_ptr = gradient_ptr = hessian_ptr = 0
if autodiff_structure.has_jacobian:
jacobian_ptr = jit_compiler.get_symbol(jacobian_name.encode())
gradient_ptr = jit_compiler.get_symbol(gradient_name.encode())
jacobian_ptr = jit_compiler.get_symbol(state, jacobian_name)
gradient_ptr = jit_compiler.get_symbol(state, gradient_name)
if autodiff_structure.has_hessian:
hessian_ptr = jit_compiler.get_symbol(hessian_name.encode())
hessian_ptr = jit_compiler.get_symbol(state, hessian_name)

evaluator = AutodiffEvaluator(
autodiff_structure, f_ptr, jacobian_ptr, gradient_ptr, hessian_ptr
@@ -162,17 +164,23 @@ def compile_functions_c(model: "Model", jit_compiler: TCCJITCompiler):


def compile_functions_llvm(model: "Model", jit_compiler: LLJITCompiler):
needs_compile_function_indices = []
for function_index in model.function_indices:
if not model._has_function_evaluator(function_index):
needs_compile_function_indices.append(function_index)

if len(needs_compile_function_indices) == 0:
return

module = ir.Module(name="my_module")
create_llvmir_basic_functions(module)

export_functions = []

for (
function_index,
cppad_autodiff_graph,
) in model.function_cppad_autodiff_graphs.items():
for function_index in needs_compile_function_indices:
name = model.function_names[function_index]
autodiff_structure = model.function_autodiff_structures[function_index]
cppad_autodiff_graph = model.function_cppad_autodiff_graphs[function_index]

np = autodiff_structure.np
ny = autodiff_structure.ny
@@ -224,12 +232,9 @@ def compile_functions_llvm(model: "Model", jit_compiler: LLJITCompiler):

export_functions.extend([f_name, jacobian_name, gradient_name, hessian_name])

jit_compiler.compile_module(module, export_functions)
rt = jit_compiler.compile_module(module, export_functions)

for (
function_index,
cppad_autodiff_graph,
) in model.function_cppad_autodiff_graphs.items():
for function_index in needs_compile_function_indices:
name = model.function_names[function_index]
autodiff_structure = model.function_autodiff_structures[function_index]

@@ -238,13 +243,13 @@ def compile_functions_llvm(model: "Model", jit_compiler: LLJITCompiler):
gradient_name = name + "_gradient"
hessian_name = name + "_hessian"

f_ptr = jit_compiler.get_symbol(f_name)
f_ptr = rt[f_name]
jacobian_ptr = gradient_ptr = hessian_ptr = 0
if autodiff_structure.has_jacobian:
jacobian_ptr = jit_compiler.get_symbol(jacobian_name)
gradient_ptr = jit_compiler.get_symbol(gradient_name)
jacobian_ptr = rt[jacobian_name]
gradient_ptr = rt[gradient_name]
if autodiff_structure.has_hessian:
hessian_ptr = jit_compiler.get_symbol(hessian_name)
hessian_ptr = rt[hessian_name]

evaluator = AutodiffEvaluator(
autodiff_structure, f_ptr, jacobian_ptr, gradient_ptr, hessian_ptr
@@ -445,6 +450,7 @@ def __init__(self, jit: str = "LLVM"):
self.jit = jit
self.add_variables = types.MethodType(make_nd_variable, self)

self.function_indices: Set[FunctionIndex] = set()
self.function_cppad_autodiff_graphs: Dict[FunctionIndex, CppADAutodiffGraph] = (
{}
)
@@ -514,6 +520,7 @@ def register_function(
)

function_index = super()._register_function(autodiff_structure)
self.function_indices.add(function_index)
self.function_cppad_autodiff_graphs[function_index] = cppad_graph
self.function_autodiff_structures[function_index] = autodiff_structure
self.function_tracing_results[function_index] = tracing_result
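On the Python side, `compile_functions_c` and `compile_functions_llvm` now build a `needs_compile_function_indices` list and return early when every registered function already has an evaluator, so a repeated `optimize()` on an unchanged model performs no JIT work. The standalone sketch below mirrors that guard with a fake JIT step to show the intent; it is illustrative only and uses none of pyoptinterface's real types.

```python
from typing import Callable, Dict, List


def fake_jit(source: str) -> Callable[[float], float]:
    # Stand-in for the real TCC/LLVM compilation step.
    return {"square": lambda x: x * x, "cube": lambda x: x ** 3}[source]


class LazyEvaluatorCache:
    """Compile each registered function at most once, as compile_functions_* now do."""

    def __init__(self) -> None:
        self.sources: Dict[int, str] = {}
        self.evaluators: Dict[int, Callable[[float], float]] = {}

    def register_function(self, index: int, source: str) -> None:
        self.sources[index] = source  # compilation is deferred to the first solve

    def compile_missing(self) -> List[int]:
        needs_compile = [i for i in self.sources if i not in self.evaluators]
        for i in needs_compile:
            self.evaluators[i] = fake_jit(self.sources[i])
        return needs_compile


cache = LazyEvaluatorCache()
cache.register_function(0, "square")
assert cache.compile_missing() == [0]  # first optimize(): JIT work happens
assert cache.compile_missing() == []   # re-opt: everything is already compiled
```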
57 changes: 28 additions & 29 deletions src/pyoptinterface/_src/jit_c.py
@@ -32,20 +32,26 @@ def __init__(self, libtcc_path=libtcc_path):
# Initialize libtcc function prototypes
self._initialize_function_prototypes()

# store all TCC states
self.states = []

self.source_codes = []

def create_state(self):
# Create a new TCC state
self.state = self.libtcc.tcc_new()
state = self.libtcc.tcc_new()

# Ensure the state was successfully created
if not self.state:
if not state:
raise Exception("Failed to create TCC state")

# Set the output type to memory
if self.libtcc.tcc_set_output_type(self.state, TCC_OUTPUT_MEMORY) == -1:
self.cleanup()
if self.libtcc.tcc_set_output_type(state, TCC_OUTPUT_MEMORY) == -1:
raise Exception("Failed to set output type")

# relocate has been called
self.relocated = False
self.states.append(state)

return state

def _initialize_function_prototypes(self):
libtcc = self.libtcc
@@ -60,37 +66,30 @@ def _initialize_function_prototypes(self):
libtcc.tcc_get_symbol.argtypes = [TCCState, ctypes.c_char_p]
libtcc.tcc_get_symbol.restype = ctypes.c_void_p

def compile_string(self, c_code):
# Compile C code string
if self.libtcc.tcc_compile_string(self.state, c_code) == -1:
def compile_string(self, state, c_code: str):
if self.libtcc.tcc_compile_string(state, c_code.encode()) == -1:
raise Exception("Failed to compile code")

self.relocated = False
if self.libtcc.tcc_relocate(state) == -1:
raise Exception("Failed to relocate")

def add_symbol(self, symbol_name, symbol_address):
self.source_codes.append(c_code)

def add_symbol(self, state, symbol_name: str, symbol_address):
# Add a symbol to the TCC state
if self.libtcc.tcc_add_symbol(self.state, symbol_name, symbol_address) == -1:
if (
self.libtcc.tcc_add_symbol(state, symbol_name.encode(), symbol_address)
== -1
):
raise Exception(f"Failed to add symbol {symbol_name} to TCC state")

self.relocated = False

def get_symbol(self, symbol_name):
if not self.relocated:
if self.libtcc.tcc_relocate(self.state) == -1:
raise Exception("Failed to relocate")
self.relocated = True
def get_symbol(self, state, symbol_name: str):
# Get the symbol for the compiled function
symbol = self.libtcc.tcc_get_symbol(self.state, symbol_name)
symbol = self.libtcc.tcc_get_symbol(state, symbol_name.encode())
if not symbol:
raise Exception(f"Symbol {symbol_name.decode()} not found")
raise Exception(f"Symbol {symbol_name} not found")
return symbol

def cleanup(self):
# Clean up the TCC state
if self.state:
self.libtcc.tcc_delete(self.state)
self.state = None

def __del__(self):
# Ensure clean up is called when the instance is destroyed
self.cleanup()
for state in self.states:
self.libtcc.tcc_delete(state)
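`TCCJITCompiler` no longer holds a single implicit `self.state`: each call to `create_state()` returns an independent TCC state, `compile_string` compiles and relocates that state immediately, and `__del__` frees all stored states. This is what allows a later re-opt to compile newly registered functions into a fresh state without invalidating symbols handed out from earlier ones. A short sketch of the new flow, using only the methods defined in this diff (it assumes the module path `pyoptinterface._src.jit_c` and that libtcc is available on the system):

```python
import ctypes

from pyoptinterface._src.jit_c import TCCJITCompiler  # module path as in this commit

compiler = TCCJITCompiler()

# Each compilation unit gets its own state.
state = compiler.create_state()
compiler.compile_string(state, "double square(double x) { return x * x; }")

# Symbols are looked up per state; strings are encoded inside the new helpers.
square_ptr = compiler.get_symbol(state, "square")
square = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(square_ptr)
assert square(3.0) == 9.0

# A later re-opt can compile additional functions into a separate state
# without touching the symbols already obtained from the first one.
state2 = compiler.create_state()
compiler.compile_string(state2, "double cube(double x) { return x * x * x; }")
cube_ptr = compiler.get_symbol(state2, "cube")
```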
