diff --git a/pydcop/algorithms/amaxsum.py b/pydcop/algorithms/amaxsum.py
new file mode 100644
index 00000000..9458607d
--- /dev/null
+++ b/pydcop/algorithms/amaxsum.py
@@ -0,0 +1,527 @@
+# BSD-3-Clause License
+#
+# Copyright 2017 Orange
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""
+
+MaxSum: Belief-propagation DCOP algorithm
+-----------------------------------------
+
+Asynchronous implementation of the MaxSum algorithm.
+
+We make as few assumptions as possible about the way the algorithm is run,
+especially about the distribution of variables and factors on agents.
+In particular, we do not force a factor and a variable to belong to the
+same agent; variables and factors are thus implemented completely
+independently.
+To run the algorithm, factors and variables must be distributed on the
+agents that will 'run' them.
+
+"""
+
+
+import logging
+
+from collections import defaultdict
+
+from pydcop.dcop.objects import VariableNoisyCostFunc, Variable
+from pydcop.algorithms import AlgoParameterDef, ComputationDef
+from pydcop.algorithms import maxsum
+from pydcop.dcop.relations import generate_assignment_as_dict
+from pydcop.infrastructure.computations import (
+    DcopComputation,
+    VariableComputation,
+    register,
+)
+
+# Avoid using symbolic infinity as it is currently not correctly
+# (de)serialized
+# INFINITY = float('inf')
+INFINITY = 100000
+
+STABILITY_COEFF = 0.1
+
+HEADER_SIZE = 0
+UNIT_SIZE = 1
+
+
+SAME_COUNT = 4
+
+# constants for memory costs and capacity
+FACTOR_UNIT_SIZE = 1
+VARIABLE_UNIT_SIZE = 1
+
+GRAPH_TYPE = "factor_graph"
+logger = logging.getLogger("pydcop.maxsum")
+
+
+def build_computation(comp_def: ComputationDef):
+    if comp_def.node.type == "VariableComputation":
+        logger.debug(f"Building variable computation {comp_def}")
+        return MaxSumVariableComputation(comp_def=comp_def)
+    if comp_def.node.type == "FactorComputation":
+        logger.debug(f"Building factor computation {comp_def}")
+        return MaxSumFactorComputation(comp_def=comp_def)
+
+
+# MaxSum and AMaxSum have the same definitions for communication load
+# and computation footprints.
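These two callables are what distribution methods consume when sizing and placing computations on agents; the API tests further down in this patch pass them exactly like this. A minimal sketch, assuming an existing `dcop` object whose agents are already defined:

    from pydcop.algorithms import amaxsum
    from pydcop.computations_graph import factor_graph
    from pydcop.distribution import adhoc

    # Build the factor graph for the DCOP and distribute it on the agents,
    # using amaxsum's memory and communication models to guide placement.
    cg = factor_graph.build_computation_graph(dcop)
    dist = adhoc.distribute(
        cg,
        dcop.agents.values(),
        computation_memory=amaxsum.computation_memory,
        communication_load=amaxsum.communication_load,
    )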
+computation_memory = maxsum.computation_memory
+communication_load = maxsum.communication_load
+
+algo_params = [
+    AlgoParameterDef("infinity", "int", None, 10000),
+    AlgoParameterDef("stability", "float", None, STABILITY_COEFF),
+    AlgoParameterDef("damping", "float", None, 0.0),
+]
+
+
+def approx_match(costs, prev_costs):
+    """
+    Check if a cost message matches the previous message.
+
+    Costs are considered to match if the relative variation is below
+    STABILITY_COEFF.
+
+    :param costs: costs as a dict val -> cost
+    :param prev_costs: previous costs as a dict val -> cost
+    :return: True if the costs match
+    """
+
+    for d, c in costs.items():
+        prev_c = prev_costs[d]
+        if prev_c != c:
+            delta = abs(prev_c - c)
+            if prev_c + c != 0:
+                if not ((2 * delta / abs(prev_c + c)) < STABILITY_COEFF):
+                    return False
+            else:
+                return False
+    return True
+
+
+class MaxSumFactorComputation(DcopComputation):
+    """
+    MaxSumFactorComputation encapsulates the algorithm running at a
+    factor's node.
+
+    """
+
+    def __init__(self, comp_def=None):
+        assert comp_def.algo.algo == "amaxsum"
+        super().__init__(comp_def.node.factor.name, comp_def)
+        self.mode = comp_def.algo.mode
+        self.factor = comp_def.node.factor
+
+        # costs: messages from our variables, used to store the content of
+        # the messages received from our variables.
+        # v -> d -> costs
+        # For each variable, we keep a dict mapping the values for this
+        # variable to an associated cost.
+        self._costs = {}
+
+        # A dict var_name -> (message, count)
+        self._prev_messages = defaultdict(lambda: (None, 0))
+
+        self._valid_assignments_cache = None
+        self._valid_assignments()
+
+    @property
+    def variables(self):
+        """
+        :return: The list of variable objects the factor depends on.
+        """
+        return self.factor.dimensions
+
+    def footprint(self):
+        return computation_memory(self.computation_def.node)
+
+    def on_start(self):
+        # FIXME: remove, return value not used any more
+        msg_count, msg_size = 0, 0
+
+        # Only unary factors (leaves in the graph) need to send their costs
+        # at init. Each leaf factor sends its costs to its only variable.
+        # When possible it is better to use a variable with integrated costs
+        # instead of a variable with a unary relation representing costs.
+        if len(self.variables) == 1:
+            self.logger.warning("Sending init costs of unary factor %s", self.name)
+            msg_count, msg_size = self._init_msg()
+
+        return {"num_msg_out": msg_count, "size_msg_out": msg_size}
+
+    def _init_msg(self):
+        # FIXME: remove method: can be done directly in on_start
+        msg_debug = []  # FIXME: remove, unused
+        msg_count, msg_size = 0, 0  # FIXME: remove, unused
+
+        for v in self.variables:
+            costs_v = self._costs_for_var(v)
+            msg_size += self._send_costs(v.name, costs_v)
+            msg_count += 1
+            msg_debug.append((v.name, costs_v))
+
+        if self.logger.isEnabledFor(logging.DEBUG):
+            debug = "Unary factor : init msg {} \n".format(self.name)
+            for dest, msg in msg_debug:
+                debug += " * {} -> {} : {}\n".format(self.name, dest, msg)
+            self.logger.debug(debug + "\n")
+        else:
+            self.logger.info(
+                "Init messages for %s to %s", self.name, [c for c, _ in msg_debug]
+            )
+
+        return msg_count, msg_size  # FIXME: remove, unused
+
+    def _send_costs(self, var_name, costs):
+        # FIXME: remove, use post_msg directly (one line, same size as method call!)
+        msg = maxsum.MaxSumMessage(costs)
+        size = msg.size
+        self.post_msg(var_name, msg)
+        return size
+
+    @register("max_sum")
+    def _on_maxsum_msg(self, var_name, msg, t):
+        """
+        Handles messages from variable nodes.
+
+        :param var_name: name of the variable node that sent this message
+        :param msg: the costs sent by the variable var_name,
+        a d -> cost table, where
+            * d is a value from the domain of var_name
+            * cost is the sum of the costs received from all other factors
+              except this one, for the value d.
+        """
+        self._costs[var_name] = msg.costs
+        send, no_send = [], []
+        debug = ""
+        msg_count, msg_size = 0, 0
+
+        # Wait until we have received costs from all our variables before
+        # sending our own costs
+        if len(self._costs) == len(self.factor.dimensions):
+            stable = True
+            for v in self.variables:
+                if v.name != var_name:
+                    costs_v = self._costs_for_var(v)
+                    same, same_count = self._match_previous(v.name, costs_v)
+                    if not same or same_count < SAME_COUNT:
+                        debug += " * SEND {} -> {} : {}\n".format(
+                            self.name, v.name, costs_v
+                        )
+                        msg_size += self._send_costs(v.name, costs_v)
+                        send.append(v.name)
+                        msg_count += 1
+                        self._prev_messages[v.name] = costs_v, same_count + 1
+                    else:
+                        no_send.append(v.name)
+                        debug += " * NO-SEND {} -> " "{} : {}\n".format(
+                            self.name, v.name, costs_v
+                        )
+        else:
+            debug += (
+                " * Still waiting for costs from all"
+                " the variables {}\n".format(self._costs.keys())
+            )
+
+        if self.logger.isEnabledFor(logging.DEBUG):
+            self.logger.debug(
+                "ON %s -> %s message : %s \n%s", var_name, self.name, msg.costs, debug
+            )
+        else:
+            self.logger.info(
+                "On cost msg from %s, send messages to %s - no " "send %s",
+                var_name,
+                send,
+                no_send,
+            )
+
+        return {"num_msg_out": msg_count, "size_msg_out": msg_size}
+
+    def _costs_for_var(self, variable):
+        """
+        Produces the message for the given variable.
+
+        The content of this message is a table d -> mincost where
+            * d is a value of the domain of the variable
+            * mincost is the minimum value of f when the variable takes the
+              value d
+
+        :param variable: the variable we want to send the costs to
+        :return: a mapping {value: cost},
+            where value ranges over the domain of 'variable'
+            and cost is the optimal cost when 'variable' == 'value'
+
+        """
+        costs = {}
+        for d in variable.domain:
+            # For each value d in the domain of v, calculate min cost(a),
+            # where a is any assignment with v = d :
+            # cost(a) = f(a) + sum(costvar())
+            # where costvar is the cost received from our other variables
+
+            mode_opt = INFINITY if self.mode == "min" else -INFINITY
+            optimal_value = mode_opt
+
+            for assignment in self._valid_assignments():
+                if assignment[variable.name] != d:
+                    continue
+                f_val = self.factor(**assignment)
+                if f_val == INFINITY:
+                    continue
+
+                sum_cost = 0
+                # sum of the costs from all other variables
+                for another_var, var_value in assignment.items():
+                    if another_var == variable.name:
+                        continue
+                    if another_var in self._costs:
+                        if var_value not in self._costs[another_var]:
+                            # If there is no cost for this value, it means it
+                            # is infinite (as infinite costs are not included
+                            # in messages) and we can stop adding costs.
+                            sum_cost = mode_opt
+                            break
+                        sum_cost += self._costs[another_var][var_value]
+                    else:
+                        # we have not yet received costs from this variable
+                        pass
+
+                current_val = f_val + sum_cost
+                if (optimal_value > current_val and self.mode == "min") or (
+                    optimal_value < current_val and self.mode == "max"
+                ):
+                    optimal_value = current_val
+
+            if optimal_value != mode_opt:
+                costs[d] = optimal_value
+
+        return costs
+
+    def _valid_assignments(self):
+        """
+        Populates a cache with all valid assignments for the factor
+        managed by the algorithm.
+
+        :return: a list of all assignments returning a non-infinite value
+        """
+        # FIXME: extract as a function
+        # FIXME: does not take into account min / max
+        if self._valid_assignments_cache is None:
+            self._valid_assignments_cache = []
+            all_vars = self.factor.dimensions[:]
+            for assignment in generate_assignment_as_dict(all_vars):
+                if self.factor(**assignment) != INFINITY:
+                    self._valid_assignments_cache.append(assignment)
+        return self._valid_assignments_cache
+
+    def _match_previous(self, v_name, costs):
+        """
+        Check if a cost message for a variable v_name matches the previous
+        message sent to that variable.
+
+        :param v_name: variable name
+        :param costs: costs sent to this variable
+        :return:
+        """
+        prev_costs, count = self._prev_messages[v_name]
+        if prev_costs is not None:
+            same = approx_match(costs, prev_costs)
+            return same, count
+        else:
+            return False, 0
+
+
+class MaxSumVariableComputation(VariableComputation):
+    """
+    MaxSum computation for a variable.
+
+    Parameters
+    ----------
+    comp_def: ComputationDef
+        the definition of this computation: it provides the variable, the
+        links to the factors and the algorithm's parameters.
+    """
+
+    def __init__(self, comp_def: ComputationDef = None):
+        super().__init__(comp_def.node.variable, comp_def)
+
+        assert comp_def.algo.algo == "amaxsum"
+        assert (comp_def.algo.mode == "min") or (comp_def.algo.mode == "max")
+
+        self.mode = comp_def.algo.mode
+
+        # Add noise to the variable, on top of its cost, if needed
+        # TODO: make this configurable through parameters
+        self._variable = VariableNoisyCostFunc(
+            self.variable.name,
+            self.variable.domain,
+            cost_func=lambda x: self.variable.cost_for_val(x),
+            initial_value=self.variable.initial_value,
+        )
+
+        # The list of factors (names) this variable is linked with
+        self._factors = [link.factor_node for link in comp_def.node.links]
+
+        # costs: this dict is used to store, for each value of the domain,
+        # the associated cost sent by each factor this variable is involved
+        # with. { factor : {domain value : cost }}
+        self._costs = {}
+
+        self._prev_messages = defaultdict(lambda: (None, 0))
+
+        self.damping = comp_def.algo.params["damping"]
+        self.logger.info("Running maxsum with damping %s", self.damping)
+
+    def on_start(self) -> None:
+        """
+        Startup handler for MaxSum variable computations.
+
+        At startup, a variable selects an initial value and sends its costs
+        to the factors it depends on.
+        """
+
+        # select our initial value
+        if self.variable.initial_value:
+            self.value_selection(self.variable.initial_value, None)
+        else:
+            self.value_selection(
+                *maxsum.select_value(self.variable, self._costs, self.mode)
+            )
+        self.logger.info(f"Initial value selected {self.current_value}")
+
+        # Send our costs to the factors we depend on.
+        for f in self._factors:
+            costs_f = maxsum.costs_for_factor(
+                self.variable, f, self._factors, self._costs
+            )
+            self.logger.info(
+                f"Sending init msg from variable {self.name} to factor {f} : {costs_f}"
+            )
+            self.post_msg(f, maxsum.MaxSumMessage(costs_f))
+
+    @register("max_sum")
+    def _on_maxsum_msg(self, factor_name, msg, t):
+        """
+        Handles a cost message from a neighbor factor.
+
+        :param factor_name: the name of the factor that sent us this message.
+        :param msg: a message whose content is a map { d -> cost } where:
+            * d is a value from the domain of this variable
+            * cost is the minimum cost of the factor when taking value d
+        """
+        self._costs[factor_name] = msg.costs
+
+        # select our value
+        self.value_selection(
+            *maxsum.select_value(self.variable, self._costs, self.mode)
+        )
+
+        # Compute and send our own costs to all other factors.
+        # If our variable has its own costs, we must send them back even
+        # to the factor that sent us this message, as integrated costs are
+        # similar to a unary factor, and with a unary factor we would have
+        # sent these costs back to the original sender:
+        # factor -> variable -> unary_cost_factor -> variable -> factor
+        fs = self._factors.copy()
+        # if not self.var_with_cost:
+        fs.remove(factor_name)
+
+        self._compute_and_send_costs(fs)
+
+    def _compute_and_send_costs(self, factor_names):
+        """
+        Computes and sends cost messages for all factors in factor_names.
+
+        :param factor_names: a list of names of factors to compute and send
+        messages to.
+        """
+        debug = ""
+        stable = True
+        send, no_send = [], []
+        msg_count, msg_size = 0, 0
+        for f_name in factor_names:
+            costs_f = maxsum.costs_for_factor(
+                self.variable, f_name, self._factors, self._costs
+            )
+            same, same_count = self._match_previous(f_name, costs_f)
+            if not same or same_count < SAME_COUNT:
+                debug += " * SEND : {} -> {} : {}\n".format(self.name, f_name, costs_f)
+                msg_size += self._send_costs(f_name, costs_f)
+                send.append(f_name)
+                self._prev_messages[f_name] = costs_f, same_count + 1
+                stable = False
+                msg_count += 1
+
+            else:
+                no_send.append(f_name)
+                debug += " * NO-SEND : {} -> {} : {}\n".format(
+                    self.name, f_name, costs_f
+                )
+
+        # Display sent messages
+        if self.logger.isEnabledFor(logging.DEBUG):
+            self.logger.debug("Sending messages from %s :\n%s", self.name, debug)
+        else:
+            self.logger.info(
+                "Sending messages from %s to %s, no_send %s", self.name, send, no_send
+            )
+
+        return msg_count, msg_size
+
+    def _send_costs(self, factor_name, costs):
+        """
+        Sends a cost message and returns the size of the message sent.
+        :param factor_name:
+        :param costs:
+        :return:
+        """
+        msg = maxsum.MaxSumMessage(costs)
+        self.post_msg(factor_name, msg)
+        return msg.size
+
+    def _match_previous(self, f_name, costs):
+        """
+        Check if a cost message for a factor f_name matches the previous
+        message sent to that factor.
+
+        :param f_name: factor name
+        :param costs: costs sent to this factor
+        :return:
+        """
+        prev_costs, count = self._prev_messages[f_name]
+        if prev_costs is not None:
+            same = approx_match(costs, prev_costs)
+            return same, count
+        else:
+            return False, 0
+
+
diff --git a/pydcop/algorithms/maxsum.py b/pydcop/algorithms/maxsum.py
index 3fb7c8c2..7e7e4114 100644
--- a/pydcop/algorithms/maxsum.py
+++ b/pydcop/algorithms/maxsum.py
@@ -32,73 +32,54 @@
 MaxSum: Belief-propagation DCOP algorithm
 -----------------------------------------
 
-Implementation of the MaxSum algorithm
-
-We try to make as few assumption on the way the algorithm is run,
-and especially on the distribution of variables and factor on agents.
-In particular, we do not force here a factor and a variable to belong to
-the same agent and thus variables and factors are implemented completely
-independently.
-To run the Algorithm, factor and variable must be distributed on agents (
-who will 'run' them).
+Synchronous implementation of the MaxSum algorithm """ - - import logging -from random import choice - -from typing import Dict, Union, Tuple, Any, List - +from typing import Optional, List, Dict, Any, Tuple, Union from collections import defaultdict + +from pydcop.algorithms import ComputationDef, AlgoParameterDef from pydcop.computations_graph.factor_graph import ( - VariableComputationNode, FactorComputationNode, + VariableComputationNode, ) -from pydcop.dcop.objects import VariableNoisyCostFunc, Variable -from pydcop.algorithms import AlgoParameterDef, ComputationDef -from pydcop.dcop.relations import generate_assignment_as_dict +from pydcop.dcop.objects import Variable, VariableNoisyCostFunc +from pydcop.dcop.relations import Constraint, generate_assignment_as_dict from pydcop.infrastructure.computations import ( - Message, DcopComputation, + SynchronousComputationMixin, VariableComputation, register, + Message, ) +GRAPH_TYPE = "factor_graph" +logger = logging.getLogger("pydcop.maxsum") + # Avoid using symbolic infinity as it is currently not correctly # (de)serialized # INFINITY = float('inf') INFINITY = 100000 -STABILITY_COEFF = 0.1 - HEADER_SIZE = 0 UNIT_SIZE = 1 - -SAME_COUNT = 4 - # constants for memory costs and capacity FACTOR_UNIT_SIZE = 1 VARIABLE_UNIT_SIZE = 1 -GRAPH_TYPE = "factor_graph" -logger = logging.getLogger("pydcop.maxsum") - def build_computation(comp_def: ComputationDef): if comp_def.node.type == "VariableComputation": - factor_names = [l.factor_node for l in comp_def.node.links] - logger.debug( - "building variable computation {} - {}".format(comp_def.node, factor_names) - ) - return VariableAlgo(comp_def.node.variable, factor_names, comp_def=comp_def) + logger.debug(f"Building variable computation {comp_def}") + return MaxSumVariableComputation(comp_def=comp_def) if comp_def.node.type == "FactorComputation": - logger.debug("building factor computation {}".format(comp_def.node)) - return FactorAlgo(comp_def.node.factor, comp_def=comp_def) + logger.debug(f"Building factor computation {comp_def}") + return MaxSumFactorComputation(comp_def=comp_def) def computation_memory( @@ -110,10 +91,10 @@ def computation_memory( ----- Two formulations of the memory footprint are possible for factors : * If the constraint is given by a function (intentional), the factor - only needs to keep the costs sent by each variable and the footprint + only needs to keep the costs sent by each variable and the footprint is the total size of these cost vectors. * If the constraints is given extensively the size of the hypercube of - costs must also be accounted for. + costs must also be accounted for. Parameters ---------- @@ -186,14 +167,6 @@ def communication_load( ) -algo_params = [ - AlgoParameterDef("infinity", "int", None, 10000), - AlgoParameterDef("stability", "float", None, 0.1), - AlgoParameterDef("damping", "float", None, 0.0), - AlgoParameterDef("stability", "float", None, STABILITY_COEFF), -] - - class MaxSumMessage(Message): def __init__(self, costs: Dict): super().__init__("max_sum", None) @@ -225,7 +198,7 @@ def _simple_repr(self): r = {"__module__": self.__module__, "__qualname__": self.__class__.__qualname__} # When building the simple repr when transform the dict into a pair - # of list to avoid problem when serializing / deserializing the repr. + # of list to avoid problem when serializing / de-serializing the repr. 
# The costs dic often contains int as key, when converting to an from # json (which only support string for keys in dict), we would # otherwise loose the type information and restore the dict with str @@ -243,640 +216,178 @@ def _from_repr(cls, r): return MaxSumMessage(dict(zip(vals, costs))) -def approx_match(costs, prev_costs): - """ - Check if a cost message match the previous message. - - Costs are considered to match if the variation is bellow STABILITY_COEFF. - - :param costs: costs as a dict val -> cost - :param prev_costs: previous costs as a dict val -> cost - :return: True if the cost match - """ - - for d, c in costs.items(): - prev_c = prev_costs[d] - if prev_c != c: - delta = abs(prev_c - c) - if prev_c + c != 0: - if not ((2 * delta / abs(prev_c + c)) < STABILITY_COEFF): - return False - else: - return False - return True +# Some semantic type definition, to make things easier to read and check: +VarName = str +FactorName = str +VarVal = Any +Cost = float -class FactorAlgo(DcopComputation): - """ - FactorAlgo encapsulate the algorithm running at factor's node. - - """ - - def __init__( - self, - factor, - name=None, - msg_sender=None, - infinity=INFINITY, - stability=STABILITY_COEFF, - comp_def=None, - ): - """ - Factor algorithm (factor can be n-ary). - Variables does not need to be listed explicitly, they are taken from - the factor function. - - :param factor: a factor object implementing the factor protocol , - :param msg_sender: the object that will be used to send messages to - neighbors, it must have a post_msg(sender, target_name, name) method. - """ - name = name if name is not None else factor.name - super().__init__(name, comp_def) - +class MaxSumFactorComputation(SynchronousComputationMixin, DcopComputation): + def __init__(self, comp_def: ComputationDef): assert comp_def.algo.algo == "maxsum" - assert (comp_def.algo.mode == "min") or (comp_def.algo.mode == "max") + super().__init__(comp_def.node.factor.name, comp_def) - self._factor = factor self.mode = comp_def.algo.mode - - global INFINITY, STABILITY_COEFF - INFINITY = infinity - STABILITY_COEFF = stability + self._factor = comp_def.node.factor # costs : messages for our variables, used to store the content of the # messages received from our variables. - # v -> d -> costs + # {v -> {d -> costs} } # For each variable, we keep a dict mapping the values for this # variable to an associated cost. - self._costs = {} - - self._msg_sender = msg_sender + self._costs: Dict[VarName, Dict[VarVal:Cost]] = {} # A dict var_name -> (message, count) self._prev_messages = defaultdict(lambda: (None, 0)) - if len(self.variables) <= 1: - self._is_stable = True - else: - self._is_stable = False - - self._valid_assignments_cache = None - self._valid_assignments() + @register("max_sum") + def on_msg(self, variable_name, recv_msg, t): + # No implementation here, simply used to declare the kind of message supported + # by this computation + pass - @property - def name(self): - return self._name + def on_new_cycle(self, messages, cycle_id) -> Optional[List]: + pass - @property - def variables(self): - """ - :return: The list of variables objects the factor depends on. 
- """ - return self._factor.dimensions - @property - def factor(self): - return self._factor - @property - def is_stable(self): - return self._is_stable - - def footprint(self): - return computation_memory(self.computation_def.node) - - def on_start(self): - msg_count, msg_size = 0, 0 - - # Only unary factors (leaf in the graph) needs to send their costs at - # init.Each leaf factor sends his costs to its only variable. - # When possible it is better to use a variable with integrated costs - # instead of a variable with an unary relation representing costs. - if len(self.variables) == 1: - self.logger.warning("Sending init costs of unary factor %s", self.name) - msg_count, msg_size = self._init_msg() - - return {"num_msg_out": msg_count, "size_msg_out": msg_size} - - def _init_msg(self): - msg_debug = [] - msg_count, msg_size = 0, 0 - - for v in self.variables: - costs_v = self._costs_for_var(v) - msg_size += self._send_costs(v.name, costs_v) - msg_count += 1 - msg_debug.append((v.name, costs_v)) - - if self.logger.isEnabledFor(logging.DEBUG): - debug = "Unary factor : init msg {} \n".format(self.name) - for dest, msg in msg_debug: - debug += " * {} -> {} : {}\n".format(self.name, dest, msg) - self.logger.debug(debug + "\n") - else: - self.logger.info( - "Init messages for %s to %s", self.name, [c for c, _ in msg_debug] - ) +class MaxSumVariableComputation(SynchronousComputationMixin, VariableComputation): + def __init__(self, comp_def: ComputationDef): + super().__init__(comp_def.node.variable, comp_def) + assert comp_def.algo.algo == "maxsum" - return msg_count, msg_size + self.factor_names = [link.factor_node for link in comp_def.node.links] + self.mode = comp_def.algo.mode + self.costs = {} - def _send_costs(self, var_name, costs): - msg = MaxSumMessage(costs) - size = msg.size - self.post_msg(var_name, msg) - return size + # Add noise to the variable, on top of cost if needed + # TODO: make this configurable through parameters + self._variable = VariableNoisyCostFunc( + self.variable.name, + self.variable.domain, + cost_func=lambda x: self.variable.cost_for_val(x), + initial_value=self.variable.initial_value, + ) @register("max_sum") - def _on_maxsum_msg(self, var_name, msg, t): - """ - Handling messages from variables nodes. - - :param var_name: name of the variable node that sent this messages - :param msg: the cost sent by the variable var_name - a d -> cost table, where - * d is a value from the domain of var_name - * cost is the sum of the costs received from all other factors - except f for this value d for the domain. 
- """ - self._costs[var_name] = msg.costs - send, no_send = [], [] - debug = "" - msg_count, msg_size = 0, 0 - - # Wait until we received costs from all our variables before sending - # our own costs - if len(self._costs) == len(self._factor.dimensions): - stable = True - for v in self.variables: - if v.name != var_name: - costs_v = self._costs_for_var(v) - same, same_count = self._match_previous(v.name, costs_v) - if not same or same_count < SAME_COUNT: - debug += " * SEND {} -> {} : {}\n".format( - self.name, v.name, costs_v - ) - msg_size += self._send_costs(v.name, costs_v) - send.append(v.name) - msg_count += 1 - self._prev_messages[v.name] = costs_v, same_count + 1 - self._is_stable = False - else: - no_send.append(v.name) - debug += " * NO-SEND {} -> " "{} : {}\n".format( - self.name, v.name, costs_v - ) - self._is_stable = stable + def on_msg(self, variable_name, recv_msg, t): + # No implementation here, simply used to declare the kind of message supported + # by this computation + pass + + def on_start(self) -> None: + # Select our initial value + if self.variable.initial_value is not None: + self.value_selection(self.variable.initial_value) else: - debug += ( - " * Still waiting for costs from all" - " the variables {}\n".format(self._costs.keys()) - ) + self.value_selection(select_value(self.variable, self.costs, self.mode)) + self.logger.info(f"Initial value selected {self.current_value}") - if self.logger.isEnabledFor(logging.DEBUG): - self.logger.debug( - "ON %s -> %s message : %s \n%s", var_name, self.name, msg.costs, debug - ) - else: + # Send our costs to the factors we depends on. + for f in self._factors: + costs_f = costs_for_factor(self.variable, f, self._factors, self._costs) self.logger.info( - "On cost msg from %s, send messages to %s - no " "send %s", - var_name, - send, - no_send, - ) - - return {"num_msg_out": msg_count, "size_msg_out": msg_size} - - def _costs_for_var(self, variable): - """ - Produce the message for the variable v. - - The content of this message is a table d -> mincost where - * d is a value of the domain of the variable v - * mincost is the minimum value of f when the variable v take the - value d - - :param variable: the variable we want to send the costs to - :return: a mapping { value => cost} - where value is all the values from the domain of 'variable' - costs is the cost when 'variable' == 'value' - - """ - costs = {} - for d in variable.domain: - # for each value d in the domain of v, calculate min cost (a) - # where a is any assignment where v = d - # cost (a) = f(a) + sum( costvar()) - # where costvar is the cost received from our other variables - - mode_opt = INFINITY if self.mode == "min" else -INFINITY - optimal_value = mode_opt - - for assignment in self._valid_assignments(): - if assignment[variable.name] != d: - continue - f_val = self._factor(**assignment) - if f_val == INFINITY: - continue - - sum_cost = 0 - # sum of the costs from all other variables - for another_var, var_value in assignment.items(): - if another_var == variable.name: - continue - if another_var in self._costs: - if var_value not in self._costs[another_var]: - # If there is no cost for this value, it means it - # is infinite (as infinite cost are not included - # in messages) and we can stop adding costs. 
- sum_cost = mode_opt - break - sum_cost += self._costs[another_var][var_value] - else: - # we have not received yet costs from variable v - pass - - current_val = f_val + sum_cost - if (optimal_value > current_val and self.mode == "min") or ( - optimal_value < current_val and self.mode == "max" - ): - - optimal_value = current_val - - if optimal_value != mode_opt: - costs[d] = optimal_value - - return costs - - def _valid_assignments(self): - """ - Populates a cache with all valid assignments for the factor - managed by the algorithm. - - :return: a list of all assignments returning a non-infinite value - """ - if self._valid_assignments_cache is None: - self._valid_assignments_cache = [] - all_vars = self._factor.dimensions[:] - for assignment in generate_assignment_as_dict(all_vars): - if self._factor(**assignment) != INFINITY: - self._valid_assignments_cache.append(assignment) - return self._valid_assignments_cache - - def _match_previous(self, v_name, costs): - """ - Check if a cost message for a variable v_name match the previous - message sent to that variable. - - :param v_name: variable name - :param costs: costs sent to this factor - :return: - """ - prev_costs, count = self._prev_messages[v_name] - if prev_costs is not None: - same = approx_match(costs, prev_costs) - return same, count - else: - return False, 0 - - -class VariableAlgo(VariableComputation): - def __init__( - self, - variable: Variable, - factor_names: List[str], - msg_sender=None, - comp_def: ComputationDef = None, - ): - """ - - :param variable: variable object - :param factor_names: a list containing the names of the factors that - depend on the variable managed by this algorithm - :param msg_sender: the object that will be used to send messages to - neighbors, it must have a post_msg(sender, target_name, name) method. - """ - super().__init__(variable, comp_def) - - assert comp_def.algo.algo == "maxsum" - assert (comp_def.algo.mode == "min") or (comp_def.algo.mode == "max") - - self.mode = comp_def.algo.mode - - # self._v = variable.clone() - # Add noise to the variable, on top of cost if needed - if variable.has_cost: - self._v = VariableNoisyCostFunc( - variable.name, - variable.domain, - cost_func=lambda x: variable.cost_for_val(x), - initial_value=variable.initial_value, + f"Sending init msg from variable {self.name} to factor {f} : {costs_f}" ) - else: - self._v = VariableNoisyCostFunc( - variable.name, - variable.domain, - cost_func=lambda x: 0, - initial_value=variable.initial_value, - noise_level=0.0001 - ) - - self.var_with_cost = True + self.post_msg(f, MaxSumMessage(costs_f)) - # the currently selected value, will evolve when the algorithm is - # still running. - # if self._v.initial_value: - # self.value_selection(self._v.initial_value, None) - # - # elif self.var_with_cost: - # current_cost, current_value =\ - # min(((self._v.cost_for_val(dv), dv) for dv in self._v.domain )) - # self.value_selection(current_value, current_cost) + def on_new_cycle(self, messages, cycle_id) -> Optional[List]: + pass - # The list of factors (names) this variables is linked with - self._factors = factor_names - # The object used to send messages to factor - self._msg_sender = msg_sender +def select_value(variable: Variable, costs: Dict, mode: str) -> Tuple[Any, float]: + """ + select the value for `variable` with the best cost / reward (depending on `mode`) - # costs : this dict is used to store, for each value of the domain, - # the associated cost sent by each factor this variable is involved - # with. 
{ factor : {domain value : cost }} - self._costs = {} + Returns + ------- + a Tuple containing the selected value and the corresponding cost for + this computation. + """ - self._is_stable = False - self._prev_messages = defaultdict(lambda: (None, 0)) + # If we have received costs from all our factor, we can select a + # value from our domain. + d_costs = {d: variable.cost_for_val(d) for d in variable.domain} + for d in variable.domain: + for f_costs in costs.values(): + if d not in f_costs: + # As infinite costs are not included in messages, + # if there is not cost for this value it means the costs + # is infinite and we can stop adding other costs. + d_costs[d] = INFINITY if mode == "min" else -INFINITY + break + d_costs[d] += f_costs[d] - self.damping = comp_def.algo.params["damping"] - self.logger.info("Running maxsum with damping %s", self.damping) + from operator import itemgetter - @property - def domain(self): - # Return a copy of the domain to make sure nobody modifies it. - return self._v.domain[:] + if mode == "min": + optimal_d = min(d_costs.items(), key=itemgetter(1)) + else: + optimal_d = max(d_costs.items(), key=itemgetter(1)) - @property - def factors(self): - """ - :return: a list containing the names of the factors which depend on - the variable managed by this algorithm. - """ - return self._factors[:] - - def footprint(self): - return computation_memory(self.computation_def.node) - - def add_factor(self, factor_name): - """ - Register a factor to this variable. - - All factors depending on a variable MUST be registered so that the - variable algorithm can send cost messages to them. - - :param factor_name: the name of a factor which depends on this - variable. - """ - self._factors.append(factor_name) - - def on_start(self): - init_stats = self._init_msg() - return init_stats - - def _init_msg(self): - # Each variable with integrated costs sends his costs to the factors - # which depends on it. - # A variable with no integrated costs simply sends neutral costs - msg_count, msg_size = 0, 0 - - # select our value - if self.var_with_cost: - self.value_selection(*self._select_value()) - elif self._v.initial_value: - self.value_selection(self._v.initial_value, None) - else: - self.value_selection(choice(self._v.domain)) - self.logger.info("Initial value selected %s ", self.current_value) - - if self.var_with_cost: - costs_factors = {} - for f in self.factors: - costs_f = self._costs_for_factor(f) - costs_factors[f] = costs_f - - if self.logger.isEnabledFor(logging.DEBUG): - debug = "Var : init msgt {} \n".format(self.name) - for dest, msg in costs_factors.items(): - debug += " * {} -> {} : {}\n".format(self.name, dest, msg) - self.logger.debug(debug + "\n") - else: - self.logger.info( - "Sending init msg from %s (with cost) to %s", - self.name, - costs_factors, - ) - - # Sent the messages to the factors - for f, c in costs_factors.items(): - msg_size += self._send_costs(f, c) - msg_count += 1 - else: - c = {d: 0 for d in self._v.domain} - debug = "Var : init msg {} \n".format(self.name) + return optimal_d[0], optimal_d[1] - self.logger.info("Sending init msg from %s to %s", self.name, self.factors) - for f in self.factors: - msg_size += self._send_costs(f, c) - msg_count += 1 - debug += " * {} -> {} : {}\n".format(self.name, f, c) - self.logger.debug(debug + "\n") +def costs_for_factor( + variable: Variable, factor: FactorName, factors: List[Constraint], costs: Dict +) -> Dict[VarVal, Cost]: + """ + Produce the message that must be sent to factor f. 
- return { - "num_msg_out": msg_count, - "size_msg_out": msg_size, - "current_value": self.current_value, - } + The content if this message is a d -> cost table, where + * d is a value from the domain + * cost is the sum of the costs received from all other factors except f + for this value d for the domain. - @register("max_sum") - def _on_maxsum_msg(self, factor_name, msg, t): - """ - Handling cost message from a neighbor factor. - - :param factor_name: the name of that factor that sent us this message. - :param msg: a message whose content is a map { d -> cost } where: - * d is a value from the domain of this variable - * cost if the minimum cost of the factor when taking value d - """ - self._costs[factor_name] = msg.costs - - # select our value - self.value_selection(*self._select_value()) - - # Compute and send our own costs to all other factors. - # If our variable has his own costs, we must sent them back even - # to the factor which sent us this message, as integrated costs are - # similar to an unary factor and with an unary factor we would have - # sent these costs back to the original sender: - # factor -> variable -> unary_cost_factor -> variable -> factor - fs = self.factors - if not self.var_with_cost: - fs.remove(factor_name) - - msg_count, msg_size = self._compute_and_send_costs(fs) - - # return stats about this cycle: - return { - "num_msg_out": msg_count, - "size_msg_out": msg_size, - "current_value": self.current_value, - } - - def _compute_and_send_costs(self, factor_names): - """ - Computes and send costs messages for all factors in factor_names. - - :param factor_names: a list of names of factors to compute and send - messages to. - """ - debug = "" - stable = True - send, no_send = [], [] - msg_count, msg_size = 0, 0 - for f_name in factor_names: - costs_f = self._costs_for_factor(f_name) - same, same_count = self._match_previous(f_name, costs_f) - if not same or same_count < SAME_COUNT: - debug += " * SEND : {} -> {} : {}\n".format(self.name, f_name, costs_f) - msg_size += self._send_costs(f_name, costs_f) - send.append(f_name) - self._prev_messages[f_name] = costs_f, same_count + 1 - stable = False - msg_count += 1 - - else: - no_send.append(f_name) - debug += " * NO-SEND : {} -> {} : {}\n".format( - self.name, f_name, costs_f - ) - self._is_stable = stable - - # Display sent messages - if self.logger.isEnabledFor(logging.DEBUG): - self.logger.debug("Sending messages from %s :\n%s", self.name, debug) - else: - self.logger.info( - "Sending messages from %s to %s, no_send %s", self.name, send, no_send - ) + Parameters + ---------- + variable: Variable + the variable sending the message + factor: str + the name of the factor the message will be sent to + factors: list of Constraints + the constraints this variables depends on + costs: dict + the accumulated costs received by the variable from all factors - return msg_count, msg_size - - def _send_costs(self, factor_name, costs): - """ - Sends a cost messages and return the size of the message sent. - :param factor_name: - :param costs: - :return: - """ - msg = MaxSumMessage(costs) - self.post_msg(factor_name, msg) - return msg.size - - def _select_value(self) -> Tuple[Any, float]: - """ - - Returns - ------- - a Tuple containing the selected value and the corresponding cost for - this computation. - """ - - # If we have received costs from all our factor, we can select a - # value from our domain. 
- if self.var_with_cost: - # If our variable has it's own cost, take them into account - d_costs = {d: self._v.cost_for_val(d) for d in self._v.domain} - else: - d_costs = {d: 0 for d in self._v.domain} - for d in self._v.domain: - for f_costs in self._costs.values(): - if d not in f_costs: - # As infinite costs are not included in messages, - # if there is not cost for this value it means the costs - # is infinite and we can stop adding other costs. - d_costs[d] = INFINITY if self.mode == "min" else -INFINITY - break - d_costs[d] += f_costs[d] - - from operator import itemgetter - - if self.mode == "min": - optimal_d = min(d_costs.items(), key=itemgetter(1)) - else: - optimal_d = max(d_costs.items(), key=itemgetter(1)) - - return optimal_d[0], optimal_d[1] - - def _match_previous(self, f_name, costs): - """ - Check if a cost message for a factor f_name match the previous message - sent to that factor. - - :param f_name: factor name - :param costs: costs sent to this factor - :return: - """ - prev_costs, count = self._prev_messages[f_name] - if prev_costs is not None: - same = approx_match(costs, prev_costs) - return same, count - else: - return False, 0 - - def _costs_for_factor(self, factor_name): - """ - Produce the message that must be sent to factor f. - - The content if this message is a d -> cost table, where - * d is a value from the domain - * cost is the sum of the costs received from all other factors except f - for this value d for the domain. - - :param factor_name: the name of a factor for this variable - :return: the value -> cost table - """ - # If our variable has integrated costs, add them - if self.var_with_cost: - msg_costs = {d: self._v.cost_for_val(d) for d in self._v.domain} - else: - msg_costs = {d: 0 for d in self._v.domain} - - sum_cost = 0 - for d in self._v.domain: - for f in [f for f in self.factors if f != factor_name and f in self._costs]: - f_costs = self._costs[f] - if d not in f_costs: - msg_costs[d] = INFINITY - break - c = f_costs[d] - sum_cost += c - msg_costs[d] += c - - # Experimentally, when we do not normalize costs the algorithm takes - # more cycles to stabilize - # return {d: c for d, c in msg_costs.items() if c != INFINITY} - - # Normalize costs with the average cost, to avoid exploding costs - avg_cost = sum_cost / len(msg_costs) - normalized_msg_costs = { - d: c - avg_cost for d, c in msg_costs.items() if c != INFINITY - } - msg_costs = normalized_msg_costs - - prev_costs, count = self._prev_messages[factor_name] - damped_costs = {} - if prev_costs is not None: - for d, c in msg_costs.items(): - damped_costs[d] = self.damping * prev_costs[d] + (1 - self.damping) * c - self.logger.warning("damping : replace %s with %s", msg_costs, damped_costs) - msg_costs = damped_costs - - return msg_costs + Returns + ------- + Dict: + a dict containing a cost for each value in the domain of the variable + """ + # If our variable has integrated costs, add them + msg_costs = {d: variable.cost_for_val(d) for d in variable.domain} + + sum_cost = 0 + for d in variable.domain: + for f in [f for f in factors if f != factor and f in costs]: + f_costs = costs[f] + if d not in f_costs: + msg_costs[d] = INFINITY + break + c = f_costs[d] + sum_cost += c + msg_costs[d] += c + + # Experimentally, when we do not normalize costs the algorithm takes + # more cycles to stabilize + # return {d: c for d, c in msg_costs.items() if c != INFINITY} + + # Normalize costs with the average cost, to avoid exploding costs + avg_cost = sum_cost / len(msg_costs) + normalized_msg_costs = { 
+ d: c - avg_cost for d, c in msg_costs.items() if c != INFINITY + } + msg_costs = normalized_msg_costs + + # FIXME: restore damping support + # prev_costs, count = self._prev_messages[factor] + # damped_costs = {} + # if prev_costs is not None: + # for d, c in msg_costs.items(): + # damped_costs[d] = self.damping * prev_costs[d] + (1 - self.damping) * c + # self.logger.warning("damping : replace %s with %s", msg_costs, damped_costs) + # msg_costs = damped_costs + + return msg_costs diff --git a/pydcop/algorithms/maxsum_dynamic.py b/pydcop/algorithms/maxsum_dynamic.py index 4153bc49..62f173de 100644 --- a/pydcop/algorithms/maxsum_dynamic.py +++ b/pydcop/algorithms/maxsum_dynamic.py @@ -32,11 +32,12 @@ import logging from pydcop.infrastructure.computations import Message, register -from pydcop.algorithms.maxsum import FactorAlgo, MaxSumMessage, VariableAlgo +from pydcop.algorithms.amaxsum import MaxSumFactorComputation, MaxSumVariableComputation +from pydcop.algorithms.maxsum import MaxSumMessage from pydcop.dcop.relations import NeutralRelation -class DynamicFunctionFactorComputation(FactorAlgo): +class DynamicFunctionFactorComputation(MaxSumFactorComputation): """ This is a specialisation of the computation performed for factor in the @@ -68,9 +69,8 @@ class DynamicFunctionFactorComputation(FactorAlgo): """ - def __init__(self, factor, name=None, msg_sender=None, comp_def=None): - super().__init__(factor, name=name, msg_sender=msg_sender, - comp_def=comp_def) + def __init__(self, comp_def=None): + super().__init__(comp_def=comp_def) def change_factor_function(self, fn): """ @@ -80,27 +80,29 @@ def change_factor_function(self, fn): """ # Make sure the new function has the same dimension as the # previous one. - if len(self._factor.dimensions) != len(fn.dimensions): - raise ValueError('Dimensions must be the same when changing ' - 'function in DynamicFunctionFactorComputation') - diff1 = [v for v in self._factor.dimensions if v not in fn.dimensions] - diff2 = [v for v in fn.dimensions if v not in self._factor.dimensions] + if len(self.factor.dimensions) != len(fn.dimensions): + raise ValueError( + "Dimensions must be the same when changing " + "function in DynamicFunctionFactorComputation" + ) + diff1 = [v for v in self.factor.dimensions if v not in fn.dimensions] + diff2 = [v for v in fn.dimensions if v not in self.factor.dimensions] if diff1 or diff2: - raise ValueError('Dimensions must be the same when changing ' - 'function in DynamicFunctionFactorComputation') + raise ValueError( + "Dimensions must be the same when changing " + "function in DynamicFunctionFactorComputation" + ) # Dimensions are ok, change factor computation object and emit cost # messages - self._factor = fn + self.factor = fn return self._init_msg() def __str__(self): - return 'Maxsum dynamic function Factor computation for ' + \ - self._factor.name + return "Maxsum dynamic function Factor computation for " + self.factor.name def __repr__(self): - return 'Maxsum dynamic function Factor computation for ' + \ - self._factor.name + return "Maxsum dynamic function Factor computation for " + self.factor.name class FactorWithReadOnlyVariableComputation(DynamicFunctionFactorComputation): @@ -118,8 +120,7 @@ class FactorWithReadOnlyVariableComputation(DynamicFunctionFactorComputation): """ - def __init__(self, relation, read_only_variables, name=None, - msg_sender=None): + def __init__(self, relation, read_only_variables, name=None, msg_sender=None): self._relation = relation self._read_only_variables = read_only_variables @@ 
-129,23 +130,22 @@ def __init__(self, relation, read_only_variables, name=None, writable_vars = relation.dimensions[:] for v in read_only_variables: if v not in relation.dimensions: - raise ValueError('Read only {} variable must be in relation ' - 'scope {}'.format(v.name, relation.dimensions)) + raise ValueError( + "Read only {} variable must be in relation " + "scope {}".format(v.name, relation.dimensions) + ) writable_vars.remove(v) # We start with a neutral relation until we have all values from # the read-only variables the condition depends on: - self._sliced_relation = NeutralRelation(writable_vars, - name=self._relation.name) - super().__init__(self._sliced_relation, name=name, - msg_sender=msg_sender) + self._sliced_relation = NeutralRelation(writable_vars, name=self._relation.name) + super().__init__(self._sliced_relation, name=name, msg_sender=msg_sender) def on_start(self): # when starting, subscribe to all sensor variable used in the # condition of the rule for v in self._read_only_variables: - self._msg_sender.post_msg(self.name, v.name, Message('SUBSCRIBE', - None)) + self._msg_sender.post_msg(self.name, v.name, Message("SUBSCRIBE", None)) super().on_start() @register("VARIABLE_VALUE") @@ -154,8 +154,8 @@ def _on_new_var_value_msg(self, var_name, msg, t): value = msg.content if var_name not in [v.name for v in self._read_only_variables]: - self.logger.error('Unexpected value from %s - %s ', var_name, value) - self.logger.debug('Received new value for %s - %s ', var_name, value) + self.logger.error("Unexpected value from %s - %s ", var_name, value) + self.logger.debug("Received new value for %s - %s ", var_name, value) self._read_only_values[var_name] = value @@ -164,25 +164,23 @@ def _on_new_var_value_msg(self, var_name, msg, t): new_sliced = self._relation.slice(self._read_only_values) if hash(new_sliced) != hash(self._sliced_relation): - self.logger.info('Changing factor function %s ', self.name) + self.logger.info("Changing factor function %s ", self.name) msg_count, msg_size = self.change_factor_function(new_sliced) self._sliced_relation = new_sliced self._active = True else: - self.logger.info('Equivalent relation, no change %s ', - self.name) + self.logger.info("Equivalent relation, no change %s ", self.name) else: - self.logger.info('Still waiting for values to evaluate the ' - 'rule ', self._read_only_values) + self.logger.info( + "Still waiting for values to evaluate the " "rule ", + self._read_only_values, + ) - return { - 'num_msg_out': msg_count, - 'size_msg_out': msg_size, - } + return {"num_msg_out": msg_count, "size_msg_out": msg_size} -class DynamicFactorComputation(FactorAlgo): +class DynamicFactorComputation(MaxSumFactorComputation): """ Factor Computation for dynamic Max-Sum. 
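Unlike DynamicFunctionFactorComputation above, which only accepts a replacement function over exactly the same variables, DynamicFactorComputation (continued below) handles added and removed variables explicitly via ADD/REMOVE messages. For reference, a standalone sketch of the strict scope-compatibility rule that DynamicFunctionFactorComputation.change_factor_function enforces; the helper name has_same_scope is hypothetical:

    def has_same_scope(old_fn, new_fn) -> bool:
        # A replacement is rejected unless no variable is added or removed.
        if len(old_fn.dimensions) != len(new_fn.dimensions):
            return False
        removed = [v for v in old_fn.dimensions if v not in new_fn.dimensions]
        added = [v for v in new_fn.dimensions if v not in old_fn.dimensions]
        return not removed and not added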
@@ -211,16 +209,16 @@ def __init__(self, relation, name=None, msg_sender=None): # Check if the factor depends on external variables self._external_variables = {} for v in relation.dimensions: - if hasattr(v, 'value'): + if hasattr(v, "value"): self._external_variables[v.name] = v if self._external_variables: - external_values = {v.name: v.value for v - in self._external_variables.values()} + external_values = { + v.name: v.value for v in self._external_variables.values() + } self._current_relation = self._relation.slice(external_values) - super().__init__(self._current_relation, name=name, - msg_sender=msg_sender) + super().__init__(self._current_relation, name=name, msg_sender=msg_sender) def on_start(self): # subscribe to external variable @@ -231,20 +229,20 @@ def on_start(self): def change_factor_function(self, fn): msg_count, msg_size = 0, 0 - var_removed = [v for v in self._factor.dimensions if v not in - fn.dimensions] - var_added = [v for v in fn.dimensions if v not in - self._factor.dimensions] + var_removed = [v for v in self._factor.dimensions if v not in fn.dimensions] + var_added = [v for v in fn.dimensions if v not in self._factor.dimensions] if not var_removed and not var_added: # Dimensions have not changed, simply change factor object and emit # cost messages - self.logger.info('Function change with no change in ' - 'factor\'s dimension') + self.logger.info("Function change with no change in " "factor's dimension") self._factor = fn msg_count, msg_size = self._init_msg() else: - self.logger.info('Function change with new variables %s and ' - 'removed variables %s', var_added, var_removed) + self.logger.info( + "Function change with new variables %s and " "removed variables %s", + var_added, + var_removed, + ) self._factor = fn for v in var_removed: if v.name in self._costs: @@ -271,16 +269,15 @@ def _on_new_var_value_msg(self, var_name, msg, t): msg_count, msg_size = 0, 0 value = msg.content if var_name not in self._external_variables: - self.logger.error('Unexpected value from %s - %s ', var_name, value) - self.logger.debug('Received new value for %s - %s ', var_name, value) + self.logger.error("Unexpected value from %s - %s ", var_name, value) + self.logger.debug("Received new value for %s - %s ", var_name, value) self._external_variables[var_name].value = value - external_values = {v.name: v.value for v - in self._external_variables.values()} + external_values = {v.name: v.value for v in self._external_variables.values()} new_sliced = self._relation.slice(external_values) if hash(new_sliced) != hash(self._current_relation): - self.logger.info('Changing factor function %s ', self.name) + self.logger.info("Changing factor function %s ", self.name) msg_count, msg_size = self.change_factor_function(new_sliced) self._current_relation = new_sliced self._active = True @@ -298,16 +295,16 @@ def _send_add_var_msg(self, var_added): msg_debug = {} for v in var_added: costs_v = self._costs_for_var(v) - msg = MaxSumMessage('ADD', {'costs': costs_v}) + msg = MaxSumMessage("ADD", {"costs": costs_v}) self._msg_sender.post_msg(self.name, v.name, msg) msg_debug[v.name] = costs_v msg_size += msg.size msg_count += 1 - debug = 'ADD VAR MSG {} \n'.format(self.name) + debug = "ADD VAR MSG {} \n".format(self.name) for dest, msg in msg_debug.items(): - debug += ' * {} -> {} : {}\n'.format(self.name, dest, msg) - self.logger.info(debug + '\n') + debug += " * {} -> {} : {}\n".format(self.name, dest, msg) + self.logger.info(debug + "\n") return msg_count, msg_size @@ -323,33 +320,31 @@ def 
_send_remove_var_msg(self, var_removed): msg_count, msg_size = 0, 0 for v in var_removed: - msg = MaxSumMessage('REMOVE', {}) + msg = MaxSumMessage("REMOVE", {}) self._msg_sender.post_msg(self.name, v.name, msg) msg_size += msg.size msg_count += 1 - debug = 'REMOVE VAR INIT MSG {} \n'.format(self.name) + debug = "REMOVE VAR INIT MSG {} \n".format(self.name) for dest in var_removed: - debug += ' * {} -> {} \n'.format(self.name, dest) - self.logger.info(debug + '\n') + debug += " * {} -> {} \n".format(self.name, dest) + self.logger.info(debug + "\n") return msg_count, msg_size def subscribe(self, variable): - self._msg_sender.post_msg(self.name, variable.name, - Message('SUBSCRIBE', None)) + self._msg_sender.post_msg(self.name, variable.name, Message("SUBSCRIBE", None)) def unsubscribe(self, variable): - self._msg_sender.post_msg(self.name, variable.name, - Message('SUBSCRIBE', None)) + self._msg_sender.post_msg(self.name, variable.name, Message("SUBSCRIBE", None)) def __str__(self): - return 'Maxsum dynamic Factor computation for ' + self._factor.name + return "Maxsum dynamic Factor computation for " + self._factor.name def __repr__(self): - return 'Maxsum dynamic Factor computation for ' + self._factor.name + return "Maxsum dynamic Factor computation for " + self._factor.name -class DynamicFactorVariableComputation(VariableAlgo): +class DynamicFactorVariableComputation(MaxSumVariableComputation): """ Variable computation for dynamic Max-Sum. @@ -359,23 +354,25 @@ class DynamicFactorVariableComputation(VariableAlgo): """ + def __init__(self, variable, factor_names, msg_sender=None): - super().__init__(variable, factor_names=factor_names, - msg_sender=msg_sender) + super().__init__(variable, factor_names=factor_names, msg_sender=msg_sender) @register("REMOVE") def _on_remove_msg(self, factor_name, msg, t): - self.logger.debug("Received REMOVE msg from %s on var %s", - factor_name, self.name) + self.logger.debug( + "Received REMOVE msg from %s on var %s", factor_name, self.name + ) # The removed factor should always be in the list of our factors but we # might have not received any costs from him yet. try: self._factors.remove(factor_name) except ValueError: - msg = 'CANNOT remove factor {} from variable {}, not in {}'\ - .format(factor_name, self.name, self._factors) + msg = "CANNOT remove factor {} from variable {}, not in {}".format( + factor_name, self.name, self._factors + ) self.logger.error(msg) raise ValueError(msg) @@ -386,16 +383,18 @@ def _on_remove_msg(self, factor_name, msg, t): # Select a new value. self._current_value, self._current_cost = self._select_value() - self.logger.debug('On Remove msg, Variable %s select value %s with ' - 'cost %s', self.name, self._current_value, - self._current_cost) + self.logger.debug( + "On Remove msg, Variable %s select value %s with " "cost %s", + self.name, + self._current_value, + self._current_cost, + ) # Do not send init cost, we may still have costs from other factors ! 
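        # (For reference: _compute_and_send_costs re-sends a cost table only
        # when approx_match judges that it changed. With STABILITY_COEFF = 0.1,
        # a cost moving from 10.0 to 10.4 gives 2 * 0.4 / 20.4 ~ 0.039 < 0.1,
        # which counts as a match; after SAME_COUNT (4) consecutive matching
        # tables, the message to that factor is suppressed.)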
diff --git a/tests/api/test_api_distribute_adhoc.py b/tests/api/test_api_distribute_adhoc.py
index 953a9eda..390e9767 100644
--- a/tests/api/test_api_distribute_adhoc.py
+++ b/tests/api/test_api_distribute_adhoc.py
@@ -38,7 +38,7 @@ def test_api_distribute_maxsum_adhoc():
     from pydcop.computations_graph import factor_graph
     from pydcop.distribution import adhoc
-    from pydcop.algorithms import maxsum
+    from pydcop.algorithms import amaxsum

     dcop = dcop_graphcoloring_3()
     agents = create_agents('a', [1, 2, 3], capacity=100)
@@ -46,8 +46,8 @@ def test_api_distribute_maxsum_adhoc():
     cg = factor_graph.build_computation_graph(dcop)

     dist = adhoc.distribute(cg, dcop.agents.values(),
-                            computation_memory=maxsum.computation_memory,
-                            communication_load=maxsum.communication_load)
+                            computation_memory=amaxsum.computation_memory,
+                            communication_load=amaxsum.communication_load)

     assert dist.is_hosted(['v1', 'v2', 'v3'])

diff --git a/tests/api/test_api_distribute_ilp_compref.py b/tests/api/test_api_distribute_ilp_compref.py
index ec524525..d9196974 100644
--- a/tests/api/test_api_distribute_ilp_compref.py
+++ b/tests/api/test_api_distribute_ilp_compref.py
@@ -38,7 +38,7 @@ def test_api_distribute_maxsum_ilp_compref():
     from pydcop.computations_graph import factor_graph
     from pydcop.distribution import ilp_compref
-    from pydcop.algorithms import maxsum
+    from pydcop.algorithms import amaxsum

     dcop = dcop_graphcoloring_3()
     agents = create_agents('a', range(1, 4), capacity=100)
@@ -46,8 +46,8 @@ def test_api_distribute_maxsum_ilp_compref():
     cg = factor_graph.build_computation_graph(dcop)

     dist = ilp_compref.distribute(cg, dcop.agents.values(),
-                                  computation_memory=maxsum.computation_memory,
-                                  communication_load=maxsum.communication_load)
+                                  computation_memory=amaxsum.computation_memory,
+                                  communication_load=amaxsum.communication_load)

     assert dist.is_hosted(['v1', 'v2', 'v3'])

diff --git a/tests/api/test_api_distribute_ilp_fgdp.py b/tests/api/test_api_distribute_ilp_fgdp.py
index fc9d0345..c9fcffb1 100644
--- a/tests/api/test_api_distribute_ilp_fgdp.py
+++ b/tests/api/test_api_distribute_ilp_fgdp.py
@@ -55,7 +55,7 @@ def create_dcop():
 def test_api_distribute_maxsum_ilp_fgdp():
     from pydcop.computations_graph import factor_graph
     from pydcop.distribution import ilp_fgdp
-    from pydcop.algorithms import maxsum
+    from pydcop.algorithms import amaxsum

     dcop = dcop_graphcoloring_3()
     agents = create_agents('a', range(1, 4), capacity=100)
@@ -63,8 +63,8 @@ def test_api_distribute_maxsum_ilp_fgdp():
     cg = factor_graph.build_computation_graph(dcop)

     dist = ilp_fgdp.distribute(cg, dcop.agents.values(),
-                               computation_memory=maxsum.computation_memory,
-                               communication_load=maxsum.communication_load)
+                               computation_memory=amaxsum.computation_memory,
+                               communication_load=amaxsum.communication_load)

     assert dist.is_hosted(['v1', 'v2', 'v3'])

diff --git a/tests/api/test_api_distribute_oneagent.py b/tests/api/test_api_distribute_oneagent.py
index 3bd14066..a78698c8 100644
--- a/tests/api/test_api_distribute_oneagent.py
+++ b/tests/api/test_api_distribute_oneagent.py
@@ -38,7 +38,7 @@ def test_api_distribute_maxsum_oneagent():
     from pydcop.computations_graph import factor_graph
     from pydcop.distribution import oneagent
-    from pydcop.algorithms import maxsum
+    from pydcop.algorithms import amaxsum

     dcop = dcop_graphcoloring_3()
     # 5 constraints and 3 variables : we need 8 agents
@@ -47,8 +47,8 @@ def test_api_distribute_maxsum_oneagent():
     cg = factor_graph.build_computation_graph(dcop)

     dist = oneagent.distribute(cg, dcop.agents.values(),
-                               computation_memory=maxsum.computation_memory,
-                               communication_load=maxsum.communication_load)
+                               computation_memory=amaxsum.computation_memory,
+                               communication_load=amaxsum.communication_load)

     assert dist.is_hosted(['v1', 'v2', 'v3',
                            'cost_1', 'cost_2', 'cost_3',
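All four API tests above follow the same pattern, which is the public way to distribute a factor graph: build the computation graph from the DCOP, then hand the distribution method the algorithm's footprint functions. A condensed sketch of that pattern (reusing the test fixtures `dcop_graphcoloring_3` and `create_agents`; the agent count is illustrative):

    from pydcop.algorithms import amaxsum
    from pydcop.computations_graph import factor_graph
    from pydcop.distribution import oneagent

    dcop = dcop_graphcoloring_3()                          # test fixture
    agents = create_agents('a', range(1, 9), capacity=100)  # one agent per computation

    cg = factor_graph.build_computation_graph(dcop)
    dist = oneagent.distribute(cg, agents.values(),
                               computation_memory=amaxsum.computation_memory,
                               communication_load=amaxsum.communication_load)

Swapping `oneagent` for `adhoc`, `ilp_compref` or `ilp_fgdp` only changes how the footprints are traded off; the call shape stays the same.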
diff --git a/tests/integration/maxsum_equality.py b/tests/integration/maxsum_equality.py
index 57eab136..a02a0188 100644
--- a/tests/integration/maxsum_equality.py
+++ b/tests/integration/maxsum_equality.py
@@ -32,7 +32,7 @@
 import logging
 import sys

-from pydcop.algorithms.maxsum import VariableAlgo, FactorAlgo
+from pydcop.algorithms.amaxsum import VariableAlgo, FactorAlgo
 from pydcop.dcop.objects import VariableWithCostFunc, VariableNoisyCostFunc
 from pydcop.dcop.relations import AsNAryFunctionRelation
 from pydcop.infrastructure.agents import Agent

diff --git a/tests/integration/maxsum_graphcoloring.py b/tests/integration/maxsum_graphcoloring.py
index d0d777d7..37757d15 100644
--- a/tests/integration/maxsum_graphcoloring.py
+++ b/tests/integration/maxsum_graphcoloring.py
@@ -32,7 +32,7 @@
 import logging
 import sys

-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable
 from pydcop.infrastructure.agents import Agent
@@ -83,10 +83,10 @@ def distribute_agents(var_facts):
         # build the list of factors that depend on this variable
         f_for_variable = [f.name for f in factors if v.name in
                           [i.name for i in f.dimensions]]
-        v_a = maxsum.VariableAlgo(v, f_for_variable)
+        v_a = amaxsum.VariableAlgo(v, f_for_variable)

         # Algorithm for the factor
-        f_a = maxsum.FactorAlgo(f)
+        f_a = amaxsum.FactorAlgo(f)

         # Agent hosting the factor and variable
         a = Agent('a_'+str(i), comm)

diff --git a/tests/integration/maxsum_graphcoloring_separateagents.py b/tests/integration/maxsum_graphcoloring_separateagents.py
index d9cb359f..5f1495b3 100644
--- a/tests/integration/maxsum_graphcoloring_separateagents.py
+++ b/tests/integration/maxsum_graphcoloring_separateagents.py
@@ -33,7 +33,7 @@
 import pydcop.infrastructure.communication
 from pydcop import infrastructure
-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable
@@ -87,12 +87,12 @@ def distribue_agent_for_all(variables, factors):
                           [i.name for i in f.dimensions]]

         a = infrastructure.Agent('Var_' + v.name, comm)
-        a.add_computation(maxsum.VariableAlgo(v, f_for_variable))
+        a.add_computation(amaxsum.VariableAlgo(v, f_for_variable))
         node_agents.append(a)

     for f in factors:
         a = infrastructure.Agent('Fact_' + f.name, comm)
-        a.add_computation(maxsum.FactorAlgo(f))
+        a.add_computation(amaxsum.FactorAlgo(f))
         node_agents.append(a)

     return node_agents

diff --git a/tests/integration/maxsum_graphcoloring_with_costs.py b/tests/integration/maxsum_graphcoloring_with_costs.py
index f5e6ada0..13e27908 100644
--- a/tests/integration/maxsum_graphcoloring_with_costs.py
+++ b/tests/integration/maxsum_graphcoloring_with_costs.py
@@ -33,7 +33,7 @@
 import sys

 from pydcop import infrastructure
-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable
 from pydcop.dcop.relations import UnaryFunctionRelation
@@ -71,12 +71,12 @@ def distribue_agent_for_all(variables, factors):
                           [i.name for i in f.dimensions]]

         a = infrastructure.Agent('Var_' + v.name, comm)
-        a.add_computation(maxsum.VariableAlgo(v, f_for_variable))
+        a.add_computation(amaxsum.VariableAlgo(v, f_for_variable))
         node_agents.append(a)

     for f in factors:
         a = infrastructure.Agent('Fact_' + f.name, comm)
-        a.add_computation(maxsum.FactorAlgo(f))
+        a.add_computation(amaxsum.FactorAlgo(f))
         node_agents.append(a)

     return node_agents

diff --git a/tests/integration/maxsum_smartlights_multiplecomputationagent.py b/tests/integration/maxsum_smartlights_multiplecomputationagent.py
index 3729287d..184f3b3a 100644
--- a/tests/integration/maxsum_smartlights_multiplecomputationagent.py
+++ b/tests/integration/maxsum_smartlights_multiplecomputationagent.py
@@ -35,7 +35,7 @@
 import pydcop.infrastructure.communication
 from pydcop import infrastructure
-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable
@@ -107,24 +107,24 @@ def rule_rel(l3, y1):
     # Create computation for factors and variables
     # Light 1
-    algo_l1 = maxsum.VariableAlgo(l1, [cost_l1.name, scene_rel.name])
-    algo_cost_l1 = maxsum.FactorAlgo(cost_l1)
+    algo_l1 = amaxsum.VariableAlgo(l1, [cost_l1.name, scene_rel.name])
+    algo_cost_l1 = amaxsum.FactorAlgo(cost_l1)

     # Light 2
-    algo_l2 = maxsum.VariableAlgo(l2, [cost_l2.name, scene_rel.name])
-    algo_cost_l2 = maxsum.FactorAlgo(cost_l2)
+    algo_l2 = amaxsum.VariableAlgo(l2, [cost_l2.name, scene_rel.name])
+    algo_cost_l2 = amaxsum.FactorAlgo(cost_l2)

     # Light 3
-    algo_l3 = maxsum.VariableAlgo(l3, [cost_l3.name, scene_rel.name,
-                                       rule_rel.name])
-    algo_cost_l3 = maxsum.FactorAlgo(cost_l3)
+    algo_l3 = amaxsum.VariableAlgo(l3, [cost_l3.name, scene_rel.name,
+                                        rule_rel.name])
+    algo_cost_l3 = amaxsum.FactorAlgo(cost_l3)

     # Scene
-    algo_y1 = maxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
-    algo_scene = maxsum.FactorAlgo(scene_rel)
+    algo_y1 = amaxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
+    algo_scene = amaxsum.FactorAlgo(scene_rel)

     # Rule
-    algo_rule = maxsum.FactorAlgo(rule_rel)
+    algo_rule = amaxsum.FactorAlgo(rule_rel)

     # Distribution of the computation on the three physical light-bulb nodes.
     # We have 9 computations to distribute on 3 agents, mapping the 3 light

diff --git a/tests/integration/maxsum_smartlights_multiplecomputationagent_variablecost.py b/tests/integration/maxsum_smartlights_multiplecomputationagent_variablecost.py
index 7cdd1d0f..4f2aea59 100644
--- a/tests/integration/maxsum_smartlights_multiplecomputationagent_variablecost.py
+++ b/tests/integration/maxsum_smartlights_multiplecomputationagent_variablecost.py
@@ -34,7 +34,7 @@
 import pydcop.infrastructure.communication
 from pydcop import infrastructure
-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable, VariableWithCostFunc
@@ -95,21 +95,21 @@ def rule_rel(l3, y1):
     # Create computation for factors and variables
     # Light 1
-    algo_l1 = maxsum.VariableAlgo(l1, [scene_rel.name])
+    algo_l1 = amaxsum.VariableAlgo(l1, [scene_rel.name])

     # Light 2
-    algo_l2 = maxsum.VariableAlgo(l2, [scene_rel.name])
+    algo_l2 = amaxsum.VariableAlgo(l2, [scene_rel.name])

     # Light 3
-    algo_l3 = maxsum.VariableAlgo(l3, [scene_rel.name,
-                                       rule_rel.name])
+    algo_l3 = amaxsum.VariableAlgo(l3, [scene_rel.name,
+                                        rule_rel.name])

     # Scene
-    algo_y1 = maxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
-    algo_scene = maxsum.FactorAlgo(scene_rel)
+    algo_y1 = amaxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
+    algo_scene = amaxsum.FactorAlgo(scene_rel)

     # Rule
-    algo_rule = maxsum.FactorAlgo(rule_rel)
+    algo_rule = amaxsum.FactorAlgo(rule_rel)

     # Distribution of the computation on the three physical light-bulb nodes.
     # We have 9 computations to distribute on 3 agents, mapping the 3 light

diff --git a/tests/integration/maxsum_smartlights_simple.py b/tests/integration/maxsum_smartlights_simple.py
index f8279e27..ed5d2bac 100644
--- a/tests/integration/maxsum_smartlights_simple.py
+++ b/tests/integration/maxsum_smartlights_simple.py
@@ -34,7 +34,7 @@
 import pydcop.infrastructure.communication
 from pydcop import infrastructure
-from pydcop.algorithms import maxsum
+from pydcop.algorithms import amaxsum
 from pydcop.dcop import relations
 from pydcop.dcop.objects import Variable
@@ -109,24 +109,24 @@ def rule_rel(l3, y1):
     # Create computation for factors and variables
     # Light 1
-    algo_l1 = maxsum.VariableAlgo(l1, [cost_l1.name, scene_rel.name])
-    algo_cost_l1 = maxsum.FactorAlgo(cost_l1)
+    algo_l1 = amaxsum.VariableAlgo(l1, [cost_l1.name, scene_rel.name])
+    algo_cost_l1 = amaxsum.FactorAlgo(cost_l1)

     # Light 2
-    algo_l2 = maxsum.VariableAlgo(l2, [cost_l2.name, scene_rel.name])
-    algo_cost_l2 = maxsum.FactorAlgo(cost_l2)
+    algo_l2 = amaxsum.VariableAlgo(l2, [cost_l2.name, scene_rel.name])
+    algo_cost_l2 = amaxsum.FactorAlgo(cost_l2)

     # Light 3
-    algo_l3 = maxsum.VariableAlgo(l3, [cost_l3.name, scene_rel.name,
-                                       rule_rel.name])
-    algo_cost_l3 = maxsum.FactorAlgo(cost_l3)
+    algo_l3 = amaxsum.VariableAlgo(l3, [cost_l3.name, scene_rel.name,
+                                        rule_rel.name])
+    algo_cost_l3 = amaxsum.FactorAlgo(cost_l3)

     # Scene
-    algo_y1 = maxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
-    algo_scene = maxsum.FactorAlgo(scene_rel)
+    algo_y1 = amaxsum.VariableAlgo(y1, [rule_rel.name, scene_rel.name])
+    algo_scene = amaxsum.FactorAlgo(scene_rel)

     #Rule
-    algo_rule = maxsum.FactorAlgo(rule_rel)
+    algo_rule = amaxsum.FactorAlgo(rule_rel)

     # Distribution of the computation on the three physical light-bulb nodes.
     # We have 9 computations to distribute on 3 agents, mapping the 3 light
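The smart-lights integration tests all end the same way: the computations built above are spread over a few agents, each agent hosting several of them. A minimal sketch of that deployment step (the `comm` object and agent name are placeholders for what the tests build earlier):

    from pydcop import infrastructure

    # One physical light-bulb node hosts both its variable computation and
    # its unary cost factor; comm stands for the communication layer.
    a1 = infrastructure.Agent('bulb_1', comm)
    a1.add_computation(algo_l1)        # the light's variable computation
    a1.add_computation(algo_cost_l1)   # its unary cost factor
    a1.start()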
diff --git a/tests/unit/test_algorithms_amaxsum.py b/tests/unit/test_algorithms_amaxsum.py
new file mode 100644
index 00000000..bfe59262
--- /dev/null
+++ b/tests/unit/test_algorithms_amaxsum.py
@@ -0,0 +1,283 @@
+# BSD-3-Clause License
+#
+# Copyright 2017 Orange
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+
+import json
+import unittest
+from unittest.mock import MagicMock
+
+from pydcop.algorithms.amaxsum import (
+    approx_match,
+    MaxSumFactorComputation,
+    computation_memory,
+    VARIABLE_UNIT_SIZE,
+    FACTOR_UNIT_SIZE,
+    communication_load,
+    HEADER_SIZE,
+    UNIT_SIZE,
+)
+from pydcop.algorithms.maxsum import MaxSumMessage
+from pydcop.computations_graph.factor_graph import (
+    VariableComputationNode,
+    FactorComputationNode,
+)
+from pydcop.dcop.objects import Variable, VariableDomain
+from pydcop.dcop.relations import AsNAryFunctionRelation, relation_from_str
+from pydcop.utils.simple_repr import simple_repr, from_repr
+
+
+class MaxSumFactorAlgoTest(unittest.TestCase):
+    def test_init(self):
+        domain = list(range(10))
+        x1 = Variable("x1", domain)
+        x2 = Variable("x2", domain)
+
+        @AsNAryFunctionRelation(x1, x2)
+        def phi(x1_, x2_):
+            return x1_ + x2_
+
+        comp_def = MagicMock()
+        comp_def.algo.algo = "amaxsum"
+        comp_def.algo.mode = "min"
+        comp_def.node.factor = phi
+
+        f = MaxSumFactorComputation(comp_def=comp_def)
+
+        self.assertEqual(f.name, "phi")
+        self.assertEqual(len(f.variables), 2)
+
+    def test_cost_for_1var(self):
+        domain = list(range(10))
+        x1 = Variable("x1", domain)
+
+        @AsNAryFunctionRelation(x1)
+        def cost(x1_):
+            return x1_ * 2
+
+        comp_def = MagicMock()
+        comp_def.algo.algo = "amaxsum"
+        comp_def.algo.mode = "min"
+        comp_def.node.factor = cost
+        f = MaxSumFactorComputation(comp_def=comp_def)
+
+        costs = f._costs_for_var(x1)
+
+        # in the max-sum algorithm, for a unary factor the costs are simply
+        # the result of the factor function
+        self.assertEqual(costs[0], 0)
+        self.assertEqual(costs[5], 10)
+
+    def test_cost_for_1var_2(self):
+
+        # TODO test for min and max
+
+        domain = list(range(10))
+        x1 = Variable("x1", domain)
+
+        @AsNAryFunctionRelation(x1)
+        def cost(x1):
+            return x1 * 2
+
+        comp_def = MagicMock()
+        comp_def.algo.algo = "amaxsum"
+        comp_def.algo.mode = "min"
+        comp_def.node.factor = cost
+        f = MaxSumFactorComputation(comp_def=comp_def)
+
+        costs = f._costs_for_var(x1)
+
+        # in the maxsum algorithm, for a unary factor the costs are simply
+        # the result of the factor function
+        self.assertEqual(costs[0], 0)
+        self.assertEqual(costs[5], 10)
+
+    def test_cost_for_2var(self):
+        domain = list(range(10))
+        x1 = Variable("x1", domain)
+        domain = list(range(5))
+        x2 = Variable("x2", domain)
+
+        @AsNAryFunctionRelation(x1, x2)
+        def cost(x1_, x2_):
+            return abs((x1_ - x2_) / 2)
+
+        comp_def = MagicMock()
+        comp_def.algo.algo = "amaxsum"
+        comp_def.algo.mode = "min"
+        comp_def.node.factor = cost
+        f = MaxSumFactorComputation(comp_def=comp_def)
+
+        costs = f._costs_for_var(x1)
+
+        # in this test, the factor did not receive any cost messages from
+        # other variables, which means it only uses the factor function when
+        # calculating costs.
+
+        # x1 = 5, best val for x2 is 4, with cost = 0.5
+        self.assertEqual(costs[5], (5 - 4) / 2)
+        self.assertEqual(costs[9], (9 - 4) / 2)
+        self.assertEqual(costs[2], 0)
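The two comments in the tests above state the rule being exercised: with no incoming cost messages, the cost a factor sends for one of its variables is just the factor function, minimized over all other variables. A worked version of `test_cost_for_2var`'s expectation, re-implemented in plain Python purely for illustration (not the module's code):

    def costs_for_var(factor, var_domain, other_domain):
        """Min-marginalize a 2-variable factor onto its first variable."""
        return {
            d1: min(factor(d1, d2) for d2 in other_domain)
            for d1 in var_domain
        }

    cost = lambda x1, x2: abs((x1 - x2) / 2)
    costs = costs_for_var(cost, range(10), range(5))
    assert costs[5] == (5 - 4) / 2   # best x2 for x1=5 is 4, cost 0.5
    assert costs[2] == 0             # x2=2 makes the factor 0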
+
+
+class VarDummy:
+    def __init__(self, name):
+        self.name = name
+        self.current_value = None
+        self.current_cost = None
+
+
+class ApproxMatchTests(unittest.TestCase):
+    def test_match_exact(self):
+        c1 = {0: 0, 1: 0, 2: 0}
+        c2 = {0: 0, 1: 0, 2: 0}
+
+        self.assertTrue(approx_match(c1, c2))
+
+    def test_nomatch(self):
+        c1 = {0: 0, 1: 0, 2: 0}
+        c2 = {0: 0, 1: 1, 2: 0}
+
+        self.assertFalse(approx_match(c1, c2))
+
+    def test_nomatch2(self):
+        c1 = {
+            0: -46.0,
+            1: -46.5,
+            2: -55.5,
+            3: -56.0,
+            4: -56.5,
+            5: -65.5,
+            6: -66.0,
+            7: -66.5,
+            8: -67.0,
+            9: -67.5,
+        }
+        c2 = {
+            0: 0.0,
+            1: 0.0,
+            2: 0.0,
+            3: 0.0,
+            4: 0.0,
+            5: 0.0,
+            6: 0.0,
+            7: 0.0,
+            8: 0.0,
+            9: 0.0,
+        }
+
+        self.assertFalse(approx_match(c1, c2))
+
+
+class ComputationMemory(unittest.TestCase):
+    def test_variable_memory_no_neighbor(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
+        v1 = Variable("v1", d1)
+
+        vn1 = VariableComputationNode(v1, [])
+
+        # If a variable has no neighbors, it does not need to keep any cost
+        # and thus requires no memory
+        self.assertEqual(computation_memory(vn1), 0)
+
+    def test_variable_memory_one_neighbor(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
+        v1 = Variable("v1", d1)
+        f1 = relation_from_str("f1", "v1 * 0.5", [v1])
+
+        cv1 = VariableComputationNode(v1, ["f1"])
+        cf1 = FactorComputationNode(f1)
+
+        self.assertEqual(computation_memory(cv1), VARIABLE_UNIT_SIZE * 4)
+
+    def test_factor_memory_one_neighbor(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
+        v1 = Variable("v1", d1)
+        f1 = relation_from_str("f1", "v1 * 0.5", [v1])
+
+        cv1 = VariableComputationNode(v1, ["f1"])
+        cf1 = FactorComputationNode(f1)
+
+        self.assertEqual(computation_memory(cf1), FACTOR_UNIT_SIZE * 4)
+
+    def test_factor_memory_two_neighbor(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 4, 5])
+        v1 = Variable("v1", d1)
+        d2 = VariableDomain("d1", "", [1, 2, 3])
+        v2 = Variable("v2", d2)
+        f1 = relation_from_str("f1", "v1 * 0.5 + v2", [v1, v2])
+
+        cv1 = VariableComputationNode(v1, ["f1"])
+        cv2 = VariableComputationNode(v2, ["f1"])
+        cf1 = FactorComputationNode(f1)
+
+        self.assertEqual(computation_memory(cf1), FACTOR_UNIT_SIZE * (5 + 3))
+
+    def test_variable_memory_two_neighbor(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
+        v1 = Variable("v1", d1)
+        cv1 = VariableComputationNode(v1, ["f1", "f2"])
+
+        self.assertEqual(computation_memory(cv1), VARIABLE_UNIT_SIZE * 4 * 2)
+
+
+class CommunicationCost(unittest.TestCase):
+    def test_variable_one_neighbors(self):
+        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
+        v1 = Variable("v1", d1)
+        f1 = relation_from_str("f1", "v1 * 0.5", [v1])
+
+        cv1 = VariableComputationNode(v1, ["f1"])
+        cf1 = FactorComputationNode(f1)
+
+        # A message between a variable and a factor carries one cost per value
+        # of the variable's domain, plus a fixed header.
+        self.assertEqual(
+            communication_load(cv1, "f1"), HEADER_SIZE + UNIT_SIZE * len(v1.domain)
+        )
+        self.assertEqual(
+            communication_load(cf1, "v1"), HEADER_SIZE + UNIT_SIZE * len(v1.domain)
+        )
+
+
+class TestsMaxsumMessage(unittest.TestCase):
+    def test_serialize_repr(self):
+        # Make sure that even after serialization / deserialization,
+        # from_repr and simple_repr still produce equal messages.
+        # This has been causing problems with maxsum costs dicts whose keys
+        # were integers.
+
+        msg = MaxSumMessage({1: 10, 2: 20})
+        r = simple_repr(msg)
+        msg_json = json.dumps(r)
+
+        r2 = json.loads(msg_json)
+        msg2 = from_repr(r2)
+
+        self.assertEqual(msg, msg2)
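The serialization test above guards a real pitfall: JSON object keys are always strings, so a costs dict keyed by integers does not survive a naive `json.dumps`/`json.loads` round trip, and `from_repr` has to restore the key type. The pitfall in isolation:

    import json

    costs = {1: 10, 2: 20}
    round_tripped = json.loads(json.dumps(costs))
    assert round_tripped == {"1": 10, "2": 20}   # keys are now strings!
    assert round_tripped != costs

    # the deserialization side must therefore convert keys back, e.g.:
    restored = {int(k): v for k, v in round_tripped.items()}
    assert restored == costs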
diff --git a/tests/unit/test_algorithms_dynamic_maxsum.py b/tests/unit/test_algorithms_dynamic_maxsum.py
index 986fc2ad..4248401e 100644
--- a/tests/unit/test_algorithms_dynamic_maxsum.py
+++ b/tests/unit/test_algorithms_dynamic_maxsum.py
@@ -51,9 +51,10 @@ def phi(x1_, x2_):
         return x1_ + x2_

     comp_def = MagicMock()
-    comp_def.algo.algo = "maxsum"
+    comp_def.algo.algo = "amaxsum"
     comp_def.algo.mode = "min"
-    f = DynamicFunctionFactorComputation(phi, comp_def=comp_def)
+    comp_def.node.factor = phi
+    f = DynamicFunctionFactorComputation(comp_def=comp_def)

     self.assertEqual(f.name, "phi")

@@ -71,12 +72,11 @@ def phi2(x1_, x2_):
         return x1_ - x2_

     comp_def = MagicMock()
-    comp_def.algo.algo = "maxsum"
+    comp_def.algo.algo = "amaxsum"
     comp_def.algo.mode = "min"
-    f = DynamicFunctionFactorComputation(
-        phi, msg_sender=MagicMock(), comp_def=comp_def
-    )
-
+    comp_def.node.factor = phi
+    f = DynamicFunctionFactorComputation(comp_def=comp_def)
+    f.message_sender = MagicMock()
     f.change_factor_function(phi2)

     self.assertEqual(f.name, "phi")

@@ -95,11 +95,11 @@ def phi2(x2_, x1_):
         return x1_ - x2_

     comp_def = MagicMock()
-    comp_def.algo.algo = "maxsum"
+    comp_def.algo.algo = "amaxsum"
     comp_def.algo.mode = "min"
-    f = DynamicFunctionFactorComputation(
-        phi, msg_sender=MagicMock(), comp_def=comp_def
-    )
+    comp_def.node.factor = phi
+    f = DynamicFunctionFactorComputation(comp_def=comp_def)
+    f.message_sender = MagicMock()
     f.change_factor_function(phi2)

     self.assertEqual(f.name, "phi")

@@ -120,9 +120,11 @@ def phi2(x1_, x2_, x3_):
         return x1_ - x2_ + x3_

     comp_def = MagicMock()
-    comp_def.algo.algo = "maxsum"
+    comp_def.algo.algo = "amaxsum"
     comp_def.algo.mode = "min"
-    f = DynamicFunctionFactorComputation(phi, comp_def=comp_def)
+    comp_def.node.factor = phi
+
+    f = DynamicFunctionFactorComputation(comp_def=comp_def)

     # Monkey patch post_msg method with dummy mock to avoid error:
     f.post_msg = types.MethodType(lambda w, x, y, z: None, f)

@@ -143,9 +145,11 @@ def phi2(x1_, x3_):
         return x1_ + x3_

     comp_def = MagicMock()
-    comp_def.algo.algo = "maxsum"
+    comp_def.algo.algo = "amaxsum"
     comp_def.algo.mode = "min"
-    f = DynamicFunctionFactorComputation(phi, comp_def=comp_def)
+    comp_def.node.factor = phi
+
+    f = DynamicFunctionFactorComputation(comp_def=comp_def)

     # Monkey patch post_msg method with dummy mock to avoid error:
     f.post_msg = types.MethodType(lambda w, x, y, z: None, f)

diff --git a/tests/unit/test_algorithms_maxsum.py b/tests/unit/test_algorithms_maxsum.py
index a2abfe21..bd328798 100644
--- a/tests/unit/test_algorithms_maxsum.py
+++ b/tests/unit/test_algorithms_maxsum.py
@@ -28,251 +28,100 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.

-
-import json
-import unittest
-from unittest.mock import MagicMock
-
+from pydcop.algorithms import ComputationDef, AlgorithmDef
 from pydcop.algorithms.maxsum import (
-    approx_match,
-    FactorAlgo,
-    computation_memory,
-    VARIABLE_UNIT_SIZE,
-    FACTOR_UNIT_SIZE,
-    communication_load,
-    HEADER_SIZE,
-    UNIT_SIZE,
-    MaxSumMessage,
+    MaxSumVariableComputation,
+    MaxSumFactorComputation,
+    build_computation,
+    factor_costs_for_var,
+    select_value,
 )
-from pydcop.computations_graph.factor_graph import (
-    VariableComputationNode,
-    FactorComputationNode,
+from pydcop.computations_graph.factor_graph import build_computation_graph
+from pydcop.dcop.objects import (
+    Variable,
+    Domain,
+    VariableWithCostDict,
+    VariableWithCostFunc,
 )
-from pydcop.dcop.objects import Variable, VariableDomain
-from pydcop.dcop.relations import AsNAryFunctionRelation, relation_from_str
-from pydcop.utils.simple_repr import simple_repr, from_repr
-
-
-class MaxSumFactorAlgoTest(unittest.TestCase):
-    def test_init(self):
-        domain = list(range(10))
-        x1 = Variable("x1", domain)
-        x2 = Variable("x2", domain)
-
-        @AsNAryFunctionRelation(x1, x2)
-        def phi(x1_, x2_):
-            return x1_ + x2_
-
-        comp_def = MagicMock()
-        comp_def.algo.algo = "maxsum"
-        comp_def.algo.mode = "min"
-        f = FactorAlgo(phi, comp_def=comp_def)
-
-        self.assertEqual(f.name, "phi")
-        self.assertEqual(len(f.variables), 2)
-
-    def test_cost_for_1var(self):
-        domain = list(range(10))
-        x1 = Variable("x1", domain)
-
-        @AsNAryFunctionRelation(x1)
-        def cost(x1_):
-            return x1_ * 2
-
-        comp_def = MagicMock()
-        comp_def.algo.algo = "maxsum"
-        comp_def.algo.mode = "min"
-        f = FactorAlgo(cost, comp_def=comp_def)
-
-        costs = f._costs_for_var(x1)
-
-        # in the max-sum algorithm, for an unary factor the costs is simply
-        # the result of the factor function
-        self.assertEqual(costs[0], 0)
-        self.assertEqual(costs[5], 10)
-
-    def test_cost_for_1var_2(self):
-
-        # TODO test for min and max
-
-        domain = list(range(10))
-        x1 = Variable("x1", domain)
-
-        @AsNAryFunctionRelation(x1)
-        def cost(x1):
-            return x1 * 2
-
-        comp_def = MagicMock()
-        comp_def.algo.algo = "maxsum"
-        comp_def.algo.mode = "min"
-        f = FactorAlgo(cost, comp_def=comp_def)
-
-        costs = f._costs_for_var(x1)
-
-        # in the maxsum algorithm, for an unary factor the costs is simply
-        # the result of the factor function
-        self.assertEqual(costs[0], 0)
-        self.assertEqual(costs[5], 10)
-
-    def test_cost_for_2var(self):
-        domain = list(range(10))
-        x1 = Variable("x1", domain)
-        domain = list(range(5))
-        x2 = Variable("x2", domain)
-
-        @AsNAryFunctionRelation(x1, x2)
-        def cost(x1_, x2_):
-            return abs((x1_ - x2_) / 2)
-
-        comp_def = MagicMock()
-        comp_def.algo.algo = "maxsum"
-        comp_def.algo.mode = "min"
-        f = FactorAlgo(cost, comp_def=comp_def)
-
-        costs = f._costs_for_var(x1)
-
-        # in this test, the factor did not receive any costs messages from
-        # other variables, this means it only uses the factor function when
-        # calculating costs.
-
-        # x1 = 5, best val for x2 is 4, with cost = 0.5
-        self.assertEqual(costs[5], (5 - 4) / 2)
-        self.assertEqual(costs[9], (9 - 4) / 2)
-        self.assertEqual(costs[2], 0)
-
-
-class VarDummy:
-    def __init__(self, name):
-        self.name = name
-        self.current_value = None
-        self.current_cost = None
-
-
-class ApproxMatchTests(unittest.TestCase):
-    def test_match_exact(self):
-        c1 = {0: 0, 1: 0, 2: 0}
-        c2 = {0: 0, 1: 0, 2: 0}
-
-        self.assertTrue(approx_match(c1, c2))
-
-    def test_nomatch(self):
-        c1 = {0: 0, 1: 0, 2: 0}
-        c2 = {0: 0, 1: 1, 2: 0}
-
-        self.assertFalse(approx_match(c1, c2))
-
-    def test_nomatch2(self):
-        c1 = {
-            0: -46.0,
-            1: -46.5,
-            2: -55.5,
-            3: -56.0,
-            4: -56.5,
-            5: -65.5,
-            6: -66.0,
-            7: -66.5,
-            8: -67.0,
-            9: -67.5,
-        }
-        c2 = {
-            0: 0.0,
-            1: 0.0,
-            2: 0.0,
-            3: 0.0,
-            4: 0.0,
-            5: 0.0,
-            6: 0.0,
-            7: 0.0,
-            8: 0.0,
-            9: 0.0,
-        }
-
-        self.assertFalse(approx_match(c1, c2))
-
-
-class ComputationMemory(unittest.TestCase):
-    def test_variable_memory_no_neighbor(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
-        v1 = Variable("v1", d1)
-
-        vn1 = VariableComputationNode(v1, [])
-
-        # If a variable has no neighbors, it does not need to keep any cost
-        # and thus requires no memory
-        self.assertEqual(computation_memory(vn1), 0)
+from pydcop.dcop.relations import constraint_from_str

-    def test_variable_memory_one_neighbor(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
-        v1 = Variable("v1", d1)
-        f1 = relation_from_str("f1", "v1 * 0.5", [v1])

-        cv1 = VariableComputationNode(v1, ["f1"])
-        cf1 = FactorComputationNode(f1)
+def test_comp_creation():
+    d = Domain("d", "", ["R", "G"])
+    v1 = Variable("v1", d)
+    v2 = Variable("v2", d)
+    c1 = constraint_from_str("c1", "10 if v1 == v2 else 0", [v1, v2])
+    graph = build_computation_graph(None, constraints=[c1], variables=[v1, v2])

-        self.assertEqual(computation_memory(cv1), VARIABLE_UNIT_SIZE * 4)
+    comp_node = graph.computation("c1")
+    algo_def = AlgorithmDef.build_with_default_param("maxsum")
+    comp_def = ComputationDef(comp_node, algo_def)

-    def test_factor_memory_one_neighbor(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
-        v1 = Variable("v1", d1)
-        f1 = relation_from_str("f1", "v1 * 0.5", [v1])
+    comp = MaxSumFactorComputation(comp_def)
+    assert comp is not None
+    assert comp.name == "c1"
+    assert comp.factor == c1

-        cv1 = VariableComputationNode(v1, ["f1"])
-        cf1 = FactorComputationNode(f1)
+    comp_node = graph.computation("v1")
+    algo_def = AlgorithmDef.build_with_default_param("maxsum")
+    comp_def = ComputationDef(comp_node, algo_def)

-        self.assertEqual(computation_memory(cf1), FACTOR_UNIT_SIZE * 4)
+    comp = MaxSumVariableComputation(comp_def)
+    assert comp is not None
+    assert comp.name == "v1"
+    assert comp.variable.name == "v1"
+    assert comp.factor_names == ["c1"]

-    def test_factor_memory_two_neighbor(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 4, 5])
-        v1 = Variable("v1", d1)
-        d2 = VariableDomain("d1", "", [1, 2, 3])
-        v2 = Variable("v2", d2)
-        f1 = relation_from_str("f1", "v1 * 0.5 + v2", [v1, v2])

-        cv1 = VariableComputationNode(v1, ["f1"])
-        cv2 = VariableComputationNode(v2, ["f1"])
-        cf1 = FactorComputationNode(f1)
+def test_comp_creation_with_factory_method():
+    d = Domain("d", "", ["R", "G"])
+    v1 = Variable("v1", d)
+    v2 = Variable("v2", d)
+    c1 = constraint_from_str("c1", "10 if v1 == v2 else 0", [v1, v2])
+    graph = build_computation_graph(None, constraints=[c1], variables=[v1, v2])

-        self.assertEqual(computation_memory(cf1), FACTOR_UNIT_SIZE * (5 + 3))
+    comp_node = graph.computation("c1")
+    algo_def = AlgorithmDef.build_with_default_param("maxsum")
+    comp_def = ComputationDef(comp_node, algo_def)

-    def test_variable_memory_two_neighbor(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
-        v1 = Variable("v1", d1)
-        cv1 = VariableComputationNode(v1, ["f1", "f2"])
+    comp = build_computation(comp_def)
+    assert comp is not None
+    assert comp.name == "c1"
+    assert comp.factor == c1

-        self.assertEqual(computation_memory(cv1), VARIABLE_UNIT_SIZE * 4 * 2)
+    comp_node = graph.computation("v1")
+    algo_def = AlgorithmDef.build_with_default_param("maxsum")
+    comp_def = ComputationDef(comp_node, algo_def)

+    comp = build_computation(comp_def)
+    assert comp is not None
+    assert comp.name == "v1"
+    assert comp.variable.name == "v1"
+    assert comp.factor_names == ["c1"]

-class CommunicationCost(unittest.TestCase):
-    def test_variable_one_neighbors(self):
-        d1 = VariableDomain("d1", "", [1, 2, 3, 5])
-        v1 = Variable("v1", d1)
-        f1 = relation_from_str("f1", "v1 * 0.5", [v1])

-        cv1 = VariableComputationNode(v1, ["f1"])
-        cf1 = FactorComputationNode(f1)
+def test_compute_factor_cost_at_start():
+    d = Domain("d", "", ["R", "G"])
+    v1 = Variable("v1", d)
+    v2 = Variable("v2", d)
+    c1 = constraint_from_str("c1", "10 if v1 == v2 else 0", [v1, v2])

-        # If a variable has no neighbors, it does not need to keep any cost
-        # and thus requires no memory
-        self.assertEqual(
-            communication_load(cv1, "f1"), HEADER_SIZE + UNIT_SIZE * len(v1.domain)
-        )
-        self.assertEqual(
-            communication_load(cf1, "v1"), HEADER_SIZE + UNIT_SIZE * len(v1.domain)
-        )
+    obtained = factor_costs_for_var(c1, v1, {}, "min")
+    assert obtained["R"] == 0
+    assert obtained["G"] == 0
+    assert len(obtained) == 2


-class TestsMaxsumMessage(unittest.TestCase):
-    def test_serialize_repr(self):
-        # Make sure that even after serialization / deserialization,
-        # from_repr and simple_repr still produce equal messages.
-        # This has been causing problems with maxsum costs dict where key
-        # were integers
+def test_select_value_no_cost_var():
+    d = Domain("d", "", ["R", "G", "B"])
+    v1 = Variable("v1", d)

-        msg = MaxSumMessage({1: 10, 2: 20})
-        r = simple_repr(msg)
-        msg_json = json.dumps(r)
+    selected, cost = select_value(v1, {}, "min")
+    assert selected in {"R", "G", "B"}
+    assert cost == 0

-        r2 = json.loads(msg_json)
-        msg2 = from_repr(r2)
+    v1 = VariableWithCostFunc("v1", [1, 2, 3], lambda v: (4 - v) / 10)

-        self.assertEqual(msg, msg2)
+    selected, cost = select_value(v1, {}, "min")
+    assert selected == 3
+    assert cost == 0.1
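`test_select_value_no_cost_var` relies on the variable-side selection rule: with no cost messages received, a plain variable can pick any value at cost 0, while a variable with an integrated cost function picks the value minimizing its own cost. The second assertion is plain arithmetic, re-derived here for illustration:

    # Variable cost function from the test: cost(v) = (4 - v) / 10 over {1, 2, 3}.
    own_cost = lambda v: (4 - v) / 10
    domain = [1, 2, 3]

    # No messages received yet: the total cost of a value is just its own cost.
    selected = min(domain, key=own_cost)
    assert selected == 3
    assert own_cost(selected) == 0.1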
diff --git a/tests/unit/test_algorithms_objects.py b/tests/unit/test_algorithms_objects.py
index 3be16ad4..1259b342 100644
--- a/tests/unit/test_algorithms_objects.py
+++ b/tests/unit/test_algorithms_objects.py
@@ -29,58 +29,61 @@
 # POSSIBILITY OF SUCH DAMAGE.

-from pydcop.algorithms import AlgorithmDef, list_available_algorithms, \
-    load_algorithm_module
+from pydcop.algorithms import (
+    AlgorithmDef,
+    list_available_algorithms,
+    load_algorithm_module,
+)
 from pydcop.utils.simple_repr import simple_repr, from_repr


 def test_algo_def():
-    a = AlgorithmDef('maxsum', {'stability': 0.01}, 'min')
+    a = AlgorithmDef("maxsum", {"stability": 0.01}, "min")

-    assert a.algo == 'maxsum'
-    assert a.mode == 'min'
-    assert 'stability' in a.param_names()
-    assert a.param_value('stability') == 0.01
+    assert a.algo == "maxsum"
+    assert a.mode == "min"
+    assert "stability" in a.param_names()
+    assert a.param_value("stability") == 0.01


 def test_simple_repr():
-    a = AlgorithmDef('maxsum', {'stability': 0.01}, 'min')
+    a = AlgorithmDef("maxsum", {"stability": 0.01}, "min")

     r = simple_repr(a)

-    assert r['algo'] == 'maxsum'
-    assert r['mode'] == 'min'
-    assert r['params']['stability'] == 0.01
+    assert r["algo"] == "maxsum"
+    assert r["mode"] == "min"
+    assert r["params"]["stability"] == 0.01


 def test_from_repr():
-    a = AlgorithmDef('maxsum', {'stability': 0.01}, 'min')
+    a = AlgorithmDef("maxsum", {"stability": 0.01}, "min")

     r = simple_repr(a)
     a2 = from_repr(r)

     assert a == a2
-    assert a2.param_value('stability') == 0.01
+    assert a2.param_value("stability") == 0.01


 def test_building_algodef_with_default_params():
-    a = AlgorithmDef.build_with_default_param('maxsum')
+    a = AlgorithmDef.build_with_default_param("amaxsum")

-    assert a.params['damping'] == 0
+    assert a.params["damping"] == 0


 def test_building_algodef_with_provided_and_default_params():
-    a = AlgorithmDef.build_with_default_param('dsa', {'variant': 'B'}, mode='max')
+    a = AlgorithmDef.build_with_default_param("dsa", {"variant": "B"}, mode="max")

-    assert a.params['variant'] == 'B'  # provided param
-    assert a.params['probability'] == 0.7  # default param
-    assert a.algo == 'dsa'
-    assert a.mode == 'max'
+    assert a.params["variant"] == "B"  # provided param
+    assert a.params["probability"] == 0.7  # default param
+    assert a.algo == "dsa"
+    assert a.mode == "max"


 def test_load_algorithm():
@@ -90,15 +93,15 @@ def test_load_algorithm():
     algo = load_algorithm_module(a)

     assert algo.algorithm_name == a
-    assert hasattr(algo, 'communication_load')
-    assert hasattr(algo, 'computation_memory')
+    assert hasattr(algo, "communication_load")
+    assert hasattr(algo, "computation_memory")


 def test_load_algorithm_with_default_footprint():
     # dsatuto has no load method defined : check that we get instead default
     # implementations
-    algo = load_algorithm_module('dsatuto')
-    assert algo.algorithm_name == 'dsatuto'
+    algo = load_algorithm_module("dsatuto")
+    assert algo.algorithm_name == "dsatuto"

     assert algo.communication_load(None, None) == 1
     assert algo.computation_memory(None) == 1

diff --git a/tests/unit/test_distribution_ilp_fgdp.py b/tests/unit/test_distribution_ilp_fgdp.py
index 487ebc82..ed4ab5c9 100644
--- a/tests/unit/test_distribution_ilp_fgdp.py
+++ b/tests/unit/test_distribution_ilp_fgdp.py
@@ -32,8 +32,8 @@
 import unittest
 from collections import namedtuple

-from pydcop.algorithms import maxsum as ms
-from pydcop.algorithms.maxsum import communication_load, computation_memory, \
+from pydcop.algorithms import amaxsum as ms
+from pydcop.algorithms.amaxsum import communication_load, computation_memory, \
     VARIABLE_UNIT_SIZE
 from pydcop.computations_graph.factor_graph import ComputationsFactorGraph, \
     VariableComputationNode, FactorComputationNode, FactorGraphLink
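The ilp_fgdp distribution exercised here uses the same two footprint functions pinned down by the unit tests in tests/unit/test_algorithms_amaxsum.py above: a variable node stores one cost per value and per neighboring factor, a factor node stores one cost slot per value of each neighboring variable, and a message carries one cost per value plus a fixed header. Symbolically (the unit-size constants are the module's; treated as opaque here):

    # Symbolic versions of the footprint formulas the tests pin down.
    def variable_memory(domain_size, n_factors, unit):
        return unit * domain_size * n_factors      # one cost per value, per factor

    def factor_memory(neighbor_domain_sizes, unit):
        return unit * sum(neighbor_domain_sizes)   # one slot per neighbor value

    def message_size(domain_size, header, unit):
        return header + unit * domain_size         # one cost per value, plus header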
diff --git a/tests/unit/test_infra_computations.py b/tests/unit/test_infra_computations.py
index c95f3088..b6a7fd51 100644
--- a/tests/unit/test_infra_computations.py
+++ b/tests/unit/test_infra_computations.py
@@ -35,24 +35,27 @@
 import pytest

 from pydcop.algorithms import AlgorithmDef, ComputationDef, load_algorithm_module
-from pydcop.computations_graph.constraints_hypergraph import \
-    VariableComputationNode
+from pydcop.computations_graph.constraints_hypergraph import VariableComputationNode
 from pydcop.dcop.objects import Variable
 from pydcop.infrastructure.agents import Agent
-from pydcop.infrastructure.computations import Message, message_type, \
-    MessagePassingComputation, register
+from pydcop.infrastructure.computations import (
+    Message,
+    message_type,
+    MessagePassingComputation,
+    register,
+)
 from pydcop.utils.simple_repr import simple_repr
 from pydcop.utils.simple_repr import from_repr


 def test_message():
-    msg = Message('msg_type', 'foo')
-    assert msg.type == 'msg_type'
-    assert msg.content == 'foo'
+    msg = Message("msg_type", "foo")
+    assert msg.type == "msg_type"
+    assert msg.content == "foo"


 def test_message_serialization():
-    msg = Message('msg_type')
+    msg = Message("msg_type")
     r = simple_repr(msg)
     obtained = from_repr(r)
     assert msg == obtained
@@ -60,9 +63,9 @@ def test_message_serialization():


 def test_message_factory():
-    MyMessage = message_type('my_msg', ['foo', 'bar'])
+    MyMessage = message_type("my_msg", ["foo", "bar"])
     msg = MyMessage(42, 21)
-    assert msg.type == 'my_msg'
+    assert msg.type == "my_msg"
     assert msg.foo == 42
     assert msg.bar == 21
@@ -72,9 +75,9 @@ def test_message_factory():


 def test_message_factory_kwargs():
-    MyMessage = message_type('my_msg', ['foo', 'bar'])
+    MyMessage = message_type("my_msg", ["foo", "bar"])
     msg = MyMessage(bar=42, foo=21)
-    assert msg.type == 'my_msg'
+    assert msg.type == "my_msg"
     assert msg.foo == 21
     assert msg.bar == 42
@@ -84,7 +87,7 @@ def test_message_factory_kwargs():


 def test_message_factory_serialization():
-    MyMessage = message_type('my_msg', ['foo', 'bar'])
+    MyMessage = message_type("my_msg", ["foo", "bar"])
     msg = MyMessage(42, 21)
     r = simple_repr(msg)
     print(r)
@@ -94,19 +97,19 @@ def test_message_factory_serialization():


 def test_setting_message_sender_on_computation():
-    c = MessagePassingComputation('c')
+    c = MessagePassingComputation("c")

     c.message_sender = MagicMock()

-    msg = Message('type')
-    c.post_msg('target', msg)
+    msg = Message("type")
+    c.post_msg("target", msg)

-    c.message_sender.assert_called_with('c', 'target', msg, None, None)
+    c.message_sender.assert_called_with("c", "target", msg, None, None)


 def test_setting_message_sender_only_works_once():
-    c = MessagePassingComputation('c')
+    c = MessagePassingComputation("c")

     c.message_sender = MagicMock()
     with pytest.raises(AttributeError):
@@ -115,11 +118,11 @@ def test_setting_message_sender_only_works_once():


 def test_periodic_action_on_computation():
-    a = Agent('a', MagicMock())
+    a = Agent("a", MagicMock())

     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         def on_start(self):
@@ -140,11 +143,11 @@ def action(self):


 def test_remove_periodic_action_on_computation():
-    a = Agent('a', MagicMock())
+    a = Agent("a", MagicMock())

     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         def on_start(self):
@@ -177,11 +180,11 @@ def test_remove(self):


 def test_oneshot_delayed_action_on_computation():
     # To implement a one-shot action, add a periodic action and remove it
     # the first time it is called:
-    a = Agent('a', MagicMock())
+    a = Agent("a", MagicMock())

     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         def on_start(self):
@@ -210,11 +213,11 @@ def action(self):


 def test_several_periodic_action_on_computation():
-    a = Agent('a', MagicMock())
+    a = Agent("a", MagicMock())

     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock1 = MagicMock()
             self.mock2 = MagicMock()
@@ -236,17 +239,17 @@ def action2(self):
     sleep(0.25)
     a.stop()

-    assert 1 <=c.mock1.call_count <= 2
+    assert 1 <= c.mock1.call_count <= 2
     assert c.mock2.call_count == 1


 def test_periodic_action_not_called_when_paused():
-    a = Agent('a', MagicMock())
+    a = Agent("a", MagicMock())

     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         def on_start(self):
@@ -261,10 +264,10 @@ def action(self):
     a.start()
     a.run()
     sleep(0.25)
-    assert 1 <=c.mock.call_count <= 2
+    assert 1 <= c.mock.call_count <= 2

     c.mock.reset_mock()
-    a.pause_computations('test')
+    a.pause_computations("test")

     sleep(0.25)
     assert c.mock.call_count == 0
@@ -272,10 +275,9 @@ def action(self):


 def test_register_handler_decorator():
-
     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         @register("test_type")
@@ -288,10 +290,9 @@ def on_msg(self, sender: str, msg: Message, t: float):


 def test_handler_decorator():
-
     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         @register("test_type")
@@ -303,7 +304,7 @@ def on_msg(self, sender: str, msg: Message, t: float):
     c.start()

     msg = Message("test_type")
-    c.on_message('foo', msg, 0)
+    c.on_message("foo", msg, 0)

     c.mock.assert_called_once_with("foo", msg, 0)

@@ -311,7 +312,7 @@ def on_msg(self, sender: str, msg: Message, t: float):
 def test_handler_decorator_not_called_before_start():
     class TestComputation(MessagePassingComputation):
         def __init__(self):
-            super().__init__('test')
+            super().__init__("test")
             self.mock = MagicMock()

         @register("test_type")
@@ -321,7 +322,7 @@ def on_msg(self, sender: str, msg: Message, t: float):
     c = TestComputation()

     msg = Message("test_type")
-    c.on_message('foo', msg, 0)
+    c.on_message("foo", msg, 0)

     # Computation is NOT started, handled must NOT be called
     c.mock.assert_not_called()
@@ -329,14 +330,17 @@ def on_msg(self, sender: str, msg: Message, t: float):

 def test_memory_footprint():
     # use maxsum as is has a computation_memory function defined
-    maxsum_module = load_algorithm_module('maxsum')
-    from pydcop.computations_graph.factor_graph import \
-        VariableComputationNode as FGVariableComputationNode
-
-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(FGVariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('maxsum'))
-    comp = maxsum_module.VariableAlgo(v1, [], comp_def=comp_def)
+    maxsum_module = load_algorithm_module("amaxsum")
+    from pydcop.computations_graph.factor_graph import (
+        VariableComputationNode as FGVariableComputationNode,
+    )
+
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        FGVariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("amaxsum"),
+    )
+    comp = maxsum_module.MaxSumVariableComputation(comp_def=comp_def)

     # The variable has no neighbors : footprint is 0
     assert comp.footprint() == 0
@@ -344,14 +348,17 @@ def test_memory_footprint():

 def test_memory_footprint_from_import_module():
     # use maxsum as is has a computation_memory function defined
-    maxsum_module = import_module('pydcop.algorithms.maxsum')
-    from pydcop.computations_graph.factor_graph import \
-        VariableComputationNode as FGVariableComputationNode
-
-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(FGVariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('maxsum'))
-    comp = maxsum_module.VariableAlgo(v1, [], comp_def=comp_def)
+    maxsum_module = import_module("pydcop.algorithms.amaxsum")
+    from pydcop.computations_graph.factor_graph import (
+        VariableComputationNode as FGVariableComputationNode,
+    )
+
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        FGVariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("amaxsum"),
+    )
+    comp = maxsum_module.MaxSumVariableComputation(comp_def=comp_def)

     # The variable has no neighbors : footprint is 0
     assert comp.footprint() == 0
@@ -359,14 +366,17 @@ def test_memory_footprint_from_import_module():

 def test_memory_footprint_from_classic_import():
     # use maxsum as is has a computation_memory function defined
-    import pydcop.algorithms.maxsum as maxsum_module
-    from pydcop.computations_graph.factor_graph import \
-        VariableComputationNode as FGVariableComputationNode
-
-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(FGVariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('maxsum'))
-    comp = maxsum_module.VariableAlgo(v1, [], comp_def=comp_def)
+    import pydcop.algorithms.amaxsum as maxsum_module
+    from pydcop.computations_graph.factor_graph import (
+        VariableComputationNode as FGVariableComputationNode,
+    )
+
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        FGVariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("amaxsum"),
+    )
+    comp = maxsum_module.MaxSumVariableComputation(comp_def=comp_def)

     # The variable has no neighbors : footprint is 0
     assert comp.footprint() == 0
@@ -374,11 +384,13 @@ def test_memory_footprint_from_classic_import():

 def test_fallback_memory_footprint():
     # use dsatuto as is has no computation_memory function defined
-    dsa_module = load_algorithm_module('dsatuto')
+    dsa_module = load_algorithm_module("dsatuto")

-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(VariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('dsatuto'))
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        VariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("dsatuto"),
+    )
     comp = dsa_module.DsaTutoComputation(comp_def)

     assert comp.footprint() == 1
@@ -386,11 +398,13 @@ def test_fallback_memory_footprint():

 def test_fallback_memory_footprint_from_import_module():
     # use dsatuto as is has no computation_memory function defined
-    dsa_module = import_module('pydcop.algorithms.dsatuto')
+    dsa_module = import_module("pydcop.algorithms.dsatuto")

-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(VariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('dsatuto'))
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        VariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("dsatuto"),
+    )
     comp = dsa_module.DsaTutoComputation(comp_def)

     assert comp.footprint() == 1
@@ -400,9 +414,11 @@ def test_fallback_memory_footprint_from_classic_import():
     # use dsatuto as is has no computation_memory function defined
     import pydcop.algorithms.dsatuto as dsa_module

-    v1 = Variable('v1', [1,2])
-    comp_def = ComputationDef(VariableComputationNode(v1, []),
-                              AlgorithmDef.build_with_default_param('dsatuto'))
+    v1 = Variable("v1", [1, 2])
+    comp_def = ComputationDef(
+        VariableComputationNode(v1, []),
+        AlgorithmDef.build_with_default_param("dsatuto"),
+    )
     comp = dsa_module.DsaTutoComputation(comp_def)

     assert comp.footprint() == 1
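
The footprint tests above also document a useful fallback: `load_algorithm_module` supplies constant-cost defaults when an algorithm module defines no `communication_load` / `computation_memory` of its own, which is exactly what the dsatuto cases assert. A minimal usage sketch:

    from pydcop.algorithms import load_algorithm_module

    amaxsum = load_algorithm_module("amaxsum")   # defines real footprint functions
    dsa = load_algorithm_module("dsatuto")       # falls back to the defaults

    assert dsa.communication_load(None, None) == 1
    assert dsa.computation_memory(None) == 1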