Deffuant model #33

Merged · 17 commits · Mar 22, 2024
2 changes: 1 addition & 1 deletion environment.yml
@@ -16,6 +16,6 @@ dependencies:
- cmake
- ninja
- cpp-argparse
- clang-format
- clang-format=18.1.1
- tomlplusplus
- catch2
22 changes: 22 additions & 0 deletions examples/Deffuant/conf.toml
@@ -0,0 +1,22 @@
[simulation]
model = "Deffuant"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
# n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, progress is not printed
output_initial = true # Write the initial opinions and network from step 0. If not set, this defaults to true.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 1000 # If not set, max iterations is infinite

[Deffuant]
homophily_threshold = 0.2 # d in the paper; agents interact if difference in opinion is less than this value
mu = 0.5 # convergence parameter, in (0, 0.5]; similar to the social interaction strength K
use_network = false # If true, uses a square lattice network; throws if sqrt(n_agents) is not an integer

[network]
number_of_agents = 1000
connections_per_agent = 0
58 changes: 58 additions & 0 deletions include/agents/discrete_vector_agent.hpp
@@ -0,0 +1,58 @@
#pragma once

#include "agent.hpp"
#include "agent_io.hpp"
#include "util/misc.hpp"
#include <cstddef>
#include <string>
#include <vector>

namespace Seldon
{

struct DiscreteVectorAgentData
{
std::vector<int> opinion{};
};

using DiscreteVectorAgent = Agent<DiscreteVectorAgentData>;

template<>
inline std::string agent_to_string<DiscreteVectorAgent>( const DiscreteVectorAgent & agent )
{
if( agent.data.opinion.empty() )
return "";

auto res = fmt::format( "{}", agent.data.opinion[0] );
for( size_t i = 1; i < agent.data.opinion.size(); i++ )
{
res += fmt::format( ", {}", agent.data.opinion[i] );
}
return res;
}

template<>
inline std::string opinion_to_string<DiscreteVectorAgent>( const DiscreteVectorAgent & agent )
{
return agent_to_string( agent );
}

template<>
inline DiscreteVectorAgent agent_from_string<DiscreteVectorAgent>( const std::string & str )
{
DiscreteVectorAgent res{};

auto callback = [&]( int idx_list [[maybe_unused]], const auto & substring )
{ res.data.opinion.push_back( std::stoi( substring ) ); };

parse_comma_separated_list( str, callback );
return res;
}

// template<>
// inline std::vector<std::string> agent_to_string_column_names<ActivityAgent>()
// {
// return { "opinion", "activity", "reluctance" };
// }

} // namespace Seldon
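As a quick illustration of the serialization pair above, here is a hypothetical round trip, assuming parse_comma_separated_list splits the string on commas and hands each token to the callback:

    // Hypothetical usage sketch, not part of the diff:
    auto agent = Seldon::agent_from_string<Seldon::DiscreteVectorAgent>( "1, 0, 2" );
    // agent.data.opinion now holds {1, 0, 2}; std::stoi ignores the leading spaces
    fmt::print( "{}\n", Seldon::agent_to_string( agent ) ); // prints "1, 0, 2"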
14 changes: 12 additions & 2 deletions include/config_parser.hpp
@@ -24,7 +24,8 @@ namespace Seldon::Config
enum class Model
{
DeGroot,
ActivityDrivenModel
ActivityDrivenModel,
DeffuantModel
};

struct OutputSettings
@@ -43,6 +44,15 @@ struct DeGrootSettings
double convergence_tol;
};

struct DeffuantSettings
{
std::optional<int> max_iterations = std::nullopt;
double homophily_threshold
= 0.2; // d in the paper; agents interact if difference in opinion is less than this value
double mu = 0.5; // convergence parameter; similar to social interaction strength K (0,0.5]
bool use_network = false;
};

struct ActivityDrivenSettings
{
std::optional<int> max_iterations = std::nullopt;
@@ -81,7 +91,7 @@ struct SimulationOptions
std::string model_string;
int rng_seed = std::random_device()();
OutputSettings output_settings;
std::variant<DeGrootSettings, ActivityDrivenSettings> model_settings;
std::variant<DeGrootSettings, ActivityDrivenSettings, DeffuantSettings> model_settings;
InitialNetworkSettings network_settings;
};

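Downstream code can branch on the widened variant in the usual std::variant way. A minimal sketch, assuming the parser has filled model_settings for a Deffuant run and options is a parsed SimulationOptions:

    // Hypothetical dispatch using names from this header:
    if( auto * deffuant = std::get_if<Seldon::Config::DeffuantSettings>( &options.model_settings ) )
    {
        // use deffuant->homophily_threshold, deffuant->mu, deffuant->use_network
    }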
86 changes: 42 additions & 44 deletions include/models/ActivityDrivenModel.hpp
@@ -19,6 +19,48 @@ class ActivityDrivenModel : public Model<ActivityAgent>
using AgentT = ActivityAgent;
using NetworkT = Network<AgentT>;

ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set

void iteration() override;

// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
// Various free parameters
int m{}; // Number of agents contacted, when the agent is active
double eps{}; // Minimum activity epsilon; a_i belongs to [epsilon,1]
double gamma{}; // Exponent of activity power law distribution of activities
double alpha{}; // Controversialness of the issue, must be greater than 0.
double homophily{}; // aka beta. if zero, agents pick their interaction partners at random
// Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling
// j also sends feedback to i. So every agent can have more than m incoming connections
double reciprocity{};
double K{}; // Social interaction strength; K>0

bool mean_activities = false;
bool mean_weights = false;

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean{};
double reluctance_sigma{};
double reluctance_eps{};
double covariance_factor{};

// bot @TODO: less hacky
size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

private:
NetworkT & network;
std::vector<std::vector<NetworkT::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
@@ -62,50 +104,6 @@ class ActivityDrivenModel : public Model<ActivityAgent>
void update_network_probabilistic();
void update_network_mean();
void update_network();

public:
// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
// Various free parameters
int m = 10; // Number of agents contacted, when the agent is active
double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1]
double gamma = 2.1; // Exponent of activity power law distribution of activities
double alpha = 3.0; // Controversialness of the issue, must be greater than 0.
double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random
// Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling
// j also sends feedback to i. So every agent can have more than m incoming connections
double reciprocity = 0.5;
double K = 3.0; // Social interaction strength; K>0

bool mean_activities = false;
bool mean_weights = false;

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;

// bot @TODO: less hacky

size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set

void iteration() override;
};

} // namespace Seldon
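The parameters moved above are those of the activity-driven model of Baumann et al. (2020); stated here for orientation only, each opinion there evolves under the coupled ODEs

    dx_i/dt = -x_i + K * sum_j A_ij(t) * tanh( alpha * x_j )

integrated with timestep dt, while get_agents_from_power_law() draws activities a_i from a power-law distribution ~ a^(-gamma) truncated to [eps, 1].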
113 changes: 113 additions & 0 deletions include/models/DeffuantModel.hpp
@@ -0,0 +1,113 @@
#pragma once

#include "agents/discrete_vector_agent.hpp"
#include "agents/simple_agent.hpp"
#include "model.hpp"
#include "network.hpp"
#include "util/math.hpp"
#include <cstddef>
#include <random>

#include "network_generation.hpp"
#include <set>
#include <string>
#include <utility>

#include <vector>

namespace Seldon
{

template<typename AgentT_>
class DeffuantModelAbstract : public Model<AgentT_>
{
public:
using AgentT = AgentT_;
using NetworkT = Network<AgentT>;

double homophily_threshold = 0.2; // d in paper
double mu = 0.5; // convergence parameter
bool use_network = false; // for the basic Deffuant model

DeffuantModelAbstract( NetworkT & network, std::mt19937 & gen, bool use_network )
: Model<AgentT>(), use_network( use_network ), network( network ), gen( gen )
{
// Generate the network as a square lattice if use_network is true
if( use_network )
{
size_t n_edge = std::sqrt( network.n_agents() );
if( n_edge * n_edge != network.n_agents() )
{
throw std::runtime_error( "Number of agents is not a square number." );
}
network = NetworkGeneration::generate_square_lattice<AgentT>( n_edge );
}
initialize_agents();
}

std::vector<std::size_t> select_interacting_agent_pair()
{
auto interacting_agents = std::vector<std::size_t>();
// If the basic model is being used, then search from all possible agents
if( !use_network )
{

// Pick any two agents to interact, randomly, without repetition
draw_unique_k_from_n( std::nullopt, 2, network.n_agents(), interacting_agents, gen );

return interacting_agents;
}
else
{
// First select an agent randomly
auto dist = std::uniform_int_distribution<size_t>( 0, network.n_agents() - 1 );
auto agent1_idx = dist( gen );
interacting_agents.push_back( agent1_idx );

// Choose a neighbour randomly from the neighbour list of agent1_idx
auto neighbours = network.get_neighbours( agent1_idx );
auto n_neighbours = neighbours.size();
auto dist_n = std::uniform_int_distribution<size_t>( 0, n_neighbours - 1 );
auto index_in_neigh = dist_n( gen ); // Index inside neighbours list
auto agent2_idx = neighbours[index_in_neigh];
interacting_agents.push_back( agent2_idx );

return interacting_agents;
}
}

void iteration() override
{
Model<AgentT>::iteration(); // Update n_iterations

// Although the model defines each iteration as choosing *one*
// pair of agents, we will define each iteration as sampling
// n_agents pairs (similar to the time unit in evolution plots in the paper)
for( size_t i = 0; i < network.n_agents(); i++ )
{

auto interacting_agents = select_interacting_agent_pair();

auto & agent1 = network.agents[interacting_agents[0]];
auto & agent2 = network.agents[interacting_agents[1]];

update_rule( agent1, agent2 );
}
}

// template<typename T>
void update_rule( AgentT & agent1, AgentT & agent2 );
void initialize_agents();

// void iteration() override;
// bool finished() override;

private:
NetworkT & network;
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
};

using DeffuantModel = DeffuantModelAbstract<SimpleAgent>;
using DeffuantModelVector = DeffuantModelAbstract<DiscreteVectorAgent>;

} // namespace Seldon
Loading