Commit
Merge pull request #36 from seldon-code/develop
Develop
amritagos authored Mar 22, 2024
2 parents 229937f + f7386a3 commit 2481785
Showing 29 changed files with 1,192 additions and 168 deletions.
2 changes: 1 addition & 1 deletion environment.yml
@@ -16,6 +16,6 @@ dependencies:
- cmake
- ninja
- cpp-argparse
- clang-format
- clang-format=18.1.1
- tomlplusplus
- catch2
22 changes: 22 additions & 0 deletions examples/Deffuant/conf.toml
@@ -0,0 +1,22 @@
[simulation]
model = "Deffuant"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
# n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, progress is not printed
output_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 1000 # If not set, max iterations is infinite

[Deffuant]
homophily_threshold = 0.2 # d in the paper; agents interact if difference in opinion is less than this value
mu = 0.5 # convergence parameter in (0, 0.5]; similar to the social interaction strength K
use_network = false # If true, a square lattice network is used; throws if sqrt(n_agents) is not an integer

[network]
number_of_agents = 1000
connections_per_agent = 0
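
The two [Deffuant] parameters map directly onto the pairwise update rule: a randomly chosen pair (i, j) interacts only if |x_i - x_j| < homophily_threshold, after which each opinion moves toward the other by mu times the difference. A minimal sketch of one such interaction (not part of this diff; the helper name is hypothetical):

#include <cmath>

// One Deffuant interaction between two scalar opinions; d is
// homophily_threshold and mu the convergence parameter from conf.toml.
void deffuant_step( double & x_i, double & x_j, double d, double mu )
{
    if( std::abs( x_i - x_j ) < d ) // interact only if opinions are close enough
    {
        double diff = x_j - x_i;
        x_i += mu * diff; // both opinions move toward each other
        x_j -= mu * diff;
    }
}

With mu = 0.5 the pair meets exactly halfway, which is why mu is restricted to (0, 0.5].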
24 changes: 24 additions & 0 deletions examples/DeffuantVector/conf.toml
@@ -0,0 +1,24 @@
[simulation]
model = "Deffuant"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
# n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, progress is not printed
output_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 1000 # If not set, max iterations is infinite

[Deffuant]
homophily_threshold = 0.2 # d in the paper; agents interact if difference in opinion is less than this value
mu = 0.5 # convergence parameter in (0, 0.5]; similar to the social interaction strength K
use_network = false # If true, a square lattice network is used; throws if sqrt(n_agents) is not an integer
binary_vector = true # If true, the multi-dimensional binary-vector Deffuant model is used
dim = 5 # For the multi-dimensional binary vector Deffuant model, define the number of dimensions in each opinion vector

[network]
number_of_agents = 1000
connections_per_agent = 0
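
In the binary-vector variant the same two parameters are reinterpreted over opinion vectors of length dim. One plausible reading, shown as a sketch under assumptions rather than Seldon's exact rule: distance is the fraction of differing entries, and each differing entry is adopted with probability mu.

#include <cstddef>
#include <random>
#include <vector>

// Sketch of one interaction in the binary-vector Deffuant variant; assumes
// opinions are 0/1 vectors, similarity is the fraction of differing entries,
// and agent a adopts each differing entry of b with probability mu. The
// helper name and the exact rule are assumptions, not this PR's code.
void deffuant_vector_step( std::vector<int> & a, const std::vector<int> & b,
                           double homophily_threshold, double mu, std::mt19937 & gen )
{
    std::size_t n_diff = 0;
    for( std::size_t k = 0; k < a.size(); k++ )
        n_diff += static_cast<std::size_t>( a[k] != b[k] );

    if( static_cast<double>( n_diff ) / static_cast<double>( a.size() ) >= homophily_threshold )
        return; // too dissimilar to interact

    std::bernoulli_distribution adopt( mu );
    for( std::size_t k = 0; k < a.size(); k++ )
    {
        if( a[k] != b[k] && adopt( gen ) )
            a[k] = b[k]; // adopt b's entry on this dimension
    }
}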
58 changes: 58 additions & 0 deletions include/agents/discrete_vector_agent.hpp
@@ -0,0 +1,58 @@
#pragma once

#include "agent.hpp"
#include "agent_io.hpp"
#include "util/misc.hpp"
#include <cstddef>
#include <string>
#include <vector>

namespace Seldon
{

struct DiscreteVectorAgentData
{
std::vector<int> opinion{};
};

using DiscreteVectorAgent = Agent<DiscreteVectorAgentData>;

template<>
inline std::string agent_to_string<DiscreteVectorAgent>( const DiscreteVectorAgent & agent )
{
if( agent.data.opinion.empty() )
return "";

auto res = fmt::format( "{}", agent.data.opinion[0] );
for( size_t i = 1; i < agent.data.opinion.size(); i++ )
{
res += fmt::format( ", {}", agent.data.opinion[i] );
}
return res;
}

template<>
inline std::string opinion_to_string<DiscreteVectorAgent>( const DiscreteVectorAgent & agent )
{
return agent_to_string( agent );
}

template<>
inline DiscreteVectorAgent agent_from_string<DiscreteVectorAgent>( const std::string & str )
{
DiscreteVectorAgent res{};

auto callback = [&]( int idx_list [[maybe_unused]], const auto & substring )
{ res.data.opinion.push_back( std::stoi( substring ) ); };

parse_comma_separated_list( str, callback );
return res;
}

// template<>
// inline std::vector<std::string> agent_to_string_column_names<ActivityAgent>()
// {
// return { "opinion", "activity", "reluctance" };
// }

} // namespace Seldon
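
A quick round-trip check of the new specializations (a usage sketch, assuming the header above is on the include path):

#include "agents/discrete_vector_agent.hpp"
#include <cassert>

int main()
{
    Seldon::DiscreteVectorAgent agent{};
    agent.data.opinion = { 1, 0, 1 };

    // agent_to_string joins the entries as "1, 0, 1";
    // agent_from_string parses that back via parse_comma_separated_list.
    auto str    = Seldon::agent_to_string( agent );
    auto parsed = Seldon::agent_from_string<Seldon::DiscreteVectorAgent>( str );

    assert( parsed.data.opinion == agent.data.opinion ); // lossless round trip
}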
18 changes: 16 additions & 2 deletions include/config_parser.hpp
@@ -24,7 +24,8 @@ namespace Seldon::Config
enum class Model
{
DeGroot,
ActivityDrivenModel
ActivityDrivenModel,
DeffuantModel
};

struct OutputSettings
@@ -43,6 +44,18 @@ struct DeGrootSettings
double convergence_tol;
};

struct DeffuantSettings
{
std::optional<int> max_iterations = std::nullopt;
double homophily_threshold
= 0.2; // d in the paper; agents interact if difference in opinion is less than this value
double mu = 0.5; // convergence parameter in (0, 0.5]; similar to the social interaction strength K
bool use_network = false; // For using a square lattice network
bool use_binary_vector = false; // For the multi-dimensional DeffuantModelVector; by default set to false
size_t dim
= 1; // The size of the opinions vector. This is used for the multi-dimensional DeffuantModelVector model.
};

struct ActivityDrivenSettings
{
std::optional<int> max_iterations = std::nullopt;
@@ -77,11 +90,12 @@ struct InitialNetworkSettings

struct SimulationOptions
{
using ModelVariantT = std::variant<DeGrootSettings, ActivityDrivenSettings, DeffuantSettings>;
Model model;
std::string model_string;
int rng_seed = std::random_device()();
OutputSettings output_settings;
std::variant<DeGrootSettings, ActivityDrivenSettings> model_settings;
ModelVariantT model_settings;
InitialNetworkSettings network_settings;
};

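With DeffuantSettings added to the variant, downstream code can branch on the active alternative in the usual std::variant ways; a small sketch (illustrative, not code from this PR, and the function name is hypothetical):

#include "config_parser.hpp"
#include <variant>

// Returns the configured homophily threshold if the options currently hold
// DeffuantSettings, otherwise the DeffuantSettings default of 0.2.
double deffuant_threshold_or_default( const Seldon::Config::SimulationOptions & options )
{
    if( auto * s = std::get_if<Seldon::Config::DeffuantSettings>( &options.model_settings ) )
        return s->homophily_threshold;
    return 0.2;
}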
4 changes: 3 additions & 1 deletion include/model.hpp
@@ -13,7 +13,8 @@ class Model
public:
using AgentT = AgentT_;

std::optional<size_t> max_iterations = std::nullopt;
Model() = default;
Model( std::optional<size_t> max_iterations ) : max_iterations( max_iterations ) {}

virtual void initialize_iterations()
{
@@ -45,6 +46,7 @@ class Model
virtual ~Model() = default;

private:
std::optional<size_t> max_iterations = std::nullopt;
size_t _n_iterations{};
};

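With max_iterations now private, derived models forward it through the new constructor instead of assigning the member directly. A sketch of what a subclass looks like after this change (DummyAgent and DummyModel are hypothetical stand-ins):

#include "model.hpp"
#include <cstddef>
#include <optional>

struct DummyAgent { double opinion{}; };

class DummyModel : public Seldon::Model<DummyAgent>
{
public:
    DummyModel( std::optional<std::size_t> max_iterations )
            : Model<DummyAgent>( max_iterations ) {}

    void iteration() override
    {
        Model<DummyAgent>::iteration(); // assuming the base advances its (now private) iteration counter
        // ...update agents here...
    }
};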
99 changes: 99 additions & 0 deletions include/model_factory.hpp
@@ -0,0 +1,99 @@
#include "config_parser.hpp"
#include "model.hpp"
#include "models/ActivityDrivenModel.hpp"
#include "models/DeGroot.hpp"
#include "models/DeffuantModel.hpp"
#include "network.hpp"
#include <memory>
#include <random>
#include <stdexcept>
#include <type_traits>

namespace Seldon::ModelFactory
{

using ModelVariantT = Config::SimulationOptions::ModelVariantT;

template<typename AgentT, typename ModelT, typename FuncT>
auto check_agent_type( FuncT func )
{
if constexpr( std::is_same_v<AgentT, typename ModelT::AgentT> )
{
return func();
}
else
{
throw std::runtime_error( "Incompatible agent and model type!" );
return std::unique_ptr<Model<AgentT>>{};
}
}

template<typename AgentT>
inline auto create_model_degroot( Network<AgentT> & network, const ModelVariantT & model_settings )
{
if constexpr( std::is_same_v<AgentT, DeGrootModel::AgentT> )
{
auto degroot_settings = std::get<Config::DeGrootSettings>( model_settings );
auto model = std::make_unique<DeGrootModel>( degroot_settings, network );
return model;
}
else
{
throw std::runtime_error( "Incompatible agent and model type!" );
return std::unique_ptr<Model<AgentT>>{};
}
}

template<typename AgentT>
inline auto
create_model_activity_driven( Network<AgentT> & network, const ModelVariantT & model_settings, std::mt19937 & gen )
{
if constexpr( std::is_same_v<AgentT, ActivityDrivenModel::AgentT> )
{
auto activitydriven_settings = std::get<Config::ActivityDrivenSettings>( model_settings );
auto model = std::make_unique<ActivityDrivenModel>( activitydriven_settings, network, gen );
return model;
}
else
{
throw std::runtime_error( "Incompatible agent and model type!" );
return std::unique_ptr<Model<AgentT>>{};
}
}

template<typename AgentT>
inline auto create_model_deffuant( Network<AgentT> & network, const ModelVariantT & model_settings, std::mt19937 & gen )
{
if constexpr( std::is_same_v<AgentT, DeffuantModel::AgentT> )
{
auto deffuant_settings = std::get<Config::DeffuantSettings>( model_settings );
auto model = std::make_unique<DeffuantModel>( deffuant_settings, network, gen );
model->initialize_agents( deffuant_settings.dim );
return model;
}
else
{
throw std::runtime_error( "Incompatible agent and model type!" );
return std::unique_ptr<Model<AgentT>>{};
}
}

template<typename AgentT>
inline auto
create_model_deffuant_vector( Network<AgentT> & network, const ModelVariantT & model_settings, std::mt19937 & gen )
{
if constexpr( std::is_same_v<AgentT, DeffuantModelVector::AgentT> )
{
auto deffuant_settings = std::get<Config::DeffuantSettings>( model_settings );
auto model = std::make_unique<DeffuantModelVector>( deffuant_settings, network, gen );
model->initialize_agents( deffuant_settings.dim );
return model;
}
else
{
throw std::runtime_error( "Incompatible agent and model type!" );
return std::unique_ptr<Model<AgentT>>{};
}
}

} // namespace Seldon::ModelFactory
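
A usage sketch of the new factory helpers (hypothetical wiring; the PR's Simulation class may dispatch differently). Note that std::get<Config::DeffuantSettings> inside create_model_deffuant throws std::bad_variant_access if the options hold settings for another model:

#include "model_factory.hpp"
#include "models/DeffuantModel.hpp"
#include <random>

// Because the network's agent type matches DeffuantModel::AgentT, the
// if constexpr branch that builds the model is taken, and the factory also
// calls initialize_agents( settings.dim ) before returning.
auto make_deffuant( Seldon::Network<Seldon::DeffuantModel::AgentT> & network,
                    const Seldon::Config::SimulationOptions & options, std::mt19937 & gen )
{
    return Seldon::ModelFactory::create_model_deffuant( network, options.model_settings, gen );
}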
85 changes: 41 additions & 44 deletions include/models/ActivityDrivenModel.hpp
@@ -1,6 +1,8 @@
#pragma once

#include "agents/activity_agent.hpp"
#include "config_parser.hpp"

#include "model.hpp"
#include "network.hpp"
#include <cstddef>
@@ -19,19 +21,58 @@ class ActivityDrivenModel : public Model<ActivityAgent>
using AgentT = ActivityAgent;
using NetworkT = Network<AgentT>;

ActivityDrivenModel( const Config::ActivityDrivenSettings & settings, NetworkT & network, std::mt19937 & gen );

void iteration() override;

private:
NetworkT & network;
std::vector<std::vector<NetworkT::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};

// Model-specific parameters
double dt{}; // Timestep for the integration of the coupled ODEs
// Various free parameters
int m{}; // Number of agents contacted, when the agent is active
double eps{}; // Minimum activity epsilon; a_i belongs to [epsilon,1]
double gamma{}; // Exponent of activity power law distribution of activities
double alpha{}; // Controversialness of the issue, must be greater than 0.
double homophily{}; // aka beta. if zero, agents pick their interaction partners at random
// Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling
// j also sends feedback to i. So every agent can have more than m incoming connections
double reciprocity{};
double K{}; // Social interaction strength; K>0

bool mean_activities = false;
bool mean_weights = false;

bool use_reluctances = false;
double reluctance_mean{};
double reluctance_sigma{};
double reluctance_eps{};
double covariance_factor{};

size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );

// Buffers for RK4 integration
std::vector<double> k1_buffer{};
std::vector<double> k2_buffer{};
std::vector<double> k3_buffer{};
std::vector<double> k4_buffer{};

void get_agents_from_power_law();

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

template<typename Opinion_Callback>
void get_euler_slopes( std::vector<double> & k_buffer, Opinion_Callback opinion )
{
@@ -62,50 +103,6 @@ class ActivityDrivenModel : public Model<ActivityAgent>
void update_network_probabilistic();
void update_network_mean();
void update_network();

public:
// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
// Various free parameters
int m = 10; // Number of agents contacted, when the agent is active
double eps = 0.01; // Minimum activity epsilon; a_i belongs to [epsilon,1]
double gamma = 2.1; // Exponent of activity power law distribution of activities
double alpha = 3.0; // Controversialness of the issue, must be greater than 0.
double homophily = 0.5; // aka beta. if zero, agents pick their interaction partners at random
// Reciprocity aka r. probability that when agent i contacts j via weighted reservoir sampling
// j also sends feedback to i. So every agent can have more than m incoming connections
double reciprocity = 0.5;
double K = 3.0; // Social interaction strength; K>0

bool mean_activities = false;
bool mean_weights = false;

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;

// bot @TODO: less hacky

size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set

void iteration() override;
};

} // namespace Seldon
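
The members gathered above parameterize the activity-driven opinion dynamics, whose deterministic part is the standard ODE dx_i/dt = -x_i + K * sum_j A_ij * tanh(alpha * x_j), integrated with RK4 via the k1..k4 buffers. A simplified sketch of the slope computation (ignoring reluctance, bots, and the network's weight layout, which get_euler_slopes handles; not Seldon's code):

#include <cmath>
#include <cstddef>
#include <vector>

// Right-hand side of the activity-driven ODE on a dense weight matrix A;
// a simplified stand-in for one k-buffer evaluation in the RK4 scheme.
std::vector<double> slopes( const std::vector<double> & x,
                            const std::vector<std::vector<double>> & A,
                            double K, double alpha )
{
    std::vector<double> k( x.size(), 0.0 );
    for( std::size_t i = 0; i < x.size(); i++ )
    {
        double coupling = 0.0;
        for( std::size_t j = 0; j < x.size(); j++ )
            coupling += A[i][j] * std::tanh( alpha * x[j] ); // neighbor influence
        k[i] = -x[i] + K * coupling; // relaxation plus social interaction
    }
    return k;
}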
