Skip to content

Commit

Permalink
Merge pull request #26 from seldon-code/reluctant_activity_driven_model
Browse files Browse the repository at this point in the history
Reluctant activity driven model
  • Loading branch information
MSallermann authored Mar 18, 2024
2 parents f6dec98 + cfd05ad commit 78a5d1b
Show file tree
Hide file tree
Showing 14 changed files with 172 additions and 46 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,4 @@ jobs:
- name: Test with meson
shell: micromamba-shell {0}
run: |
meson test -C build
meson test -C build --verbose
32 changes: 32 additions & 0 deletions examples/ActivityDrivenReluctance/conf.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
[simulation]
model = "ActivityDriven"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, progress is always printed

[model]
max_iterations = 500 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 0.65 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 1.0 # aka beta. if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 2.0
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent. By default this is false, and every agent has a reluctance of 1
reluctance_mean = 1.0 # Mean of distribution before drawing from a truncated normal distribution (default set to 1.0)
reluctance_sigma = 0.25 # Width of normal distribution (before truncating)
reluctance_eps = 0.01 # Minimum such that the normal distribution is truncated at this value

[network]
number_of_agents = 1000
connections_per_agent = 10
5 changes: 5 additions & 0 deletions include/config_parser.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,11 @@ struct ActivityDrivenSettings
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );
bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;
};

struct InitialNetworkSettings
Expand Down
46 changes: 33 additions & 13 deletions include/models/ActivityDrivenModel.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
#include <cstddef>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

Expand All @@ -15,25 +15,41 @@ namespace Seldon

// Per-agent state for the (reluctant) activity-driven model.
struct ActivityAgentData
{
    double opinion    = 0;   // x_i: the agent's current opinion
    double activity   = 0;   // a_i: activity level, drawn from a power law in [eps, 1]
    double reluctance = 1.0; // m_i: resistance to social influence; 1.0 means no reluctance
};

// Serialises the agent as "opinion, activity, reluctance" — the inverse of from_string.
// NOTE: the diff view contained both the old 2-field and new 3-field return; only the
// 3-field form is correct, otherwise reluctance is silently dropped on output.
template<>
inline std::string Agent<ActivityAgentData>::to_string() const
{
    return fmt::format( "{}, {}, {}", data.opinion, data.activity, data.reluctance );
};

// Parses "opinion, activity[, reluctance]" as written by to_string.
// The reluctance field is optional for backward compatibility with old
// two-column agent files; when absent, the default reluctance (1.0) is kept.
template<>
inline void Agent<ActivityAgentData>::from_string( const std::string & str )
{
    auto pos_comma      = str.find_first_of( ',' );
    auto pos_next_comma = str.find( ',', pos_comma + 1 );

    data.opinion = std::stod( str.substr( 0, pos_comma ) );

    if( pos_next_comma == std::string::npos )
    {
        // Old format: only opinion and activity
        data.activity = std::stod( str.substr( pos_comma + 1, str.size() ) );
    }
    else
    {
        // substr's second argument is a length, not an end index; the slice may
        // extend past the second comma, but std::stod stops at the first
        // non-numeric character, so the activity is still parsed correctly.
        data.activity   = std::stod( str.substr( pos_comma + 1, pos_next_comma ) );
        data.reluctance = std::stod( str.substr( pos_next_comma + 1, str.size() ) );
    }
};

class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
class ActivityDrivenModel : public Model<Agent<ActivityAgentData>>
{
public:
using AgentT = Agent<ActivityAgentData>;
Expand Down Expand Up @@ -69,7 +85,8 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
{
j_index = neighbour_buffer[j];
k_buffer[idx_agent] += K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) );
k_buffer[idx_agent] += 1.0 / network.agents[idx_agent].data.reluctance * K * weight_buffer[j]
* std::tanh( alpha * opinion( j_index ) );
}
// Multiply by the timestep
k_buffer[idx_agent] *= dt;
Expand Down Expand Up @@ -101,6 +118,12 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;

// bot @TODO: less hacky

size_t n_bots = 0; // The first n_bots agents are bots
Expand All @@ -114,14 +137,11 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
return n_bots > 0;
}

ActivityAgentModel( NetworkT & network, std::mt19937 & gen );
ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set

void iteration() override;

// bool finished() override;
};

} // namespace Seldon
11 changes: 8 additions & 3 deletions include/simulation.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,13 +98,13 @@ class Simulation : public SimulationInterface
network.agents = AgentGeneration::generate_from_file<DeGrootModel::AgentT>( cli_agent_file.value() );
}
}
else if constexpr( std::is_same_v<AgentType, ActivityAgentModel::AgentT> )
else if constexpr( std::is_same_v<AgentType, ActivityDrivenModel::AgentT> )
{
auto activitydriven_settings = std::get<Config::ActivityDrivenSettings>( options.model_settings );

model = [&]()
{
auto model = std::make_unique<ActivityAgentModel>( network, gen );
auto model = std::make_unique<ActivityDrivenModel>( network, gen );
model->dt = activitydriven_settings.dt;
model->m = activitydriven_settings.m;
model->eps = activitydriven_settings.eps;
Expand All @@ -116,6 +116,11 @@ class Simulation : public SimulationInterface
model->mean_activities = activitydriven_settings.mean_activities;
model->mean_weights = activitydriven_settings.mean_weights;
model->max_iterations = activitydriven_settings.max_iterations;
// Reluctance
model->use_reluctances = activitydriven_settings.use_reluctances;
model->reluctance_mean = activitydriven_settings.reluctance_mean;
model->reluctance_sigma = activitydriven_settings.reluctance_sigma;
model->reluctance_eps = activitydriven_settings.reluctance_eps;
// bot
model->n_bots = activitydriven_settings.n_bots;
model->bot_opinion = activitydriven_settings.bot_opinion;
Expand All @@ -129,7 +134,7 @@ class Simulation : public SimulationInterface
if( cli_agent_file.has_value() )
{
network.agents
= AgentGeneration::generate_from_file<ActivityAgentModel::AgentT>( cli_agent_file.value() );
= AgentGeneration::generate_from_file<ActivityDrivenModel::AgentT>( cli_agent_file.value() );
}
}
}
Expand Down
34 changes: 34 additions & 0 deletions include/util/math.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -138,4 +138,38 @@ class power_law_distribution
}
};

/**
 * @brief Normal distribution truncated from below.
 *
 * Continuous distribution supported on [eps, infty) with density
 * p(x) ~ e^(-(x-mean)^2/(2 sigma^2)). Samples are produced by rejection:
 * values are drawn from the untruncated normal until one exceeds eps;
 * after a bounded number of failed attempts, eps itself is returned.
 */
template<typename ScalarT = double>
class truncated_normal_distribution
{
private:
    ScalarT mean{};
    ScalarT sigma{};
    ScalarT eps{};
    std::normal_distribution<ScalarT> normal_dist{};
    size_t max_tries = 5000; // Upper bound on rejection attempts per draw

public:
    truncated_normal_distribution( ScalarT mean, ScalarT sigma, ScalarT eps )
            : mean( mean ), sigma( sigma ), eps( eps ), normal_dist( std::normal_distribution<ScalarT>( mean, sigma ) )
    {
    }

    /**
     * @brief Draw one sample using the supplied random generator.
     * Falls back to eps when no acceptable sample is found within max_tries.
     */
    template<typename Generator>
    ScalarT operator()( Generator & gen )
    {
        size_t n_tries = 0;
        while( n_tries++ < max_tries )
        {
            const auto candidate = normal_dist( gen );
            if( candidate > eps )
                return candidate;
        }
        return eps;
    }
};

} // namespace Seldon
14 changes: 12 additions & 2 deletions src/config_parser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,11 @@ SimulationOptions parse_config_file( std::string_view config_file_path )
model_settings.K = tbl["ActivityDriven"]["K"].value_or<double>( 3.0 );
model_settings.mean_activities = tbl["ActivityDriven"]["mean_activities"].value_or<bool>( false );
model_settings.mean_weights = tbl["ActivityDriven"]["mean_weights"].value_or<bool>( false );
// Reluctances
model_settings.use_reluctances = tbl["ActivityDriven"]["reluctances"].value_or<bool>( false );
model_settings.reluctance_mean = tbl["ActivityDriven"]["reluctance_mean"].value_or<double>( 1.0 );
model_settings.reluctance_sigma = tbl["ActivityDriven"]["reluctance_sigma"].value_or<double>( 0.25 );
model_settings.reluctance_eps = tbl["ActivityDriven"]["reluctance_eps"].value_or<double>( 0.01 );

model_settings.max_iterations = tbl["model"]["max_iterations"].value<int>();

Expand Down Expand Up @@ -165,7 +170,12 @@ void validate_settings( const SimulationOptions & options )
check( name_and_var( model_settings.alpha ), geq_zero );
// check( name_and_var( model_settings.homophily ), geq_zero );
check( name_and_var( model_settings.reciprocity ), geq_zero );

// Reluctance options
check( name_and_var( model_settings.reluctance_mean ), g_zero );
check( name_and_var( model_settings.reluctance_sigma ), g_zero );
check( name_and_var( model_settings.reluctance_eps ), g_zero );
check( name_and_var( model_settings.covariance_factor ), geq_zero );
// Bot options
size_t n_bots = model_settings.n_bots;
auto check_bot_size = [&]( auto x ) { return x.size() >= n_bots; };
const std::string bot_msg = "Length needs to be >= n_bots";
Expand All @@ -186,7 +196,7 @@ void print_settings( const SimulationOptions & options )
fmt::print( "INFO: Seeding with seed {}\n", options.rng_seed );
fmt::print( "Model type: {}\n", options.model_string );
fmt::print( "Network has {} agents\n", options.network_settings.n_agents );

// @TODO: Optionally print *all* settings to the console, including defaults that were set
if( options.model == Model::ActivityDrivenModel )
{
auto model_settings = std::get<ActivityDrivenSettings>( options.model_settings );
Expand Down
2 changes: 1 addition & 1 deletion src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ int main( int argc, char * argv[] )
}
else if( simulation_options.model == Seldon::Config::Model::ActivityDrivenModel )
{
simulation = std::make_unique<Seldon::Simulation<Seldon::ActivityAgentModel::AgentT>>(
simulation = std::make_unique<Seldon::Simulation<Seldon::ActivityDrivenModel::AgentT>>(
simulation_options, network_file, agent_file );
}

Expand Down
23 changes: 15 additions & 8 deletions src/models/ActivityDrivenModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,15 @@
namespace Seldon
{

ActivityAgentModel::ActivityAgentModel( NetworkT & network, std::mt19937 & gen )
: Model<ActivityAgentModel::AgentT>(),
ActivityDrivenModel::ActivityDrivenModel( NetworkT & network, std::mt19937 & gen )
: Model<ActivityDrivenModel::AgentT>(),
network( network ),
contact_prob_list( std::vector<std::vector<NetworkT::WeightT>>( network.n_agents() ) ),
gen( gen )
{
}

double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_contacted )
double ActivityDrivenModel::homophily_weight( size_t idx_contacter, size_t idx_contacted )
{
double homophily = this->homophily;

Expand All @@ -34,10 +34,12 @@ double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_co
return std::pow( opinion_diff, -homophily );
}

void ActivityAgentModel::get_agents_from_power_law()
void ActivityDrivenModel::get_agents_from_power_law()
{
std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values
power_law_distribution<> dist_activity( eps, gamma );
truncated_normal_distribution<> dist_reluctance( reluctance_mean, reluctance_sigma, reluctance_eps );

auto mean_activity = dist_activity.mean();

// Initial conditions for the opinions, initialize to [-1,1]
Expand All @@ -54,6 +56,11 @@ void ActivityAgentModel::get_agents_from_power_law()
{
network.agents[i].data.activity = mean_activity;
}

if( use_reluctances )
{
network.agents[i].data.reluctance = dist_reluctance( gen );
}
}

if( bot_present() )
Expand All @@ -66,7 +73,7 @@ void ActivityAgentModel::get_agents_from_power_law()
}
}

void ActivityAgentModel::update_network_probabilistic()
void ActivityDrivenModel::update_network_probabilistic()
{
network.switch_direction_flag();

Expand Down Expand Up @@ -133,7 +140,7 @@ void ActivityAgentModel::update_network_probabilistic()
network.toggle_incoming_outgoing(); // switch direction, so that we have incoming edges
}

void ActivityAgentModel::update_network_mean()
void ActivityDrivenModel::update_network_mean()
{
using WeightT = NetworkT::WeightT;
std::vector<WeightT> weights( network.n_agents(), 0.0 );
Expand Down Expand Up @@ -205,7 +212,7 @@ void ActivityAgentModel::update_network_mean()
}
}

void ActivityAgentModel::update_network()
void ActivityDrivenModel::update_network()
{

if( !mean_weights )
Expand All @@ -218,7 +225,7 @@ void ActivityAgentModel::update_network()
}
}

void ActivityAgentModel::iteration()
void ActivityDrivenModel::iteration()
{
Model<AgentT>::iteration();

Expand Down
2 changes: 1 addition & 1 deletion test/res/10_agents_meanfield_activity.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ model = "ActivityDriven"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 1 # Write the network every 20 iterations
# n_output_network = 1 # Write the network every 20 iterations
# n_output_agents = 1
print_progress = false # Print the iteration time ; if not set, then always print

Expand Down
11 changes: 8 additions & 3 deletions test/res/1bot_1agent_activity_prob.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,23 @@ eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 1 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 1.5 # Controversialness of the issue, must be greater than 0.
alpha = 1.5 # Controversialness of the issue, must be greater than 0.
K = 2.0 # Social interaction strength
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent. By default this is false, and every agent has a reluctance of 1
reluctance_mean = 1.5 # Mean of distribution before drawing from a truncated normal distribution (default set to 1.0)
reluctance_sigma = 0.1 # Width of normal distribution (before truncating)
reluctance_eps = 0.01 # Minimum such that the normal distribution is truncated at this value

n_bots = 1 # The number of bots to be used; if not specified defaults to 0 (which means bots are deactivated)
# Bots are agents with fixed opinions and different parameters, the parameters are specified in the following lists
# If n_bots is smaller than the length of any of the lists, the first n_bots entries are used. If n_bots is greater the code will throw an exception.
bot_m = [1] # If not specified, defaults to `m`
bot_m = [1] # If not specified, defaults to `m`
bot_homophily = [0.7] # If not specified, defaults to `homophily`
bot_activity = [1.0] # If not specified, defaults to 0
bot_opinion = [2] # The fixed opinions of the bots
bot_opinion = [2] # The fixed opinions of the bots

[network]
number_of_agents = 2
Expand Down
2 changes: 1 addition & 1 deletion test/res/opinions.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,5 @@
# comment
0, 2.1127107987061544, 0.044554683389757696
1,0.8088982488089491, 0.015813166022685163
2,-0.8802809369462433 , 0.015863953902810535
2,-0.8802809369462433 , 0.015863953902810535, 2.3

Loading

0 comments on commit 78a5d1b

Please sign in to comment.