ActivityDrivenModel: Generation of reluctances
ActivityDrivenModel::get_agents_from_power_law() now also
generates the reluctances from a truncated normal distribution.
Optionally, they can be correlated with the activities.

TODO: parsing of the config file to control these options

Co-authored-by: Amrita Goswami <[email protected]>
MSallermann and amritagos committed Mar 16, 2024
1 parent 1b7cee5 commit c8520b9
Showing 7 changed files with 32 additions and 24 deletions.
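
For orientation, the sketch below shows roughly what the new per-agent step in get_agents_from_power_law() does with the parameters added in ActivityDrivenModel.hpp: draw a reluctance from a normal distribution truncated below at reluctance_eps, then, if covariance_factor is non-zero, mix activity and reluctance symmetrically so the two become correlated. The free functions and the unbounded retry loop are illustrative simplifications, not the repository code; the actual implementation uses the truncated_normal_distribution helper from include/util/math.hpp shown further down.

#include <random>

// Illustrative stand-in for the repository's truncated_normal_distribution:
// redraw from the underlying normal until the sample clears the lower cutoff.
static double draw_truncated_normal( double mean, double sigma, double eps, std::mt19937 & gen )
{
    std::normal_distribution<double> normal_dist( mean, sigma );
    double sample = normal_dist( gen );
    while( !( sample > eps ) )
        sample = normal_dist( gen );
    return sample;
}

// Simplified per-agent step added by this commit: draw a reluctance, then mix
// activity and reluctance with covariance_factor so they become correlated.
static void assign_reluctance( double & activity, double & reluctance, double reluctance_mean,
                               double reluctance_sigma, double reluctance_eps, double covariance_factor,
                               std::mt19937 & gen )
{
    reluctance     = draw_truncated_normal( reluctance_mean, reluctance_sigma, reluctance_eps, gen );
    const double a = activity; // keep the original activity for the symmetric update
    activity += covariance_factor * reluctance;
    reluctance += covariance_factor * a;
}

Mixing with the saved original activity keeps the update symmetric, so neither quantity is privileged; covariance_factor = 0.0 (the default) leaves activities and reluctances uncorrelated.
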
6 changes: 6 additions & 0 deletions include/models/ActivityDrivenModel.hpp
@@ -118,6 +118,12 @@ class ActivityDrivenModel : public Model<Agent<ActivityAgentData>>

     double convergence_tol = 1e-12; // TODO: ??
 
+    bool use_reluctances = false;
+    double reluctance_mean = 1.0;
+    double reluctance_sigma = 0.25;
+    double reluctance_eps = 0.01;
+    double covariance_factor = 0.0;
+
     // bot @TODO: less hacky
 
     size_t n_bots = 0; // The first n_bots agents are bots
6 changes: 2 additions & 4 deletions include/simulation.hpp
@@ -85,8 +85,7 @@ class Simulation : public SimulationInterface
             auto degroot_settings = std::get<Config::DeGrootSettings>( options.model_settings );
 
             // DeGroot specific parameters
-            model = [&]()
-            {
+            model = [&]() {
                 auto model = std::make_unique<DeGrootModel>( network );
                 model->max_iterations = degroot_settings.max_iterations;
                 model->convergence_tol = degroot_settings.convergence_tol;
@@ -102,8 +101,7 @@ class Simulation : public SimulationInterface
         {
             auto activitydriven_settings = std::get<Config::ActivityDrivenSettings>( options.model_settings );
 
-            model = [&]()
-            {
+            model = [&]() {
                 auto model = std::make_unique<ActivityDrivenModel>( network, gen );
                 model->dt = activitydriven_settings.dt;
                 model->m = activitydriven_settings.m;
2 changes: 1 addition & 1 deletion include/util/math.hpp
@@ -162,7 +162,7 @@ class truncated_normal_distribution
     template<typename Generator>
     ScalarT operator()( Generator & gen )
     {
-        for( int i = 0; i < max_tries; i++ )
+        for( size_t i = 0; i < max_tries; i++ )
         {
             auto sample = normal_dist( gen );
             if( sample > eps )
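
For context, operator() above is a rejection sampler: it draws from the underlying normal distribution and retries, up to max_tries times, until a sample exceeds the lower cutoff eps; the int-to-size_t change presumably just matches the (unsigned) type of max_tries. A self-contained sketch of the same pattern, with names and the std::optional return chosen for illustration rather than taken from the repository:

#include <cstddef>
#include <optional>
#include <random>

// Rejection sampling for a normal distribution truncated below at `eps`:
// draw, and retry up to `max_tries` times until a sample is accepted.
template<typename Generator>
std::optional<double> truncated_normal_sample( double mean, double sigma, double eps, Generator & gen,
                                               std::size_t max_tries = 100 )
{
    std::normal_distribution<double> normal_dist( mean, sigma );
    for( std::size_t i = 0; i < max_tries; i++ )
    {
        const double sample = normal_dist( gen );
        if( sample > eps )
            return sample; // accepted
    }
    return std::nullopt; // give up after max_tries rejected draws
}
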
23 changes: 10 additions & 13 deletions src/config_parser.cpp
@@ -76,24 +76,21 @@ SimulationOptions parse_config_file( std::string_view config_file_path )
         // bot
         model_settings.n_bots = tbl["ActivityDriven"]["n_bots"].value_or<size_t>( 0 );
 
-        auto push_back_bot_array = [&]( auto toml_node, auto & options_array, auto default_value )
-        {
+        auto push_back_bot_array = [&]( auto toml_node, auto & options_array, auto default_value ) {
             if( toml_node.is_array() )
             {
                 toml::array * toml_arr = toml_node.as_array();
 
-                toml_arr->for_each(
-                    [&]( auto && elem )
-                    {
-                        if( elem.is_integer() )
-                        {
-                            options_array.push_back( elem.as_integer()->get() );
-                        }
-                        else if( elem.is_floating_point() )
-                        {
-                            options_array.push_back( elem.as_floating_point()->get() );
-                        }
-                    } );
+                toml_arr->for_each( [&]( auto && elem ) {
+                    if( elem.is_integer() )
+                    {
+                        options_array.push_back( elem.as_integer()->get() );
+                    }
+                    else if( elem.is_floating_point() )
+                    {
+                        options_array.push_back( elem.as_floating_point()->get() );
+                    }
+                } );
             }
             else
             {
13 changes: 11 additions & 2 deletions src/models/ActivityDrivenModel.cpp
@@ -38,6 +38,8 @@ void ActivityDrivenModel::get_agents_from_power_law()
 {
     std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values
     power_law_distribution<> dist_activity( eps, gamma );
+    truncated_normal_distribution<> dist_reluctance( reluctance_mean, reluctance_sigma, reluctance_eps );
+
     auto mean_activity = dist_activity.mean();
 
     // Initial conditions for the opinions, initialize to [-1,1]
@@ -54,6 +56,14 @@
         {
             network.agents[i].data.activity = mean_activity;
         }
+
+        if( use_reluctances )
+        {
+            network.agents[i].data.reluctance = dist_reluctance( gen );
+            auto a = network.agents[i].data.activity;
+            network.agents[i].data.activity += covariance_factor * network.agents[i].data.reluctance;
+            network.agents[i].data.reluctance += covariance_factor * a;
+        }
     }
 
     if( bot_present() )
@@ -145,8 +155,7 @@ void ActivityDrivenModel::update_network_mean()
         contact_prob_list[idx_agent] = weights; // set to zero
     }
 
-    auto probability_helper = []( double omega, size_t m )
-    {
+    auto probability_helper = []( double omega, size_t m ) {
         double p = 0;
         for( size_t i = 1; i <= m; i++ )
             p += ( std::pow( -omega, i + 1 ) + omega ) / ( omega + 1 );
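
For reference, the probability_helper lambda reformatted above accumulates, one loop iteration per term,

    p(\omega, m) = \sum_{i=1}^{m} \frac{(-\omega)^{i+1} + \omega}{\omega + 1},

and only the brace placement changes in this hunk, not the formula.
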
3 changes: 1 addition & 2 deletions test/test_activity.cpp
@@ -167,8 +167,7 @@ TEST_CASE( "Test the meanfield activity driven model with 10 agents", "[activity
     auto mean_activity = dist.mean();
 
     // Calculate the critical controversialness
-    auto set_opinions_and_run = [&]( bool above_critical_controversialness )
-    {
+    auto set_opinions_and_run = [&]( bool above_critical_controversialness ) {
         auto simulation = Simulation<AgentT>( options, std::nullopt, std::nullopt );
         auto initial_opinion_delta = 0.1; // Set the initial opinion in the interval [-delta, delta]
 
3 changes: 1 addition & 2 deletions test/test_sampling.cpp
@@ -98,8 +98,7 @@ TEST_CASE( "Testing sampling functions" )

     std::vector<size_t> histogram( n, 0 ); // Count how often each element occurs amongst all samples
 
-    auto weight_callback = []( size_t idx )
-    {
+    auto weight_callback = []( size_t idx ) {
         if( ( idx == ignore_idx ) || ( idx == ignore_idx2 ) )
         {
             return 0.0;
