diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 64ec97f..4001517 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -34,4 +34,4 @@ jobs:
       - name: Test with meson
         shell: micromamba-shell {0}
         run: |
-          meson test -C build
\ No newline at end of file
+          meson test -C build --verbose
\ No newline at end of file
diff --git a/examples/ActivityDrivenReluctance/conf.toml b/examples/ActivityDrivenReluctance/conf.toml
new file mode 100644
index 0000000..7ba5655
--- /dev/null
+++ b/examples/ActivityDrivenReluctance/conf.toml
@@ -0,0 +1,32 @@
+[simulation]
+model = "ActivityDriven"
+# rng_seed = 120 # Leaving this empty will pick a random seed
+
+[io]
+n_output_network = 20 # Write the network every 20 iterations
+n_output_agents = 1   # Write the opinions of agents after every iteration
+print_progress = true # Print the iteration time; if not set, progress is always printed
+
+[model]
+max_iterations = 500 # If not set, the maximum number of iterations is infinite
+
+[ActivityDriven]
+dt = 0.01          # Timestep for the integration of the coupled ODEs
+m = 10             # Number of agents contacted when the agent is active
+eps = 0.01         # Minimum activity epsilon; a_i belongs to [epsilon, 1]
+gamma = 2.1        # Exponent of the activity power-law distribution
+reciprocity = 0.65 # Probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i; so every agent can have more than m incoming connections
+homophily = 1.0    # aka beta; if zero, agents pick their interaction partners at random
+alpha = 3.0        # Controversialness of the issue, must be greater than 0
+K = 2.0            # Social interaction strength
+mean_activities = false # Use the mean value of the power-law distribution for the activities of all agents
+mean_weights = false    # Use the meanfield approximation of the network edges
+
+reluctances = true      # Assigns a "reluctance" (m_i) to each agent; defaults to false, in which case every agent has a reluctance of 1
+reluctance_mean = 1.0   # Mean of the normal distribution from which reluctances are drawn (default 1.0)
+reluctance_sigma = 0.25 # Width of the normal distribution (before truncating)
+reluctance_eps = 0.01   # Lower bound at which the normal distribution is truncated
+
+[network]
+number_of_agents = 1000
+connections_per_agent = 10
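Note: nothing in this patch states the model equation explicitly, but from the changes to ActivityDrivenModel.hpp further down, the reluctance m_i appears to simply rescale the social-interaction term of the activity-driven opinion dynamics,

    dx_i/dt = -x_i + (K / m_i) \sum_j A_ij \tanh( \alpha x_j )

with each m_i drawn once, at initialisation, from a normal distribution of mean reluctance_mean and width reluctance_sigma, truncated from below at reluctance_eps; with reluctances = false every agent keeps m_i = 1.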
diff --git a/include/config_parser.hpp b/include/config_parser.hpp
index e8e3e8a..a420989 100644
--- a/include/config_parser.hpp
+++ b/include/config_parser.hpp
@@ -60,6 +60,11 @@ struct ActivityDrivenSettings
     std::vector<double> bot_activity  = std::vector<double>( 0 );
     std::vector<double> bot_opinion   = std::vector<double>( 0 );
     std::vector<double> bot_homophily = std::vector<double>( 0 );
+    bool use_reluctances     = false;
+    double reluctance_mean   = 1.0;
+    double reluctance_sigma  = 0.25;
+    double reluctance_eps    = 0.01;
+    double covariance_factor = 0.0;
 };
 
 struct InitialNetworkSettings
diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp
index 6dbbe98..c642fa6 100644
--- a/include/models/ActivityDrivenModel.hpp
+++ b/include/models/ActivityDrivenModel.hpp
@@ -6,7 +6,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -15,25 +15,41 @@ namespace Seldon
 
 struct ActivityAgentData
 {
-    double opinion  = 0; // x_i
-    double activity = 0; // a_i
+    double opinion    = 0;   // x_i
+    double activity   = 0;   // a_i
+    double reluctance = 1.0; // m_i
 };
 
 template<>
 inline std::string Agent<ActivityAgentData>::to_string() const
 {
-    return fmt::format( "{}, {}", data.opinion, data.activity );
+    return fmt::format( "{}, {}, {}", data.opinion, data.activity, data.reluctance );
 };
 
 template<>
 inline void Agent<ActivityAgentData>::from_string( const std::string & str )
 {
-    auto pos_comma = str.find_first_of( ',' );
-    data.opinion   = std::stod( str.substr( 0, pos_comma ) );
-    data.activity  = std::stod( str.substr( pos_comma + 1, str.size() ) );
+    auto pos_comma      = str.find_first_of( ',' );
+    auto pos_next_comma = str.find( ',', pos_comma + 1 );
+
+    data.opinion = std::stod( str.substr( 0, pos_comma ) );
+
+    if( pos_next_comma == std::string::npos )
+    {
+        data.activity = std::stod( str.substr( pos_comma + 1, str.size() ) );
+    }
+    else
+    {
+        data.activity = std::stod( str.substr( pos_comma + 1, pos_next_comma ) );
+    }
+
+    if( pos_next_comma != std::string::npos )
+    {
+        data.reluctance = std::stod( str.substr( pos_next_comma + 1, str.size() ) );
+    }
 };
 
-class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
+class ActivityDrivenModel : public Model<Agent<ActivityAgentData>>
 {
 public:
     using AgentT = Agent<ActivityAgentData>;
@@ -69,7 +85,8 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
             for( size_t j = 0; j < neighbour_buffer.size(); j++ )
             {
                 j_index = neighbour_buffer[j];
-                k_buffer[idx_agent] += K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) );
+                k_buffer[idx_agent] += 1.0 / network.agents[idx_agent].data.reluctance * K * weight_buffer[j]
+                                       * std::tanh( alpha * opinion( j_index ) );
             }
             // Multiply by the timestep
             k_buffer[idx_agent] *= dt;
@@ -101,6 +118,12 @@
     double convergence_tol = 1e-12; // TODO: ??
 
+    bool use_reluctances     = false;
+    double reluctance_mean   = 1.0;
+    double reluctance_sigma  = 0.25;
+    double reluctance_eps    = 0.01;
+    double covariance_factor = 0.0;
+
     // bot @TODO: less hacky
     size_t n_bots = 0; // The first n_bots agents are bots
@@ -114,14 +137,11 @@
         return n_bots > 0;
     }
 
-    ActivityAgentModel( NetworkT & network, std::mt19937 & gen );
+    ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );
 
     void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set
 
     void iteration() override;
-
-    // bool finished() overteration() override;
-    // bool finished() override;
 };
 
 } // namespace Seldon
\ No newline at end of file
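With this change the per-agent text format gains an optional third column (opinion, activity, reluctance); two-column files from before the patch still parse, with reluctance left at its default of 1.0. A standalone sketch of the same parsing logic (the helper parse_activity_agent and the ParsedAgent struct are illustrative only, not part of the patch):

#include <cstdio>
#include <string>

struct ParsedAgent
{
    double opinion    = 0;
    double activity   = 0;
    double reluctance = 1.0; // stays at the default when the third column is missing
};

// Mirrors Agent<ActivityAgentData>::from_string above: split on the first two commas.
ParsedAgent parse_activity_agent( const std::string & str )
{
    ParsedAgent data;
    auto pos_comma      = str.find_first_of( ',' );
    auto pos_next_comma = str.find( ',', pos_comma + 1 );

    data.opinion = std::stod( str.substr( 0, pos_comma ) );
    if( pos_next_comma == std::string::npos )
    {
        data.activity = std::stod( str.substr( pos_comma + 1 ) );
    }
    else
    {
        data.activity   = std::stod( str.substr( pos_comma + 1, pos_next_comma ) );
        data.reluctance = std::stod( str.substr( pos_next_comma + 1 ) );
    }
    return data;
}

int main()
{
    auto old_style = parse_activity_agent( "0.5, 0.2" );      // reluctance stays 1.0
    auto new_style = parse_activity_agent( "0.5, 0.2, 1.3" ); // reluctance read as 1.3
    std::printf( "%f %f\n", old_style.reluctance, new_style.reluctance );
}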
diff --git a/include/simulation.hpp b/include/simulation.hpp
index d40a6e4..5f9124a 100644
--- a/include/simulation.hpp
+++ b/include/simulation.hpp
@@ -98,13 +98,13 @@ class Simulation : public SimulationInterface
                 network.agents = AgentGeneration::generate_from_file( cli_agent_file.value() );
             }
         }
-        else if constexpr( std::is_same_v )
+        else if constexpr( std::is_same_v )
         {
             auto activitydriven_settings = std::get( options.model_settings );
 
             model = [&]()
             {
-                auto model = std::make_unique<ActivityAgentModel>( network, gen );
+                auto model = std::make_unique<ActivityDrivenModel>( network, gen );
                 model->dt  = activitydriven_settings.dt;
                 model->m   = activitydriven_settings.m;
                 model->eps = activitydriven_settings.eps;
@@ -116,6 +116,11 @@ class Simulation : public SimulationInterface
                 model->mean_activities = activitydriven_settings.mean_activities;
                 model->mean_weights    = activitydriven_settings.mean_weights;
                 model->max_iterations  = activitydriven_settings.max_iterations;
+                // Reluctance
+                model->use_reluctances  = activitydriven_settings.use_reluctances;
+                model->reluctance_mean  = activitydriven_settings.reluctance_mean;
+                model->reluctance_sigma = activitydriven_settings.reluctance_sigma;
+                model->reluctance_eps   = activitydriven_settings.reluctance_eps;
                 // bot
                 model->n_bots      = activitydriven_settings.n_bots;
                 model->bot_opinion = activitydriven_settings.bot_opinion;
@@ -129,7 +134,7 @@ class Simulation : public SimulationInterface
             if( cli_agent_file.has_value() )
             {
                 network.agents
-                    = AgentGeneration::generate_from_file<ActivityAgentModel::AgentT>( cli_agent_file.value() );
+                    = AgentGeneration::generate_from_file<ActivityDrivenModel::AgentT>( cli_agent_file.value() );
             }
         }
     }
diff --git a/include/util/math.hpp b/include/util/math.hpp
index dcd0690..607fe72 100644
--- a/include/util/math.hpp
+++ b/include/util/math.hpp
@@ -138,4 +138,38 @@ class power_law_distribution
     }
 };
 
+/**
+ * @brief Truncated normal distribution
+ * A continuous random distribution on the range [eps, infty)
+ * with p(x) ~ e^(-(x-mean)^2/(2 sigma^2))
+ */
+template<typename ScalarT = double>
+class truncated_normal_distribution
+{
+private:
+    ScalarT mean{};
+    ScalarT sigma{};
+    ScalarT eps{};
+    std::normal_distribution<ScalarT> normal_dist{};
+    size_t max_tries = 5000;
+
+public:
+    truncated_normal_distribution( ScalarT mean, ScalarT sigma, ScalarT eps )
+            : mean( mean ), sigma( sigma ), eps( eps ), normal_dist( std::normal_distribution<ScalarT>( mean, sigma ) )
+    {
+    }
+
+    template<typename Generator>
+    ScalarT operator()( Generator & gen )
+    {
+        for( size_t i = 0; i < max_tries; i++ )
+        {
+            auto sample = normal_dist( gen );
+            if( sample > eps )
+                return sample;
+        }
+        return eps;
+    }
+};
+
 } // namespace Seldon
\ No newline at end of file
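A minimal standalone sketch of how this new distribution is meant to be used (the seed, the parameter values and the main() wrapper are illustrative; the include path assumes include/ is on the compiler's search path):

#include "util/math.hpp"
#include <cstdio>
#include <random>

int main()
{
    std::mt19937 gen( 42 ); // fixed seed, arbitrary
    // Same parameters as reluctance_mean / reluctance_sigma / reluctance_eps in the example config
    Seldon::truncated_normal_distribution<double> dist_reluctance( 1.0, 0.25, 0.01 );

    for( int i = 0; i < 5; i++ )
    {
        // Each call retries the underlying normal distribution until a sample > eps is found,
        // and falls back to eps itself after max_tries failed attempts.
        std::printf( "m_%d = %f\n", i, dist_reluctance( gen ) );
    }
}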
tbl["ActivityDriven"]["K"].value_or( 3.0 ); model_settings.mean_activities = tbl["ActivityDriven"]["mean_activities"].value_or( false ); model_settings.mean_weights = tbl["ActivityDriven"]["mean_weights"].value_or( false ); + // Reluctances + model_settings.use_reluctances = tbl["ActivityDriven"]["reluctances"].value_or( false ); + model_settings.reluctance_mean = tbl["ActivityDriven"]["reluctance_mean"].value_or( 1.0 ); + model_settings.reluctance_sigma = tbl["ActivityDriven"]["reluctance_sigma"].value_or( 0.25 ); + model_settings.reluctance_eps = tbl["ActivityDriven"]["reluctance_eps"].value_or( 0.01 ); model_settings.max_iterations = tbl["model"]["max_iterations"].value(); @@ -165,7 +170,12 @@ void validate_settings( const SimulationOptions & options ) check( name_and_var( model_settings.alpha ), geq_zero ); // check( name_and_var( model_settings.homophily ), geq_zero ); check( name_and_var( model_settings.reciprocity ), geq_zero ); - + // Reluctance options + check( name_and_var( model_settings.reluctance_mean ), g_zero ); + check( name_and_var( model_settings.reluctance_sigma ), g_zero ); + check( name_and_var( model_settings.reluctance_eps ), g_zero ); + check( name_and_var( model_settings.covariance_factor ), geq_zero ); + // Bot options size_t n_bots = model_settings.n_bots; auto check_bot_size = [&]( auto x ) { return x.size() >= n_bots; }; const std::string bot_msg = "Length needs to be >= n_bots"; @@ -186,7 +196,7 @@ void print_settings( const SimulationOptions & options ) fmt::print( "INFO: Seeding with seed {}\n", options.rng_seed ); fmt::print( "Model type: {}\n", options.model_string ); fmt::print( "Network has {} agents\n", options.network_settings.n_agents ); - + // @TODO: Optionally print *all* settings to the console, including defaults that were set if( options.model == Model::ActivityDrivenModel ) { auto model_settings = std::get( options.model_settings ); diff --git a/src/main.cpp b/src/main.cpp index f804b02..d026801 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -66,7 +66,7 @@ int main( int argc, char * argv[] ) } else if( simulation_options.model == Seldon::Config::Model::ActivityDrivenModel ) { - simulation = std::make_unique>( + simulation = std::make_unique>( simulation_options, network_file, agent_file ); } diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp index 3e14da5..c2f4a0a 100644 --- a/src/models/ActivityDrivenModel.cpp +++ b/src/models/ActivityDrivenModel.cpp @@ -8,15 +8,15 @@ namespace Seldon { -ActivityAgentModel::ActivityAgentModel( NetworkT & network, std::mt19937 & gen ) - : Model(), +ActivityDrivenModel::ActivityDrivenModel( NetworkT & network, std::mt19937 & gen ) + : Model(), network( network ), contact_prob_list( std::vector>( network.n_agents() ) ), gen( gen ) { } -double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_contacted ) +double ActivityDrivenModel::homophily_weight( size_t idx_contacter, size_t idx_contacted ) { double homophily = this->homophily; @@ -34,10 +34,12 @@ double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_co return std::pow( opinion_diff, -homophily ); } -void ActivityAgentModel::get_agents_from_power_law() +void ActivityDrivenModel::get_agents_from_power_law() { std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values power_law_distribution<> dist_activity( eps, gamma ); + truncated_normal_distribution<> dist_reluctance( reluctance_mean, reluctance_sigma, reluctance_eps ); + auto mean_activity = 
diff --git a/src/main.cpp b/src/main.cpp
index f804b02..d026801 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -66,7 +66,7 @@ int main( int argc, char * argv[] )
     }
     else if( simulation_options.model == Seldon::Config::Model::ActivityDrivenModel )
    {
-        simulation = std::make_unique<Seldon::Simulation<Seldon::ActivityAgentModel::AgentT>>(
+        simulation = std::make_unique<Seldon::Simulation<Seldon::ActivityDrivenModel::AgentT>>(
            simulation_options, network_file, agent_file );
    }
diff --git a/src/models/ActivityDrivenModel.cpp b/src/models/ActivityDrivenModel.cpp
index 3e14da5..c2f4a0a 100644
--- a/src/models/ActivityDrivenModel.cpp
+++ b/src/models/ActivityDrivenModel.cpp
@@ -8,15 +8,15 @@
 namespace Seldon
 {
 
-ActivityAgentModel::ActivityAgentModel( NetworkT & network, std::mt19937 & gen )
-    : Model<Agent<ActivityAgentData>>(),
+ActivityDrivenModel::ActivityDrivenModel( NetworkT & network, std::mt19937 & gen )
+    : Model<Agent<ActivityAgentData>>(),
       network( network ),
       contact_prob_list( std::vector>( network.n_agents() ) ),
       gen( gen )
 {
 }
 
-double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_contacted )
+double ActivityDrivenModel::homophily_weight( size_t idx_contacter, size_t idx_contacted )
 {
     double homophily = this->homophily;
 
@@ -34,10 +34,12 @@ double ActivityAgentModel::homophily_weight( size_t idx_contacter, size_t idx_co
     return std::pow( opinion_diff, -homophily );
 }
 
-void ActivityAgentModel::get_agents_from_power_law()
+void ActivityDrivenModel::get_agents_from_power_law()
 {
     std::uniform_real_distribution<> dis_opinion( -1, 1 ); // Opinion initial values
     power_law_distribution<> dist_activity( eps, gamma );
+    truncated_normal_distribution<> dist_reluctance( reluctance_mean, reluctance_sigma, reluctance_eps );
+
     auto mean_activity = dist_activity.mean();
 
     // Initial conditions for the opinions, initialize to [-1,1]
@@ -54,6 +56,11 @@ void ActivityAgentModel::get_agents_from_power_law()
         {
             network.agents[i].data.activity = mean_activity;
         }
+
+        if( use_reluctances )
+        {
+            network.agents[i].data.reluctance = dist_reluctance( gen );
+        }
     }
 
     if( bot_present() )
@@ -66,7 +73,7 @@ void ActivityAgentModel::get_agents_from_power_law()
     }
 }
 
-void ActivityAgentModel::update_network_probabilistic()
+void ActivityDrivenModel::update_network_probabilistic()
 {
     network.switch_direction_flag();
@@ -133,7 +140,7 @@ void ActivityAgentModel::update_network_probabilistic()
     network.toggle_incoming_outgoing(); // switch direction, so that we have incoming edges
 }
 
-void ActivityAgentModel::update_network_mean()
+void ActivityDrivenModel::update_network_mean()
 {
     using WeightT = NetworkT::WeightT;
     std::vector<WeightT> weights( network.n_agents(), 0.0 );
@@ -205,7 +212,7 @@ void ActivityAgentModel::update_network_mean()
     }
 }
 
-void ActivityAgentModel::update_network()
+void ActivityDrivenModel::update_network()
 {
 
     if( !mean_weights )
@@ -218,7 +225,7 @@ void ActivityAgentModel::update_network()
     }
 }
 
-void ActivityAgentModel::iteration()
+void ActivityDrivenModel::iteration()
 {
     Model<AgentT>::iteration();
diff --git a/test/res/10_agents_meanfield_activity.toml b/test/res/10_agents_meanfield_activity.toml
index 63191c8..66d8463 100644
--- a/test/res/10_agents_meanfield_activity.toml
+++ b/test/res/10_agents_meanfield_activity.toml
@@ -3,7 +3,7 @@ model = "ActivityDriven"
 # rng_seed = 120 # Leaving this empty will pick a random seed
 
 [io]
-n_output_network = 1 # Write the network every 20 iterations
+# n_output_network = 1 # Write the network every 20 iterations
 # n_output_agents = 1
 print_progress = false # Print the iteration time ; if not set, then always print
diff --git a/test/res/1bot_1agent_activity_prob.toml b/test/res/1bot_1agent_activity_prob.toml
index d8e0f58..4c5b050 100644
--- a/test/res/1bot_1agent_activity_prob.toml
+++ b/test/res/1bot_1agent_activity_prob.toml
@@ -17,18 +17,23 @@ eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon,1]
 gamma = 2.1 # Exponent of activity power law distribution of activities
 reciprocity = 1 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
 homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
-alpha = 1.5 # Controversialness of the issue, must be greater than 0.
+alpha = 1.5            # Controversialness of the issue, must be greater than 0.
 K = 2.0 # Social interaction strength
 mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
 mean_weights = false # Use the meanfield approximation of the network edges
 
+reluctances = true     # Assigns a "reluctance" (m_i) to each agent; defaults to false, in which case every agent has a reluctance of 1
+reluctance_mean = 1.5  # Mean of the normal distribution from which reluctances are drawn (default 1.0)
+reluctance_sigma = 0.1 # Width of the normal distribution (before truncating)
+reluctance_eps = 0.01  # Lower bound at which the normal distribution is truncated
+
 n_bots = 1 # The number of bots to be used; if not specified defaults to 0 (which means bots are deactivated)
 # Bots are agents with fixed opinions and different parameters, the parameters are specified in the following lists
 # If n_bots is smaller than the length of any of the lists, the first n_bots entries are used. If n_bots is greater, the code will throw an exception.
-bot_m = [1] # If not specified, defaults to `m`
+bot_m = [1]           # If not specified, defaults to `m`
 bot_homophily = [0.7] # If not specified, defaults to `homophily`
 bot_activity = [1.0] # If not specified, defaults to 0
-bot_opinion = [2] # The fixed opinions of the bots
+bot_opinion = [2]     # The fixed opinions of the bots
 
 [network]
 number_of_agents = 2
diff --git a/test/res/opinions.txt b/test/res/opinions.txt
index 6669744..4cede8c 100644
--- a/test/res/opinions.txt
+++ b/test/res/opinions.txt
@@ -2,5 +2,5 @@
 # comment
 0, 2.1127107987061544, 0.044554683389757696
 1,0.8088982488089491, 0.015813166022685163
-2,-0.8802809369462433 , 0.015863953902810535
+2,-0.8802809369462433 , 0.015863953902810535, 2.3
diff --git a/test/test_activity.cpp b/test/test_activity.cpp
index c681e37..75e83c8 100644
--- a/test/test_activity.cpp
+++ b/test/test_activity.cpp
@@ -18,7 +18,7 @@ TEST_CASE(
 {
     using namespace Seldon;
     using namespace Catch::Matchers;
-    using AgentT = ActivityAgentModel::AgentT;
+    using AgentT = ActivityDrivenModel::AgentT;
 
     auto proj_root_path = fs::current_path();
 
@@ -50,7 +50,7 @@ TEST_CASE( "Test the probabilistic activity driven model for two agents", "[acti
 {
     using namespace Seldon;
     using namespace Catch::Matchers;
-    using AgentT = ActivityAgentModel::AgentT;
+    using AgentT = ActivityDrivenModel::AgentT;
 
     auto proj_root_path = fs::current_path();
     auto input_file     = proj_root_path / fs::path( "test/res/2_agents_activity_prob.toml" );
@@ -85,11 +85,13 @@ TEST_CASE( "Test the probabilistic activity driven model for two agents", "[acti
     }
 }
 
-TEST_CASE( "Test the probabilistic activity driven model with one bot and one agent", "[activity1Bot1Agent]" )
+TEST_CASE(
+    "Test the probabilistic activity driven model with one bot and one (reluctant) agent",
+    "[activity1Bot1AgentReluctance]" )
 {
     using namespace Seldon;
     using namespace Catch::Matchers;
-    using AgentT = ActivityAgentModel::AgentT;
+    using AgentT = ActivityDrivenModel::AgentT;
 
     auto proj_root_path = fs::current_path();
     auto input_file     = proj_root_path / fs::path( "test/res/1bot_1agent_activity_prob.toml" );
@@ -122,8 +124,11 @@ TEST_CASE( "Test the probabilistic activity driven model with one bot and one ag
     auto time_elapsed = iterations * dt;
 
     // Final agent and bot opinions after the simulation run
-    auto x_t     = agent.data.opinion;
-    auto x_t_bot = bot.data.opinion;
+    auto x_t        = agent.data.opinion;
+    auto x_t_bot    = bot.data.opinion;
+    auto reluctance = agent.data.reluctance;
+
+    fmt::print( "Agent reluctance is = {}\n", reluctance );
 
     // The bot opinion should not change during the simulation
     REQUIRE_THAT( x_t_bot, WithinAbs( x_bot, 1e-16 ) );
@@ -131,7 +136,8 @@ TEST_CASE( "Test the probabilistic activity driven model with one bot and one ag
     // Test that the agent opinion matches the analytical solution for an agent with a bot
     // Analytical solution is:
     // x_t = [x(0) - Ktanh(alpha*x_bot)]e^(-t) + Ktanh(alpha*x_bot)
-    auto x_t_analytical = ( x_0 - K * tanh( alpha * x_bot ) ) * exp( -time_elapsed ) + K * tanh( alpha * x_bot );
+    auto x_t_analytical = ( x_0 - K / reluctance * tanh( alpha * x_bot ) ) * exp( -time_elapsed )
+                          + K / reluctance * tanh( alpha * x_bot );
 
     REQUIRE_THAT( x_t, WithinAbs( x_t_analytical, 1e-5 ) );
 }
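For reference (this derivation is not spelled out in the patch, but it is what the updated x_t_analytical encodes): for a single agent whose only incoming connection is a bot with fixed opinion x_bot, the reluctance-scaled dynamics reduce to dx/dt = -x + (K/m) \tanh( \alpha x_bot ), whose solution is

    x(t) = [ x_0 - (K/m) \tanh( \alpha x_bot ) ] e^{-t} + (K/m) \tanh( \alpha x_bot )

that is, the previous closed-form result with K replaced by K/m, where m is the agent's reluctance drawn from the config above (reluctance_mean = 1.5, reluctance_sigma = 0.1).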
@@ -140,7 +146,7 @@ TEST_CASE( "Test the meanfield activity driven model with 10 agents", "[activity
 {
     using namespace Seldon;
     using namespace Catch::Matchers;
-    using AgentT = ActivityAgentModel::AgentT;
+    using AgentT = ActivityDrivenModel::AgentT;
 
     auto proj_root_path = fs::current_path();
     auto input_file     = proj_root_path / fs::path( "test/res/10_agents_meanfield_activity.toml" );
@@ -185,7 +191,7 @@ TEST_CASE( "Test the meanfield activity driven model with 10 agents", "[activity
 
     // We need an output path for Simulation, but we won't write anything out there
     fs::path output_dir_path = proj_root_path / fs::path( "test/output_meanfield_test" );
-    fs::create_directories( output_dir_path );
+    // fs::create_directories( output_dir_path );
 
     // run that mofo
     simulation.run( output_dir_path );
diff --git a/test/test_io.cpp b/test/test_io.cpp
index eca20ba..e79fd21 100644
--- a/test/test_io.cpp
+++ b/test/test_io.cpp
@@ -17,7 +17,7 @@ TEST_CASE( "Test reading in the network from a file", "[io_network]" )
 {
     using namespace Seldon;
     using namespace Catch::Matchers;
-    using AgentT  = ActivityAgentModel::AgentT;
+    using AgentT  = ActivityDrivenModel::AgentT;
     using Network = Network<AgentT>;
 
     auto proj_root_path = fs::current_path();
@@ -46,10 +46,11 @@ TEST_CASE( "Test reading in the agents from a file", "[io_agents]" )
     auto proj_root_path = fs::current_path();
     auto network_file   = proj_root_path / fs::path( "test/res/opinions.txt" );
 
-    auto agents = Seldon::AgentGeneration::generate_from_file<ActivityAgentModel::AgentT>( network_file );
+    auto agents = Seldon::AgentGeneration::generate_from_file<ActivityDrivenModel::AgentT>( network_file );
 
-    std::vector<double> opinions_expected   = { 2.1127107987061544, 0.8088982488089491, -0.8802809369462433 };
-    std::vector<double> activities_expected = { 0.044554683389757696, 0.015813166022685163, 0.015863953902810535 };
+    std::vector<double> opinions_expected    = { 2.1127107987061544, 0.8088982488089491, -0.8802809369462433 };
+    std::vector<double> activities_expected  = { 0.044554683389757696, 0.015813166022685163, 0.015863953902810535 };
+    std::vector<double> reluctances_expected = { 1.0, 1.0, 2.3 };
 
     REQUIRE( agents.size() == 3 );
 
@@ -58,5 +59,6 @@ TEST_CASE( "Test reading in the agents from a file", "[io_agents]" )
         fmt::print( "{}", i );
         REQUIRE_THAT( agents[i].data.opinion, Catch::Matchers::WithinAbs( opinions_expected[i], 1e-16 ) );
         REQUIRE_THAT( agents[i].data.activity, Catch::Matchers::WithinAbs( activities_expected[i], 1e-16 ) );
+        REQUIRE_THAT( agents[i].data.reluctance, Catch::Matchers::WithinAbs( reluctances_expected[i], 1e-16 ) );
     }
 }
\ No newline at end of file