Skip to content

Commit

Permalink
Tests: Finished activity driven model with one bot and one agent. Static
Browse files Browse the repository at this point in the history
cast changed to dynamic cast.

Co-authored-by: Moritz Sallermann <[email protected]>
  • Loading branch information
amritagos and MSallermann committed Mar 13, 2024
1 parent 0d9913c commit b086caf
Show file tree
Hide file tree
Showing 5 changed files with 90 additions and 14 deletions.
2 changes: 1 addition & 1 deletion include/model_base.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class ModelBase
template<typename AgentT>
AgentT * get_agent_as( int idx )
{
return static_cast<AgentT *>( get_agent( idx ) );
return dynamic_cast<AgentT *>( get_agent( idx ) );
}

virtual void iteration() = 0;
Expand Down
5 changes: 2 additions & 3 deletions src/simulation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ void Seldon::Simulation::run( fs::path output_dir_path )

typedef std::chrono::milliseconds ms;
auto t_simulation_start = std::chrono::high_resolution_clock::now();
do
while( !this->model->finished() )
{
auto t_iter_start = std::chrono::high_resolution_clock::now();

Expand Down Expand Up @@ -143,8 +143,7 @@ void Seldon::Simulation::run( fs::path output_dir_path )
auto filename = fmt::format( "network_{}.txt", this->model->n_iterations );
Seldon::IO::network_to_file( *this, ( output_dir_path / fs::path( filename ) ).string() );
}

} while( !this->model->finished() );
}

auto t_simulation_end = std::chrono::high_resolution_clock::now();
auto total_time = std::chrono::duration_cast<ms>( t_simulation_end - t_simulation_start );
Expand Down
35 changes: 35 additions & 0 deletions test/res/1bot_1agent_activity_prob.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
[simulation]
model = "ActivityDriven"
rng_seed = 120 # Leaving this empty will pick a random seed

[io]
# n_output_network = 1 # Write the network every iteration (disabled for this test)
# n_output_agents = 1
print_progress = true # Print the iteration time; if not set, progress is always printed

[model]
max_iterations = 1000 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.001 # Timestep for the integration of the coupled ODEs
m = 1 # Number of agents contacted, when the agent is active
eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon,1] (eps = 1 forces every activity to exactly 1)
gamma = 2.1 # Exponent of activity power law distribution of activities
reciprocity = 1 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
alpha = 1.5 # Controversialness of the issue, must be greater than 0.
K = 2.0 # Social interaction strength
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

n_bots = 1 # The number of bots to be used; if not specified defaults to 0 (which means bots are deactivated)
# Bots are agents with fixed opinions and different parameters, the parameters are specified in the following lists
# If n_bots is smaller than the length of any of the lists, the first n_bots entries are used. If n_bots is greater, the code will throw an exception.
bot_m = [1] # If not specified, defaults to `m`
bot_homophily = [0.7] # If not specified, defaults to `homophily`
bot_activity = [1.0] # If not specified, defaults to 0
bot_opinion = [2] # The fixed opinions of the bots

[network]
number_of_agents = 2
connections_per_agent = 1
4 changes: 2 additions & 2 deletions test/res/2_agents_activity_prob.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@ model = "ActivityDriven"
rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 1 # Write the network every 20 iterations
n_output_agents = 1
# n_output_network = 1 # Write the network every iteration (disabled for this test)
# n_output_agents = 1
print_progress = true # Print the iteration time; if not set, progress is always printed

[model]
Expand Down
58 changes: 50 additions & 8 deletions test/test_activity.cpp
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
#include "catch2/matchers/catch_matchers.hpp"
#include "models/ActivityDrivenModel.hpp"
#include <catch2/catch_test_macros.hpp>
#include <catch2/matchers/catch_matchers_floating_point.hpp>
Expand Down Expand Up @@ -55,16 +56,9 @@ TEST_CASE( "Test the probabilistic activity driven model for two agents", "[acti

auto simulation = Simulation( options, std::nullopt, std::nullopt );

// We need an output path for Simulation, but we won't write anything out there?
// We need an output path for Simulation, but we won't write anything out there
fs::path output_dir_path = proj_root_path / fs::path( "test/output" );

fs::remove_all( output_dir_path );
fs::create_directories( output_dir_path );

// Zero step
auto filename = fmt::format( "opinions_{}.txt", 0 );
Seldon::IO::opinions_to_file( simulation, ( output_dir_path / fs::path( filename ) ).string() );

simulation.run( output_dir_path );

auto model_settings = std::get<Seldon::Config::ActivityDrivenSettings>( options.model_settings );
Expand All @@ -86,4 +80,52 @@ TEST_CASE( "Test the probabilistic activity driven model for two agents", "[acti
fmt::print( "{} \n", agent->data.opinion );
REQUIRE_THAT( agent->data.opinion, WithinAbs( analytical_x, 1e-4 ) );
}
}

TEST_CASE( "Test the probabilistic activity driven model with one bot and one agent", "[activity1Bot1Agent]" )
{
    using namespace Seldon;
    using namespace Catch::Matchers;

    auto proj_root_path = fs::current_path();
    auto input_file     = proj_root_path / fs::path( "test/res/1bot_1agent_activity_prob.toml" );

    auto options = Config::parse_config_file( input_file.string() );

    auto simulation = Simulation( options, std::nullopt, std::nullopt );

    // We need an output path for Simulation, but we won't write anything out there
    fs::path output_dir_path = proj_root_path / fs::path( "test/output" );

    // Get the bot opinion (which won't change)
    auto * bot = simulation.model->get_agent_as<ActivityAgentModel::AgentT>( 0 );
    auto x_bot = bot->data.opinion; // Bot opinion

    // Get the agent and fix its starting opinion so the analytical solution below is well-defined
    auto * agent        = simulation.model->get_agent_as<ActivityAgentModel::AgentT>( 1 );
    agent->data.opinion = 1.0;
    auto x_0            = agent->data.opinion;
    fmt::print( "We have set agent x_0 = {}\n", x_0 );

    simulation.run( output_dir_path );

    // Parameters needed for the analytical solution: K, alpha, and the total simulated time
    auto model_settings = std::get<Seldon::Config::ActivityDrivenSettings>( options.model_settings );
    auto K              = model_settings.K;
    auto alpha          = model_settings.alpha;
    auto iterations     = model_settings.max_iterations.value();
    auto dt             = model_settings.dt;
    auto time_elapsed   = iterations * dt;

    // Final agent and bot opinions after the simulation run
    auto x_t     = agent->data.opinion;
    auto x_t_bot = bot->data.opinion;

    // The bot opinion should not change during the simulation
    REQUIRE_THAT( x_t_bot, WithinAbs( x_bot, 1e-16 ) );

    // Test that the agent opinion matches the analytical solution for an agent with a bot
    // Analytical solution is:
    //   x(t) = [x(0) - K * tanh(alpha * x_bot)] * e^(-t) + K * tanh(alpha * x_bot)
    // Use std::tanh/std::exp: <cmath> only guarantees the names in namespace std
    auto x_t_analytical
        = ( x_0 - K * std::tanh( alpha * x_bot ) ) * std::exp( -time_elapsed ) + K * std::tanh( alpha * x_bot );

    REQUIRE_THAT( x_t, WithinAbs( x_t_analytical, 1e-6 ) );
}

0 comments on commit b086caf

Please sign in to comment.