InertialModel: Implemented unit test

We have implemented a unit test with 1 agent and 1 bot for the activity-driven inertial model.

Co-authored-by: Moritz Sallermann <[email protected]>
1 parent ece80c6 · commit 6c0ff3e · Showing 3 changed files with 119 additions and 0 deletions.
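For orientation before the diffs: the scenario the new test exercises can be written down in closed form. The following is a sketch inferred from the analytic check in the test file below, not quoted from the Seldon documentation. With one bot holding a fixed opinion x_bot and one ordinary agent with opinion x(t) and reluctance m, the agent's inertial activity-driven dynamics reduce to a damped, driven second-order ODE,

    \ddot{x}(t) + \mu\,\dot{x}(t) + x(t) = \frac{K}{m}\,\tanh(\alpha\, x_\mathrm{bot}),

where \mu is the friction_coefficient, K the social interaction strength, and \alpha the controversialness set in the configuration below. The test integrates this numerically through the simulation and compares the result against the exact solution.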
test/res/1bot_1agent_inertial.toml
@@ -0,0 +1,42 @@
[simulation]
model = "ActivityDrivenInertial"
rng_seed = 120 # Leaving this empty will pick a random seed

[io]
# n_output_network = 1 # Write out the network every n_output_network iterations
# n_output_agents = 1
print_progress = false # Print the iteration time; if not set, nothing is printed

[model]
max_iterations = 1000 # If not set, the maximum number of iterations is infinite

[ActivityDrivenInertial]
dt = 0.001 # Timestep for the integration of the coupled ODEs
m = 1 # Number of agents contacted when the agent is active
eps = 1 # Minimum activity epsilon; a_i belongs to [epsilon, 1]
gamma = 2.1 # Exponent of the power-law distribution of activities
reciprocity = 1 # Probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i, so an agent can have more than m incoming connections
homophily = 0.5 # aka beta; if zero, agents pick their interaction partners at random
alpha = 1.5 # Controversialness of the issue, must be greater than 0
K = 2.0 # Social interaction strength
mean_activities = false # Use the mean value of the power-law distribution for the activities of all agents
mean_weights = false # Use the mean-field approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent; defaults to false, in which case every agent has a reluctance of 1
reluctance_mean = 1.5 # Mean of the normal distribution before truncation (default 1.0)
reluctance_sigma = 0.1 # Width of the normal distribution (before truncation)
reluctance_eps = 0.01 # The normal distribution is truncated below this value

n_bots = 1 # Number of bots to be used; if not specified, defaults to 0 (bots deactivated)
# Bots are agents with fixed opinions and different parameters; the parameters are specified in the following lists.
# If n_bots is smaller than the length of any of the lists, only the first n_bots entries are used. If n_bots is greater, the code throws an exception.
bot_m = [1] # If not specified, defaults to `m`
bot_homophily = [0.7] # If not specified, defaults to `homophily`
bot_activity = [1.0] # If not specified, defaults to 0
bot_opinion = [2] # The fixed opinions of the bots

friction_coefficient = 0.5 # Friction coefficient

[network]
number_of_agents = 2
connections_per_agent = 1
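As an aside on the activity parameters above: gamma and eps describe a power-law distribution of agent activities, p(a) \propto a^{-\gamma} truncated to [\varepsilon, 1], so with eps = 1 (as in this config) every activity collapses to 1. The snippet below is a minimal, self-contained illustration of inverse-transform sampling from such a bounded power law; sample_activity is a hypothetical helper written only for this example and is not part of Seldon's API.

#include <cmath>
#include <cstdio>
#include <random>

// Hypothetical helper (illustration only, not Seldon code): draw an activity
// a ~ p(a) proportional to a^(-gamma), truncated to [eps, 1], via the inverse CDF.
double sample_activity( double gamma, double eps, std::mt19937 & gen )
{
    std::uniform_real_distribution<double> u01( 0.0, 1.0 );
    const double u = u01( gen );
    const double e = 1.0 - gamma; // exponent appearing in the CDF
    // F(a) = (a^e - eps^e) / (1 - eps^e)  =>  a = ( u * (1 - eps^e) + eps^e )^(1/e)
    return std::pow( u * ( 1.0 - std::pow( eps, e ) ) + std::pow( eps, e ), 1.0 / e );
}

int main()
{
    std::mt19937 gen( 120 ); // same seed as rng_seed above
    // With the config's eps = 1 the interval collapses and every draw is exactly 1
    std::printf( "eps = 1.0 : a = %f\n", sample_activity( 2.1, 1.0, gen ) );
    // A non-degenerate example with a smaller lower bound
    std::printf( "eps = 0.01: a = %f\n", sample_activity( 2.1, 0.01, gen ) );
}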
@@ -0,0 +1,76 @@
#include "catch2/matchers/catch_matchers.hpp" | ||
#include "models/InertialModel.hpp" | ||
#include "util/math.hpp" | ||
#include <catch2/catch_test_macros.hpp> | ||
#include <catch2/matchers/catch_matchers_floating_point.hpp> | ||
#include <complex> | ||
#include <config_parser.hpp> | ||
#include <filesystem> | ||
#include <network_io.hpp> | ||
#include <optional> | ||
#include <simulation.hpp> | ||
|
||
namespace fs = std::filesystem; | ||
|
||
TEST_CASE( "Test the probabilistic inertial activity driven model with one bot and one agent", "[inertial1Bot1Agent]" ) | ||
{ | ||
using namespace Seldon; | ||
using namespace Catch::Matchers; | ||
using AgentT = InertialModel::AgentT; | ||
|
||
auto proj_root_path = fs::current_path(); | ||
auto input_file = proj_root_path / fs::path( "test/res/1bot_1agent_inertial.toml" ); | ||
|
||
auto options = Config::parse_config_file( input_file.string() ); | ||
Config::print_settings( options ); | ||
|
||
auto simulation = Simulation<AgentT>( options, std::nullopt, std::nullopt ); | ||
|
||
// We need an output path for Simulation, but we won't write anything out there | ||
fs::path output_dir_path = proj_root_path / fs::path( "test/output_inertial" ); | ||
|
||
// Get the bot opinion (which won't change) | ||
auto bot = simulation.network.agents[0]; | ||
auto x_bot = bot.data.opinion; // Bot opinion | ||
fmt::print( "We have set the bot opinion = {}\n", x_bot ); | ||
|
||
// Get the initial agent opinion | ||
auto & agent = simulation.network.agents[1]; | ||
auto x_0 = agent.data.opinion; // Agent opinion | ||
fmt::print( "We have set agent x_0 = {}\n", x_0 ); | ||
|
||
simulation.run( output_dir_path ); | ||
|
||
auto model_settings = std::get<Seldon::Config::ActivityDrivenInertialSettings>( options.model_settings ); | ||
auto K = model_settings.K; | ||
auto alpha = model_settings.alpha; | ||
auto iterations = model_settings.max_iterations.value(); | ||
auto dt = model_settings.dt; | ||
std::complex<double> mu = model_settings.friction_coefficient; | ||
auto time_elapsed = iterations * dt; | ||
|
||
// Final agent and bot opinions after the simulation run | ||
auto x_t = agent.data.opinion; | ||
auto x_t_bot = bot.data.opinion; | ||
auto reluctance = agent.data.reluctance; | ||
|
||
fmt::print( "Agent reluctance is = {}\n", reluctance ); | ||
|
||
// The bot opinion should not change during the simulation | ||
REQUIRE_THAT( x_t_bot, WithinAbs( x_bot, 1e-16 ) ); | ||
|
||
// C = K/m tanh (alpha*x_bot) | ||
auto C = K / reluctance * std::tanh( alpha * x_bot ); | ||
|
||
fmt::print( "C = {}\n", C ); | ||
auto a1 = 0.5 * ( -std::sqrt( mu * mu - 4.0 ) - mu ); | ||
auto a2 = 0.5 * ( std::sqrt( mu * mu - 4.0 ) - mu ); | ||
auto c1 = ( x_0 - C ) / ( 1.0 - a1 / a2 ); | ||
auto c2 = -c1 * a1 / a2; | ||
// Test that the agent opinion matches the analytical solution for an agent with a bot | ||
// Analytical solution is | ||
// x_t = c1 * exp(a1*t) + c2 *exp(a2*t) + C | ||
auto x_t_analytical = c1 * std::exp( a1 * time_elapsed ) + c2 * std::exp( a2 * time_elapsed ) + C; | ||
|
||
REQUIRE_THAT( x_t, WithinAbs( x_t_analytical.real(), 1e-5 ) ); | ||
} |
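For reference, a sketch of where a1, a2, c1 and c2 in the test come from, consistent with the code above but assuming (as the constants imply) a zero initial velocity: the agent obeys \ddot{x} + \mu\dot{x} + x = C with C = \frac{K}{m}\tanh(\alpha\, x_\mathrm{bot}). The characteristic equation r^2 + \mu r + 1 = 0 has roots

    a_{1,2} = \tfrac{1}{2}\bigl(-\mu \mp \sqrt{\mu^2 - 4}\bigr),

so the general solution is x(t) = c_1 e^{a_1 t} + c_2 e^{a_2 t} + C. Imposing x(0) = x_0 and \dot{x}(0) = 0 gives

    c_1 a_1 + c_2 a_2 = 0 \;\Rightarrow\; c_2 = -c_1 a_1 / a_2, \qquad c_1 + c_2 = x_0 - C \;\Rightarrow\; c_1 = \frac{x_0 - C}{1 - a_1/a_2},

which are exactly the expressions computed in the test. For \mu = 0.5 the discriminant \mu^2 - 4 is negative, so the roots are complex conjugates; this is why mu is carried through std::complex and only the real part of x_t_analytical enters the final comparison.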