Inertial model #40

Merged: 19 commits (Apr 22, 2024)

34 changes: 34 additions & 0 deletions examples/ActivityDrivenInertial/conf.toml
@@ -0,0 +1,34 @@
[simulation]
model = "ActivityDrivenInertial"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, does not print

[model]
max_iterations = 500 # If not set, max iterations is infinite

[ActivityDrivenInertial]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted, when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
gamma = 2.1 # Exponent of the power-law distribution of activities
reciprocity = 0.65 # Probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i; hence agents can have more than m incoming connections
homophily = 1.0 # Also known as beta; if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 2.0
mean_activities = false # Use the mean value of the power-law distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent. By default false, and every agent has a reluctance of 1
reluctance_mean = 1.0 # Mean of the normal distribution before truncation (default 1.0)
reluctance_sigma = 0.25 # Width of the normal distribution (before truncation)
reluctance_eps = 0.01 # Lower bound; the normal distribution is truncated at this value

friction_coefficient = 1.0 # Friction coefficient; damps the velocity so agents tend to come to rest

[network]
number_of_agents = 1000
connections_per_agent = 10
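
Taken together, the parameters above suggest a second-order ("inertial") extension of the activity-driven dynamics, in which the reluctance plays the role of a mass and friction_coefficient damps the velocity. A plausible per-agent equation of motion, inferred from the parameter comments rather than read out of the PR's source:

m_i \ddot{x}_i = -\gamma \dot{x}_i - x_i + K \sum_j A_{ij}(t) \tanh( \alpha x_j )

Here m_i is the agent's reluctance, \gamma the friction_coefficient, K the coupling strength, \alpha the controversialness, and A_{ij}(t) the activity-driven contact network; with \gamma > 0 and no interactions, the velocity decays and the agent comes to rest, matching the comment on friction_coefficient.
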
3 changes: 2 additions & 1 deletion examples/ActivityDrivenReluctance/conf.toml
@@ -24,8 +24,9 @@ mean_weights = false # Use the meanfield approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent. By default; false and every agent has a reluctance of 1
reluctance_mean = 1.0 # Mean of distribution before drawing from a truncated normal distribution (default set to 1.0)
-reluctance_sigma = 0.25 # Width of normal distribution (before truncating)
+reluctance_sigma = 0.15 # Width of normal distribution (before truncating)
reluctance_eps = 0.01 # Minimum such that the normal distribution is truncated at this value
+covariance_factor = 0.0 # 0.0 means that the reluctances and activities are uncorrelated. Should be in the range of [-1,1]

[network]
number_of_agents = 1000
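
The reluctance_* parameters in this hunk describe a draw from a truncated normal distribution. A minimal C++ sketch of one way to realize such a draw via rejection sampling; the function is hypothetical rather than the PR's implementation, and it ignores covariance_factor, which would additionally correlate reluctance with activity:

#include <random>

// Draw a reluctance m_i from N(mean, sigma), re-drawing until the sample
// clears the truncation threshold eps, so every reluctance satisfies m_i >= eps.
double draw_reluctance( std::mt19937 & gen, double mean, double sigma, double eps )
{
    std::normal_distribution<double> dist( mean, sigma );
    double value = dist( gen );
    while( value < eps )
    {
        value = dist( gen );
    }
    return value;
}
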
2 changes: 1 addition & 1 deletion examples/DeffuantVector/conf.toml
@@ -5,7 +5,7 @@ model = "Deffuant"
[io]
# n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
-print_progress = true # Print the iteration time ; if not set, then does not prints
+print_progress = true # Print the iteration time ; if not set, then does not print
output_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

68 changes: 68 additions & 0 deletions include/agents/inertial_agent.hpp
@@ -0,0 +1,68 @@
#pragma once

#include "agent.hpp"
#include "agent_io.hpp"
#include <util/misc.hpp>

namespace Seldon
{

struct InertialAgentData
{
double opinion = 0; // x_i
double activity = 0; // alpha_i
double reluctance = 1.0; // m_i
double velocity = 0.0; // d(x_i)/dt
};

using InertialAgent = Agent<InertialAgentData>;

template<>
inline std::string agent_to_string<InertialAgent>( const InertialAgent & agent )
{
return fmt::format(
"{}, {}, {}, {}", agent.data.opinion, agent.data.velocity, agent.data.activity, agent.data.reluctance );
}

template<>
inline std::string opinion_to_string<InertialAgent>( const InertialAgent & agent )
{
return fmt::format( "{}", agent.data.opinion );
}

template<>
inline InertialAgent agent_from_string<InertialAgent>( const std::string & str )
{
InertialAgent res{};

auto callback = [&]( int idx_list, std::string & substr )
{
if( idx_list == 0 )
{
res.data.opinion = std::stod( substr );
}
else if( idx_list == 1 )
{
res.data.velocity = std::stod( substr );
}
else if( idx_list == 2 )
{
res.data.activity = std::stod( substr );
}
else if( idx_list == 3 )
{
res.data.reluctance = std::stod( substr );
}
};

Seldon::parse_comma_separated_list( str, callback );

return res;
}

template<>
inline std::vector<std::string> agent_to_string_column_names<InertialAgent>()
{
return { "opinion", "velocity", "activity", "reluctance" };
}
} // namespace Seldon
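
These specializations give InertialAgent a four-column text format. A minimal round-trip sketch; it assumes the include path and seldon's fmt dependency, and is a usage illustration rather than code from the PR:

#include "agents/inertial_agent.hpp"
#include <fmt/core.h>

int main()
{
    Seldon::InertialAgent agent{};
    agent.data.opinion    = 0.5;
    agent.data.velocity   = -0.1;
    agent.data.activity   = 0.3;
    agent.data.reluctance = 1.2;

    // Serialize in the column order opinion, velocity, activity, reluctance,
    // then parse the comma-separated string back into a fresh agent.
    auto str    = Seldon::agent_to_string( agent );
    auto parsed = Seldon::agent_from_string<Seldon::InertialAgent>( str );
    fmt::print( "{} -> opinion {}\n", str, parsed.data.opinion );
}
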
11 changes: 9 additions & 2 deletions include/config_parser.hpp
@@ -24,7 +24,8 @@ namespace Seldon::Config
enum class Model
{
DeGroot,
-ActivityDrivenModel,
+ActivityDrivenModel, // @TODO : no need for model here
+ActivityDrivenInertial,
DeffuantModel
};

@@ -81,6 +82,11 @@ struct ActivityDrivenSettings
double covariance_factor = 0.0;
};

+struct ActivityDrivenInertialSettings : public ActivityDrivenSettings
+{
+    double friction_coefficient = 1.0;
+};

struct InitialNetworkSettings
{
std::optional<std::string> file;
@@ -90,7 +96,8 @@

struct SimulationOptions
{
-using ModelVariantT = std::variant<DeGrootSettings, ActivityDrivenSettings, DeffuantSettings>;
+using ModelVariantT
+    = std::variant<DeGrootSettings, ActivityDrivenSettings, ActivityDrivenInertialSettings, DeffuantSettings>;
Model model;
std::string model_string;
int rng_seed = std::random_device()();
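
Note that ActivityDrivenInertialSettings is both a subclass of ActivityDrivenSettings and a separate alternative in ModelVariantT; std::holds_alternative therefore distinguishes the two, because an exact alternative match beats the derived-to-base conversion. A self-contained sketch of that behavior with stand-in structs, not the real headers:

#include <cstdio>
#include <variant>

// Stand-ins mirroring the shape of the settings types in this diff.
struct ActivityDrivenSettings { double covariance_factor = 0.0; };
struct ActivityDrivenInertialSettings : ActivityDrivenSettings
{
    double friction_coefficient = 1.0;
};

using ModelVariantT = std::variant<ActivityDrivenSettings, ActivityDrivenInertialSettings>;

int main()
{
    // The derived type is an exact alternative, so initialization selects it
    // rather than converting down to the ActivityDrivenSettings alternative.
    ModelVariantT v = ActivityDrivenInertialSettings{};
    std::printf( "inertial? %d\n",
                 static_cast<int>( std::holds_alternative<ActivityDrivenInertialSettings>( v ) ) );
}
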
18 changes: 18 additions & 0 deletions include/model_factory.hpp
@@ -3,6 +3,7 @@
#include "models/ActivityDrivenModel.hpp"
#include "models/DeGroot.hpp"
#include "models/DeffuantModel.hpp"
#include "models/InertialModel.hpp"
#include "network.hpp"
#include <memory>
#include <random>
@@ -61,6 +62,23 @@ create_model_activity_driven( Network<AgentT> & network, const ModelVariantT & m
}
}

+template<typename AgentT>
+inline auto create_model_activity_driven_inertial(
+    Network<AgentT> & network, const ModelVariantT & model_settings, std::mt19937 & gen )
+{
+    if constexpr( std::is_same_v<AgentT, InertialModel::AgentT> )
+    {
+        auto settings = std::get<Config::ActivityDrivenInertialSettings>( model_settings );
+        auto model    = std::make_unique<InertialModel>( settings, network, gen );
+        return model;
+    }
+    else
+    {
+        throw std::runtime_error( "Incompatible agent and model type!" );
+        return std::unique_ptr<Model<AgentT>>{};
+    }
+}

template<typename AgentT>
inline auto create_model_deffuant( Network<AgentT> & network, const ModelVariantT & model_settings, std::mt19937 & gen )
{
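
The factory guards construction with if constexpr, so the InertialModel branch is only instantiated when the network's agent type matches; the return after the throw is unreachable at runtime but gives the rejected branch a deduced return type. A self-contained sketch of the pattern with stand-in types, not the PR's actual classes:

#include <memory>
#include <stdexcept>
#include <type_traits>

struct InertialAgent {};
template<typename AgentT>
struct Model { virtual ~Model() = default; };
struct InertialModel : Model<InertialAgent> {};

template<typename AgentT>
auto create_inertial()
{
    if constexpr( std::is_same_v<AgentT, InertialAgent> )
    {
        // Compiled only when the agent type matches the model's agent type.
        return std::make_unique<InertialModel>();
    }
    else
    {
        throw std::runtime_error( "Incompatible agent and model type!" );
        // Unreachable, but fixes the deduced return type for this branch.
        return std::unique_ptr<Model<AgentT>>{};
    }
}

// Usage: auto model = create_inertial<InertialAgent>();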