Merge pull request #28 from seldon-code/develop
Network refactor and reluctant Activity driven model
MSallermann authored Mar 18, 2024
2 parents d79eb37 + 78a5d1b commit 0fd353b
Showing 37 changed files with 935 additions and 822 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -34,4 +34,4 @@ jobs:
- name: Test with meson
shell: micromamba-shell {0}
run: |
meson test -C build
meson test -C build --verbose
19 changes: 19 additions & 0 deletions .gitmessage
@@ -0,0 +1,19 @@
# Title: Summary, imperative, start upper case, don't end with a period
# No more than 50 chars. #### 50 chars is here: #

# Remember blank line between title and body.

# Body: Explain *what* and *why* (not *how*).
# Wrap at 72 chars. ################################## which is here: #

# At the end: Include Co-authored-by for all contributors.
# Include at least one empty line before it. Format:
# Co-authored-by: name <[email protected]>

# 1. Separate subject from body with a blank line
# 2. Limit the subject line to 50 characters
# 3. Capitalize the subject line
# 4. Do not end the subject line with a period
# 5. Use the imperative mood in the subject line
# 6. Wrap the body at 72 characters
# 7. Use the body to explain what and why vs. how
9 changes: 9 additions & 0 deletions CONTRIBUTING.md
@@ -52,3 +52,12 @@ A good commit should have:
<!-- * Follow our [style guide][style]. -->

<!-- [style]: https://github.com/thoughtbot/guides/tree/master/style -->

### Commit template
If you are not already using a commit template, consider doing so. The Seldon repository includes a template under `.gitmessage`.

```
git config --global commit.template .gitmessage
```

You can omit `--global` to use the template only when committing to Seldon.
2 changes: 2 additions & 0 deletions examples/ActivityDriven/conf.toml
@@ -6,6 +6,8 @@ model = "ActivityDriven"
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time ; if not set, then always print
print_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 2 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 500 # If not set, max iterations is infinite
2 changes: 2 additions & 0 deletions examples/ActivityDrivenBot/conf.toml
@@ -6,6 +6,8 @@ rng_seed = 120 # Leaving this empty will pick a random seed
n_output_network = 1 # Write the network every iteration
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time; if not set, then always print
print_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 1000 # If not set, max iterations is infinite
2 changes: 2 additions & 0 deletions examples/ActivityDrivenMeanField/conf.toml
@@ -6,6 +6,8 @@ rng_seed = 12345678 # Leaving this empty will pick a random seed
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time ; if not set, then always print
print_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 2000 # If not set, max iterations is infinite
32 changes: 32 additions & 0 deletions examples/ActivityDrivenReluctance/conf.toml
@@ -0,0 +1,32 @@
[simulation]
model = "ActivityDriven"
# rng_seed = 120 # Leaving this empty will pick a random seed

[io]
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = true # Print the iteration time ; if not set, then always print

[model]
max_iterations = 500 # If not set, max iterations is infinite

[ActivityDriven]
dt = 0.01 # Timestep for the integration of the coupled ODEs
m = 10 # Number of agents contacted when the agent is active
eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon, 1]
gamma = 2.1 # Exponent of the power-law distribution of activities
reciprocity = 0.65 # Probability that when agent i contacts j (via weighted reservoir sampling), j also contacts i back; so an agent can have more than m incoming connections
homophily = 1.0 # aka beta; if zero, agents pick their interaction partners at random
alpha = 3.0 # Controversialness of the issue, must be greater than 0.
K = 2.0
mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
mean_weights = false # Use the meanfield approximation of the network edges

reluctances = true # Assigns a "reluctance" (m_i) to each agent. Defaults to false, in which case every agent has a reluctance of 1
reluctance_mean = 1.0 # Mean of the normal distribution from which reluctances are drawn, before truncation (default 1.0)
reluctance_sigma = 0.25 # Standard deviation of the normal distribution (before truncation)
reluctance_eps = 0.01 # Lower bound at which the normal distribution is truncated

[network]
number_of_agents = 1000
connections_per_agent = 10
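
The three `reluctance_*` keys above describe a truncated normal distribution. A minimal standalone sketch of such a draw via rejection sampling (the function `draw_reluctances` and its structure are illustrative, not the code Seldon actually uses):

```cpp
// Illustration only: draw per-agent reluctances m_i from a normal distribution
// with the given mean and sigma, truncated from below at eps.
#include <cstddef>
#include <random>
#include <vector>

std::vector<double> draw_reluctances(
    std::size_t n_agents, double mean, double sigma, double eps, std::mt19937 & gen )
{
    std::normal_distribution<double> dist( mean, sigma );
    std::vector<double> reluctances( n_agents );
    for( auto & m_i : reluctances )
    {
        do
        {
            m_i = dist( gen ); // redraw until the sample clears the lower cutoff eps
        } while( m_i < eps );
    }
    return reluctances;
}
```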
2 changes: 2 additions & 0 deletions examples/DeGroot/conf.toml
@@ -6,6 +6,8 @@ model = "DeGroot"
n_output_network = 20 # Write the network every 20 iterations
n_output_agents = 1 # Write the opinions of agents after every iteration
print_progress = false # Print the iteration time ; if not set, then always prints
print_initial = true # Print the initial opinions and network file from step 0. If not set, this is true by default.
start_output = 1 # Start writing out opinions and/or network files from this iteration. If not set, this is 1.

[model]
max_iterations = 20 # If not set, max iterations is infinite
6 changes: 3 additions & 3 deletions include/agent.hpp
@@ -1,5 +1,4 @@
#pragma once
#include "agent_base.hpp"
#include <fmt/format.h>
namespace Seldon
{
@@ -8,17 +7,18 @@ namespace Seldon
(which contains an opinion and perhaps some other things),
it needs to implement to_string and from_string*/
template<typename T>
class Agent : public AgentBase
class Agent
{
public:
using data_t = T;
data_t data;
Agent() = default;
Agent( data_t data ) : data( data ) {}
virtual ~Agent() = default;

void from_string( const std::string & str );

std::string to_string() const override
virtual std::string to_string() const
{
return fmt::format( "{:.16f}", data );
}
17 changes: 0 additions & 17 deletions include/agent_base.hpp

This file was deleted.

2 changes: 1 addition & 1 deletion include/agent_generation.hpp
@@ -26,7 +26,7 @@ std::vector<AgentT> generate_from_file( const std::string & file )
// Get the current line as a substring
auto line = file_contents.substr( start_of_line, end_of_line - start_of_line );
start_of_line = end_of_line + 1;
// TODO: check if empty or comment

if( line.empty() )
{
break;
7 changes: 7 additions & 0 deletions include/config_parser.hpp
@@ -32,6 +32,8 @@ struct OutputSettings
std::optional<size_t> n_output_agents = std::nullopt;
std::optional<size_t> n_output_network = std::nullopt;
bool print_progress = true; // Print the iteration time, by default always prints
bool print_initial = true; // Output initial opinions and network, by default always outputs.
int start_output = 1; // Start printing opinion and/or network files from this iteration number
};

struct DeGrootSettings
@@ -58,6 +60,11 @@ struct ActivityDrivenSettings
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );
bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;
};

struct InitialNetworkSettings
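
For orientation, a hypothetical sketch of how the new `print_initial` and `start_output` settings added to `OutputSettings` above could gate file writes in the main loop. The trimmed struct and the helper `should_write` are made up for illustration and are not the actual Seldon simulation code:

```cpp
// Illustration only: decide whether opinion/network files get written at a
// given iteration, based on print_initial and start_output (hypothetical helper).
#include <cstddef>

struct OutputGate
{
    bool print_initial = true; // controls writing of the initial state (step 0)
    int start_output   = 1;    // first iteration at which regular output starts
};

bool should_write( const OutputGate & gate, std::size_t iteration )
{
    if( iteration == 0 )
        return gate.print_initial;
    return iteration >= static_cast<std::size_t>( gate.start_output );
}
```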
35 changes: 21 additions & 14 deletions include/model.hpp
@@ -1,5 +1,5 @@
#pragma once
#include "model_base.hpp"
#include <cstddef>
#include <optional>

namespace Seldon
@@ -8,37 +8,44 @@ namespace Seldon
/* Model<T> is a base class from which the actual models derive. They have efficient access to a vector of AgentT,
 * without any pointer indirections */
template<typename AgentT_>
class Model : public ModelBase
class Model
{
public:
using AgentT = AgentT_;
std::vector<AgentT> agents;

std::optional<int> max_iterations = std::nullopt;
Model( size_t n_agents ) : agents( std::vector<AgentT>( int( n_agents ), AgentT() ) ) {}
Model( std::vector<AgentT> && agents ) : agents( agents ) {}
std::optional<size_t> max_iterations = std::nullopt;

void iteration() override
virtual void initialize_iterations()
{
n_iterations++;
_n_iterations = 0;
}

virtual void iteration()
{
_n_iterations++;
};

bool finished() override
size_t n_iterations()
{
return _n_iterations;
}

virtual bool finished()
{
if( max_iterations.has_value() )
{
return max_iterations.value() <= n_iterations;
return max_iterations.value() <= n_iterations();
}
else
{
return false;
}
};

AgentBase * get_agent( int idx ) override // For this to work AgentT needs to be a subclass of AgentBase
{
return &agents[idx];
}
virtual ~Model() = default;

private:
size_t _n_iterations{};
};

} // namespace Seldon
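
To illustrate the refactor, a hypothetical toy model deriving directly from the templated `Model<AgentT>`. The class name `DriftModel` and its update rule are made up; only the pattern of overriding `iteration()` and calling the base class reflects the new header:

```cpp
// Hypothetical example, not part of this PR: a concrete model now derives from
// Model<AgentT> directly, with typed access to its agents and the iteration counter.
#include "agent.hpp"
#include "model.hpp"
#include <cstddef>

namespace Seldon
{

class DriftModel : public Model<Agent<double>>
{
public:
    double drift = 0.1;

    DriftModel( std::size_t n_agents ) : Model<Agent<double>>( n_agents ) {}

    void iteration() override
    {
        Model<Agent<double>>::iteration(); // advance the base-class iteration counter
        for( auto & agent : agents )
        {
            agent.data += drift; // trivial per-step update on the typed agent data
        }
    }
};

} // namespace Seldon
```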
29 changes: 0 additions & 29 deletions include/model_base.hpp

This file was deleted.

53 changes: 37 additions & 16 deletions include/models/ActivityDrivenModel.hpp
@@ -6,7 +6,7 @@
#include <cstddef>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

@@ -15,32 +15,49 @@ namespace Seldon

struct ActivityAgentData
{
double opinion = 0; // x_i
double activity = 0; // a_i
double opinion = 0; // x_i
double activity = 0; // a_i
double reluctance = 1.0; // m_i
};

template<>
inline std::string Agent<ActivityAgentData>::to_string() const
{
return fmt::format( "{}, {}", data.opinion, data.activity );
return fmt::format( "{}, {}, {}", data.opinion, data.activity, data.reluctance );
};

template<>
inline void Agent<ActivityAgentData>::from_string( const std::string & str )
{
auto pos_comma = str.find_first_of( ',' );
data.opinion = std::stod( str.substr( 0, pos_comma ) );
data.activity = std::stod( str.substr( pos_comma + 1, str.size() ) );
auto pos_comma = str.find_first_of( ',' );
auto pos_next_comma = str.find( ',', pos_comma + 1 );

data.opinion = std::stod( str.substr( 0, pos_comma ) );

if( pos_next_comma == std::string::npos )
{
data.activity = std::stod( str.substr( pos_comma + 1, str.size() ) );
}
else
{
data.activity = std::stod( str.substr( pos_comma + 1, pos_next_comma ) );
}

if( pos_next_comma != std::string::npos )
{
data.reluctance = std::stod( str.substr( pos_next_comma + 1, str.size() ) );
}
};

class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
class ActivityDrivenModel : public Model<Agent<ActivityAgentData>>
{
public:
using AgentT = Agent<ActivityAgentData>;
using AgentT = Agent<ActivityAgentData>;
using NetworkT = Network<AgentT>;

private:
Network & network;
std::vector<std::vector<Network::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
NetworkT & network;
std::vector<std::vector<NetworkT::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};
@@ -68,7 +85,8 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
for( size_t j = 0; j < neighbour_buffer.size(); j++ )
{
j_index = neighbour_buffer[j];
k_buffer[idx_agent] += K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) );
k_buffer[idx_agent] += 1.0 / network.agents[idx_agent].data.reluctance * K * weight_buffer[j]
* std::tanh( alpha * opinion( j_index ) );
}
// Multiply by the timestep
k_buffer[idx_agent] *= dt;
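
In equation form, the contribution accumulated in the hunk above (covering only these visible lines; m_i denotes the agent's reluctance) is:

$$\Delta k_i = \frac{K}{m_i} \sum_{j \in \mathcal{N}(i)} w_{ij}\,\tanh(\alpha\, x_j)\,\Delta t$$

where the sum runs over the contacted neighbours j with weights w_{ij}, and x_j is the opinion of agent j.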
@@ -100,6 +118,12 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean = 1.0;
double reluctance_sigma = 0.25;
double reluctance_eps = 0.01;
double covariance_factor = 0.0;

// bot @TODO: less hacky

size_t n_bots = 0; // The first n_bots agents are bots
@@ -113,14 +137,11 @@ class ActivityAgentModel : public Model<Agent<ActivityAgentData>>
return n_bots > 0;
}

ActivityAgentModel( int n_agents, Network & network, std::mt19937 & gen );
ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set

void iteration() override;

// bool finished() override;
};

} // namespace Seldon
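
For context, a hypothetical round trip of the extended `to_string` / `from_string` pair, assuming the header is reachable as `models/ActivityDrivenModel.hpp`; the printed values in the comments reflect fmt's default `{}` formatting. Both the old two-field and the new three-field agent lines are accepted:

```cpp
// Illustration only: parsing and printing ActivityAgentData with the optional
// third reluctance field (a missing reluctance keeps its default of 1.0).
#include "models/ActivityDrivenModel.hpp"
#include <fmt/format.h>

int main()
{
    Seldon::Agent<Seldon::ActivityAgentData> agent;

    agent.from_string( "0.25, 0.8" );        // old format: opinion, activity
    fmt::print( "{}\n", agent.to_string() ); // -> "0.25, 0.8, 1"

    agent.from_string( "0.25, 0.8, 1.5" );   // new format with explicit reluctance
    fmt::print( "{}\n", agent.to_string() ); // -> "0.25, 0.8, 1.5"
    return 0;
}
```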