diff --git a/examples/ActivityDriven/conf.toml b/examples/ActivityDriven/conf.toml
index 5660bc4..06ad8d3 100644
--- a/examples/ActivityDriven/conf.toml
+++ b/examples/ActivityDriven/conf.toml
@@ -4,6 +4,7 @@ model = "ActivityDriven"
 
 [io]
 n_output_network = 20 # Write the network every 20 iterations
+print_progress = true # Print the iteration time ; if not set, then always print
 
 [model]
 max_iterations = 500 # If not set, max iterations is infinite
diff --git a/examples/ActivityDrivenMeanField/conf.toml b/examples/ActivityDrivenMeanField/conf.toml
index 81c82ca..25f9dd3 100644
--- a/examples/ActivityDrivenMeanField/conf.toml
+++ b/examples/ActivityDrivenMeanField/conf.toml
@@ -1,12 +1,13 @@
 [simulation]
 model = "ActivityDriven"
-# rng_seed = 120 # Leaving this empty will pick a random seed
+rng_seed = 12345678 # Leaving this empty will pick a random seed
 
 [io]
 n_output_network = 20 # Write the network every 20 iterations
+print_progress = true # Print the iteration time ; if not set, then always print
 
 [model]
-max_iterations = 500 # If not set, max iterations is infinite
+max_iterations = 2000 # If not set, max iterations is infinite
 
 [ActivityDriven]
 dt = 0.01 # Timestep for the integration of the coupled ODEs
@@ -14,10 +15,10 @@ m = 10 # Number of agents contacted, when the agent is active
 eps = 0.01 # Minimum activity epsilon; a_i belongs to [epsilon,1]
 gamma = 2.1 # Exponent of activity power law distribution of activities
 reciprocity = 0.5 # probability that when agent i contacts j via weighted reservoir sampling, j also sends feedback to i. So every agent can have more than m incoming connections
-homophily = 0.5 # aka beta. if zero, agents pick their interaction partners at random
+homophily = 0.0 # aka beta. if zero, agents pick their interaction partners at random
 alpha = 3.0 # Controversialness of the issue, must be greater than 0.
 K = 3.0 # Social interaction strength
-mean_activities = true # Use the mean value of the powerlaw distribution for the activities of all agents
+mean_activities = false # Use the mean value of the powerlaw distribution for the activities of all agents
 mean_weights = true # Use the meanfield approximation of the network edges
 
 [network]
diff --git a/examples/DeGroot/conf.toml b/examples/DeGroot/conf.toml
index 4d256cb..7d58b66 100644
--- a/examples/DeGroot/conf.toml
+++ b/examples/DeGroot/conf.toml
@@ -2,6 +2,9 @@
 model = "DeGroot"
 # rng_seed = 120 # Leaving this empty will pick a random seed
 
+[io]
+print_progress = false # Print the iteration time ; if not set, then always prints
+
 [model]
 max_iterations = 20 # If not set, max iterations is infinite
 
diff --git a/include/simulation.hpp b/include/simulation.hpp
index 9198985..42ff77b 100644
--- a/include/simulation.hpp
+++ b/include/simulation.hpp
@@ -17,6 +17,7 @@ class Simulation
         // Write out the agents/network every n iterations, nullopt means never
         std::optional<size_t> n_output_agents = 1;
         std::optional<size_t> n_output_network = std::nullopt;
+        bool print_progress = true; // Print the iteration time, by default always prints
     };
 
 private:
diff --git a/src/main.cpp b/src/main.cpp
index 79612ee..ad78599 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -53,8 +53,8 @@ int main( int argc, char * argv[] )
     const std::optional<size_t> n_output_agents = simulation.output_settings.n_output_agents;
     const std::optional<size_t> n_output_network = simulation.output_settings.n_output_network;
 
+    typedef std::chrono::milliseconds ms;
     auto t_simulation_start = std::chrono::high_resolution_clock::now();
-    auto print_progress = true;
     do
     {
         auto t_iter_start = std::chrono::high_resolution_clock::now();
@@ -62,12 +62,14 @@ int main( int argc, char * argv[] )
         simulation.model->iteration();
 
         auto t_iter_end = std::chrono::high_resolution_clock::now();
-        auto iter_time = std::chrono::duration_cast<std::chrono::seconds>( t_iter_end - t_iter_start );
+        auto iter_time = std::chrono::duration_cast<ms>( t_iter_end - t_iter_start );
 
         // Print the iteration time?
-        if( print_progress )
+        if( simulation.output_settings.print_progress )
         {
-            fmt::print( "Iteration {} iter_time = {:%Hh %Mm %Ss}\n", simulation.model->n_iterations, iter_time );
+            fmt::print(
+                "Iteration {} iter_time = {:%Hh %Mm %Ss} \n", simulation.model->n_iterations,
+                std::chrono::floor<std::chrono::seconds>( iter_time ) );
         }
 
         // Write out the opinion?
@@ -87,10 +89,11 @@ int main( int argc, char * argv[] )
     } while( !simulation.model->finished() );
 
     auto t_simulation_end = std::chrono::high_resolution_clock::now();
-    auto total_time = std::chrono::duration_cast<std::chrono::seconds>( t_simulation_end - t_simulation_start );
+    auto total_time = std::chrono::duration_cast<ms>( t_simulation_end - t_simulation_start );
 
     fmt::print( "-----------------------------------------------------------------\n" );
     fmt::print(
-        "Finished after {} iterations, total time = {:%Hh %Mm %Ss}\n", simulation.model->n_iterations, total_time );
+        "Finished after {} iterations, total time = {:%Hh %Mm %Ss}\n", simulation.model->n_iterations,
+        std::chrono::floor<std::chrono::seconds>( total_time ) );
     return 0;
 }
\ No newline at end of file
diff --git a/src/simulation.cpp b/src/simulation.cpp
index e8c1324..f7c8648 100644
--- a/src/simulation.cpp
+++ b/src/simulation.cpp
@@ -46,6 +46,12 @@ Seldon::Simulation::Simulation(
         output_settings.n_output_agents = n_output_agents.value();
     }
 
+    auto print_progress = tbl["io"]["print_progress"].value<bool>();
+    if( print_progress.has_value() )
+    {
+        output_settings.print_progress = print_progress.value();
+    }
+
     // Check if the 'model' keyword exists
     std::optional<std::string> model_opt = tbl["simulation"]["model"].value<std::string>();
     if( !model_opt.has_value() )