
Merge pull request #884 from BlueBrain/jblanco/memory_report_init
Improve memory usage in report initialization (#882)
pramodk authored Nov 24, 2022
2 parents 8bea2f0 + c0fef44 commit 7a29e1b
Showing 21 changed files with 248 additions and 185 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/coreneuron-ci.yml
@@ -36,9 +36,9 @@ jobs:
- {cmake_option: "-DCORENRN_ENABLE_MPI_DYNAMIC=ON", flag_warnings: ON}
- {cmake_option: "-DCORENRN_ENABLE_MPI_DYNAMIC=ON -DCORENRN_ENABLE_SHARED=OFF"}
- {cmake_option: "-DCORENRN_ENABLE_MPI=OFF"}
- {use_nmodl: ON, py_version: 3.6.7}
- {use_nmodl: ON, py_version: 3.7}
- {use_nmodl: ON}
- {use_ispc: ON, py_version: 3.6.7}
- {use_ispc: ON, py_version: 3.7}
include:
- os: ubuntu-20.04
config:
10 changes: 6 additions & 4 deletions .github/workflows/test-as-submodule.yml
@@ -28,6 +28,8 @@ jobs:
fail-fast: false
env:
CMAKE_BUILD_PARALLEL_LEVEL: ${{matrix.cores}}
SDK_ROOT: $(xcrun --sdk macosx --show-sdk-path)

steps:

- name: Install homebrew packages
@@ -36,16 +38,16 @@
brew install bison coreutils flex ninja openmpi
python3 -m pip install --upgrade numpy pytest pytest-cov
echo /usr/local/opt/flex/bin:/usr/local/opt/bison/bin >> $GITHUB_PATH
echo "CC=clang" >> $GITHUB_ENV
echo "CXX=clang++" >> $GITHUB_ENV
echo "CC=gcc" >> $GITHUB_ENV
echo "CXX=g++" >> $GITHUB_ENV
- name: Install apt packages
if: startsWith(matrix.os, 'ubuntu')
run: |
sudo apt-get update
sudo apt-get install bison cython3 flex libfl-dev libopenmpi-dev \
ninja-build openmpi-bin python3-dev python3-numpy python3-pytest \
python3-pytest-cov
ninja-build openmpi-bin python3-dev
python3 -m pip install --upgrade numpy pytest pytest-cov
echo "CC=gcc" >> $GITHUB_ENV
echo "CXX=g++" >> $GITHUB_ENV
2 changes: 1 addition & 1 deletion CMake/packages/Findnmodl.cmake
@@ -32,7 +32,7 @@
find_program(nmodl_BINARY NAMES nmodl${CMAKE_EXECUTABLE_SUFFIX}
HINTS "${CORENRN_NMODL_DIR}/bin" QUIET)

find_path(nmodl_INCLUDE "nmodl/fast_math.ispc" HINTS "${CORENRN_NMODL_DIR}/include")
find_path(nmodl_INCLUDE "nmodl/fast_math.hpp" HINTS "${CORENRN_NMODL_DIR}/include")
find_path(nmodl_PYTHONPATH "nmodl/__init__.py" HINTS "${CORENRN_NMODL_DIR}/lib")

# Checks 'REQUIRED', 'QUIET' and versions.
2 changes: 1 addition & 1 deletion README.md
@@ -395,4 +395,4 @@ You can see current [contributors here](https://github.com/BlueBrain/CoreNeuron/

CoreNEURON is developed in a joint collaboration between the Blue Brain Project and Yale University. This work is supported by funding to the Blue Brain Project, a research center of the École polytechnique fédérale de Lausanne (EPFL), from the Swiss government’s ETH Board of the Swiss Federal Institutes of Technology, NIH grant number R01NS11613 (Yale University), the European Union Seventh Framework Program (FP7/20072013) under grant agreement n◦ 604102 (HBP) and the European Union’s Horizon 2020 Framework Programme for Research and Innovation under Specific Grant Agreement n◦ 720270 (Human Brain Project SGA1), n◦ 785907 (Human Brain Project SGA2) and n◦ 945539 (Human Brain Project SGA3).

Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
2 changes: 1 addition & 1 deletion coreneuron/apps/corenrn_parameters.cpp
@@ -40,7 +40,7 @@ corenrn_parameters::corenrn_parameters() {
app.add_set(
"--verbose",
this->verbose,
{verbose_level::NONE, verbose_level::ERROR, verbose_level::INFO, verbose_level::DEBUG},
{verbose_level::NONE, verbose_level::ERROR, verbose_level::INFO, verbose_level::DEBUG_INFO},
"Verbose level: 0 = NONE, 1 = ERROR, 2 = INFO, 3 = DEBUG. Default is INFO");
app.add_flag("--model-stats",
this->model_stats,
8 changes: 7 additions & 1 deletion coreneuron/apps/corenrn_parameters.hpp
@@ -35,7 +35,13 @@
namespace coreneuron {

struct corenrn_parameters_data {
enum verbose_level : std::uint32_t { NONE = 0, ERROR = 1, INFO = 2, DEBUG = 3, DEFAULT = INFO };
enum verbose_level : std::uint32_t {
NONE = 0,
ERROR = 1,
INFO = 2,
DEBUG_INFO = 3,
DEFAULT = INFO
};

static constexpr int report_buff_size_default = 4;

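The enumerator rename from DEBUG to DEBUG_INFO most likely avoids collisions with a preprocessor DEBUG macro (the `#ifdef DEBUG` blocks in nrn_setup.cpp below show such a macro is in play in this code base). A minimal, self-contained illustration of the clash this sidesteps; the `params` struct here is invented for the example:

```cpp
#include <cstdint>

// Suppose a build defines DEBUG, as the #ifdef DEBUG blocks elsewhere suggest it can.
#define DEBUG 1

struct params {
    // An enumerator literally named DEBUG would be macro-expanded to "1 = 3" and fail to
    // compile; DEBUG_INFO is a different token and is left alone by the preprocessor.
    enum verbose_level : std::uint32_t { NONE = 0, ERROR = 1, INFO = 2, DEBUG_INFO = 3 };
};

int main() {
    return params::DEBUG_INFO == 3 ? 0 : 1;
}
```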
8 changes: 4 additions & 4 deletions coreneuron/apps/main1.cpp
@@ -428,13 +428,13 @@ static void trajectory_return() {
}
}

std::unique_ptr<ReportHandler> create_report_handler(ReportConfiguration& config,
std::unique_ptr<ReportHandler> create_report_handler(const ReportConfiguration& config,
const SpikesInfo& spikes_info) {
std::unique_ptr<ReportHandler> report_handler;
if (config.format == "Bin") {
report_handler = std::make_unique<BinaryReportHandler>(config);
report_handler = std::make_unique<BinaryReportHandler>();
} else if (config.format == "SONATA") {
report_handler = std::make_unique<SonataReportHandler>(config, spikes_info);
report_handler = std::make_unique<SonataReportHandler>(spikes_info);
} else {
if (nrnmpi_myid == 0) {
printf(" WARNING : Report name '%s' has unknown format: '%s'.\n",
@@ -595,7 +595,7 @@ extern "C" int run_solve_core(int argc, char** argv) {
std::unique_ptr<ReportHandler> report_handler = create_report_handler(configs[i],
spikes_info);
if (report_handler) {
report_handler->create_report(dt, tstop, delay);
report_handler->create_report(configs[i], dt, tstop, delay);
report_handlers.push_back(std::move(report_handler));
}
if (configs[i].report_dt < min_report_dt) {
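With this hunk the report handlers no longer receive the ReportConfiguration at construction; it is passed to create_report() per call, so a handler keeps no copy of the configuration. A self-contained mock of the new calling convention, with stand-in types replacing the real CoreNEURON classes (only the shape of the API is taken from the diff; everything else here is invented):

```cpp
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

// Stand-ins for the real CoreNEURON types; only the API shape mirrors the change above.
struct ReportConfiguration {
    std::string name;
    std::string format;
    double report_dt = 0.1;
};
struct SpikesInfo {};

struct ReportHandler {
    virtual ~ReportHandler() = default;
    // The configuration is now a per-call argument rather than constructor state.
    virtual void create_report(const ReportConfiguration& config, double dt, double tstop, double delay) {
        std::printf("report '%s' (%s): report_dt=%g dt=%g tstop=%g delay=%g\n",
                    config.name.c_str(), config.format.c_str(), config.report_dt, dt, tstop, delay);
    }
};

struct SonataReportHandler: ReportHandler {
    explicit SonataReportHandler(const SpikesInfo&) {}  // no ReportConfiguration stored
};

int main() {
    SpikesInfo spikes_info;
    std::vector<ReportConfiguration> configs{{"soma_report", "SONATA", 0.1}};
    std::vector<std::unique_ptr<ReportHandler>> report_handlers;
    for (const auto& config: configs) {
        auto handler = std::make_unique<SonataReportHandler>(spikes_info);
        handler->create_report(config, /*dt=*/0.025, /*tstop=*/100.0, /*delay=*/0.0);
        report_handlers.push_back(std::move(handler));
    }
}
```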
24 changes: 19 additions & 5 deletions coreneuron/io/mech_report.cpp
@@ -10,6 +10,7 @@
#include <vector>

#include "coreneuron/coreneuron.hpp"
#include "coreneuron/io/nrn_setup.hpp"
#include "coreneuron/mpi/nrnmpi.h"
#include "coreneuron/apps/corenrn_parameters.hpp"

@@ -19,6 +20,7 @@ void write_mech_report() {
/// mechanim count across all gids, local to rank
const auto n_memb_func = corenrn.get_memb_funcs().size();
std::vector<long> local_mech_count(n_memb_func, 0);
std::vector<long> local_mech_size(n_memb_func, 0);

/// each gid record goes on separate row, only check non-empty threads
for (int i = 0; i < nrn_nthread; i++) {
@@ -27,10 +29,12 @@
const int type = tml->index;
const auto& ml = tml->ml;
local_mech_count[type] += ml->nodecount;
local_mech_size[type] = memb_list_size(tml, true);
}
}

std::vector<long> total_mech_count(n_memb_func);
std::vector<long> total_mech_size(n_memb_func);

#if NRNMPI
if (corenrn_param.mpi_enable) {
@@ -39,21 +43,31 @@
&total_mech_count[0],
local_mech_count.size(),
1);

nrnmpi_long_allreduce_vec(&local_mech_size[0],
&total_mech_size[0],
local_mech_size.size(),
1);
} else
#endif
{
total_mech_count = local_mech_count;
total_mech_size = local_mech_size;
}

/// print global stats to stdout
if (nrnmpi_myid == 0) {
printf("\n================ MECHANISMS COUNT BY TYPE ==================\n");
printf("%4s %20s %10s\n", "Id", "Name", "Count");
printf("\n============== MECHANISMS COUNT AND SIZE BY TYPE =============\n");
printf("%4s %20s %10s %25s\n", "Id", "Name", "Count", "Total memory size (KiB)");
for (size_t i = 0; i < total_mech_count.size(); i++) {
printf("%4lu %20s %10ld\n", i, nrn_get_mechname(i), total_mech_count[i]);
if (total_mech_count[i] > 0) {
printf("%4lu %20s %10ld %25.2lf\n",
i,
nrn_get_mechname(i),
total_mech_count[i],
static_cast<double>(total_mech_size[i]) / 1024);
}
}
printf("=============================================================\n");
printf("==============================================================\n");
}
}

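The report now reduces two per-type vectors across ranks (instance counts and the new byte sizes) rather than one. For readers unfamiliar with the nrnmpi wrappers, a standalone sketch of the same element-wise sum using plain MPI; this assumes the trailing 1 in nrnmpi_long_allreduce_vec selects a sum reduction, and the local values below are placeholders:

```cpp
#include <mpi.h>
#include <cstdio>
#include <vector>

// Element-wise sum of per-mechanism-type counters across all ranks, mirroring the
// two nrnmpi_long_allreduce_vec calls in the hunk above.
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const std::size_t n_types = 4;
    std::vector<long> local_mech_count(n_types, rank + 1);          // placeholder local data
    std::vector<long> local_mech_size(n_types, 1024L * (rank + 1)); // placeholder byte counts
    std::vector<long> total_mech_count(n_types, 0);
    std::vector<long> total_mech_size(n_types, 0);

    MPI_Allreduce(local_mech_count.data(), total_mech_count.data(),
                  static_cast<int>(n_types), MPI_LONG, MPI_SUM, MPI_COMM_WORLD);
    MPI_Allreduce(local_mech_size.data(), total_mech_size.data(),
                  static_cast<int>(n_types), MPI_LONG, MPI_SUM, MPI_COMM_WORLD);

    if (rank == 0) {
        for (std::size_t i = 0; i < n_types; ++i) {
            std::printf("type %zu: count=%ld size=%.2f KiB\n",
                        i, total_mech_count[i], total_mech_size[i] / 1024.0);
        }
    }
    MPI_Finalize();
    return 0;
}
```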
36 changes: 32 additions & 4 deletions coreneuron/io/nrn_setup.cpp
@@ -966,9 +966,37 @@ void read_phase3(NrnThread& nt, UserParams& userParams) {
nt.summation_report_handler_ = std::make_unique<SummationReportMapping>();
}

static size_t memb_list_size(NrnThreadMembList* tml) {
/* Returns the size of the dynamically allocated memory for NrnThreadMembList
* Includes:
* - Size of NrnThreadMembList
* - Size of Memb_list
* - Size of nodeindices
* - Size of _permute
* - Size of _thread
* - Size of NetReceive and NetSend Buffers
* - Size of int variables
* - Size of double variables (If include_data is enabled. Those variables are already counted
* since they point to nt->_data.)
*/
size_t memb_list_size(NrnThreadMembList* tml, bool include_data) {
size_t nbyte = sizeof(NrnThreadMembList) + sizeof(Memb_list);
nbyte += tml->ml->nodecount * sizeof(int);
if (tml->ml->_permute) {
nbyte += tml->ml->nodecount * sizeof(int);
}
if (tml->ml->_thread) {
Memb_func& mf = corenrn.get_memb_func(tml->index);
nbyte += mf.thread_size_ * sizeof(ThreadDatum);
}
if (tml->ml->_net_receive_buffer) {
nbyte += sizeof(NetReceiveBuffer_t) + tml->ml->_net_receive_buffer->size_of_object();
}
if (tml->ml->_net_send_buffer) {
nbyte += sizeof(NetSendBuffer_t) + tml->ml->_net_send_buffer->size_of_object();
}
if (include_data) {
nbyte += corenrn.get_prop_param_size()[tml->index] * tml->ml->nodecount * sizeof(double);
}
nbyte += corenrn.get_prop_dparam_size()[tml->index] * tml->ml->nodecount * sizeof(Datum);
#ifdef DEBUG
int i = tml->index;
@@ -991,7 +1019,7 @@ size_t output_presyn_size(void) {
size_t nbyte = sizeof(gid2out) + sizeof(int) * gid2out.size() +
sizeof(PreSyn*) * gid2out.size();
#ifdef DEBUG
printf(" gid2out table bytes=~%ld size=%d\n", nbyte, gid2out.size());
printf(" gid2out table bytes=~%ld size=%ld\n", nbyte, gid2out.size());
#endif
return nbyte;
}
@@ -1003,7 +1031,7 @@ size_t input_presyn_size(void) {
size_t nbyte = sizeof(gid2in) + sizeof(int) * gid2in.size() +
sizeof(InputPreSyn*) * gid2in.size();
#ifdef DEBUG
printf(" gid2in table bytes=~%ld size=%d\n", nbyte, gid2in.size());
printf(" gid2in table bytes=~%ld size=%ld\n", nbyte, gid2in.size());
#endif
return nbyte;
}
@@ -1031,7 +1059,7 @@ size_t model_size(bool detailed_report) {
// Memb_list size
int nmech = 0;
for (auto tml = nt.tml; tml; tml = tml->next) {
nb_nt += memb_list_size(tml);
nb_nt += memb_list_size(tml, false);
++nmech;
}

2 changes: 2 additions & 0 deletions coreneuron/io/nrn_setup.hpp
@@ -42,6 +42,8 @@ extern void nrn_setup_cleanup();

extern int nrn_i_layout(int i, int cnt, int j, int size, int layout);

size_t memb_list_size(NrnThreadMembList* tml, bool include_data);

size_t model_size(bool detailed_report);

namespace coreneuron {
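To get a feel for what the new include_data flag adds, a toy recomputation of the memb_list_size bookkeeping with invented numbers (the nodecount, per-instance parameter counts and the sizeof(Datum) stand-in below are hypothetical, and the fixed struct overhead plus the optional permute/thread/buffer terms are left out):

```cpp
#include <cstdio>

int main() {
    // Hypothetical mechanism, chosen for illustration only.
    const long nodecount = 10000;   // instances of this mechanism type
    const long n_param = 10;        // doubles per instance (counted only when include_data is true)
    const long n_dparam = 3;        // Datum entries per instance
    const long sizeof_datum = 4;    // placeholder for sizeof(Datum)

    long nbyte = 0;
    nbyte += nodecount * sizeof(int);               // nodeindices
    nbyte += n_dparam * nodecount * sizeof_datum;   // integer (Datum) variables
    const long without_data = nbyte;
    nbyte += n_param * nodecount * sizeof(double);  // double variables, i.e. include_data == true

    std::printf("without data: %.2f KiB, with data: %.2f KiB\n",
                without_data / 1024.0, nbyte / 1024.0);
    // write_mech_report() passes include_data = true, while model_size() keeps passing false,
    // because those doubles point into nt->_data and are already counted there.
}
```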
13 changes: 8 additions & 5 deletions coreneuron/io/reports/binary_report_handler.cpp
@@ -13,11 +13,14 @@

namespace coreneuron {

void BinaryReportHandler::create_report(double dt, double tstop, double delay) {
void BinaryReportHandler::create_report(ReportConfiguration& config,
double dt,
double tstop,
double delay) {
#ifdef ENABLE_BIN_REPORTS
records_set_atomic_step(dt);
#endif // ENABLE_BIN_REPORTS
ReportHandler::create_report(dt, tstop, delay);
ReportHandler::create_report(config, dt, tstop, delay);
}

#ifdef ENABLE_BIN_REPORTS
@@ -44,22 +47,22 @@ static void create_custom_extra(const CellMapping& mapping, std::array<int, 5>&
}

void BinaryReportHandler::register_section_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report,
bool is_soma_target) {
create_extra_func create_extra = is_soma_target ? create_soma_extra : create_compartment_extra;
register_report(nt, config, vars_to_report, create_extra);
}

void BinaryReportHandler::register_custom_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report) {
create_extra_func create_extra = create_custom_extra;
register_report(nt, config, vars_to_report, create_extra);
}

void BinaryReportHandler::register_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report,
create_extra_func& create_extra) {
int sizemapping = 1;
11 changes: 4 additions & 7 deletions coreneuron/io/reports/binary_report_handler.hpp
@@ -20,23 +20,20 @@ namespace coreneuron {

class BinaryReportHandler: public ReportHandler {
public:
BinaryReportHandler(ReportConfiguration& config)
: ReportHandler(config) {}

void create_report(double dt, double tstop, double delay) override;
void create_report(ReportConfiguration& config, double dt, double tstop, double delay) override;
#ifdef ENABLE_BIN_REPORTS
void register_section_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report,
bool is_soma_target) override;
void register_custom_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report) override;

private:
using create_extra_func = std::function<void(const CellMapping&, std::array<int, 5>&)>;
void register_report(const NrnThread& nt,
ReportConfiguration& config,
const ReportConfiguration& config,
const VarsToReport& vars_to_report,
create_extra_func& create_extra);
#endif // ENABLE_BIN_REPORTS
2 changes: 1 addition & 1 deletion coreneuron/io/reports/nrnreport.hpp
@@ -101,7 +101,7 @@ struct ReportConfiguration {
double stop; // stop time of report
int num_gids; // total number of gids
int buffer_size; // hint on buffer size used for this report
std::set<int> target; // list of gids for this report
std::vector<int> target; // list of gids for this report
};

void setup_report_engine(double dt_report, double mindelay);
11 changes: 4 additions & 7 deletions coreneuron/io/reports/report_configuration_parser.cpp
@@ -106,15 +106,14 @@ void register_target_type(ReportConfiguration& report, ReportType report_type) {
std::vector<ReportConfiguration> create_report_configurations(const std::string& conf_file,
const std::string& output_dir,
SpikesInfo& spikes_info) {
std::vector<ReportConfiguration> reports;
std::string report_on;
int target;
std::ifstream report_conf(conf_file);

int num_reports = 0;
report_conf >> num_reports;
for (int i = 0; i < num_reports; i++) {
ReportConfiguration report;
std::vector<ReportConfiguration> reports(num_reports);
for (auto& report: reports) {
report.buffer_size = 4; // default size to 4 Mb

report_conf >> report.name >> report.target_name >> report.type_str >> report_on >>
@@ -147,15 +146,13 @@ std::vector<ReportConfiguration> create_report_configurations(const std::string&
parse_filter_string(report_on, report);
}
if (report.num_gids) {
std::vector<int> new_gids(report.num_gids);
report.target.resize(report.num_gids);
report_conf.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
report_conf.read(reinterpret_cast<char*>(new_gids.data()),
report_conf.read(reinterpret_cast<char*>(report.target.data()),
report.num_gids * sizeof(int));
report.target = std::set<int>(new_gids.begin(), new_gids.end());
// extra new line: skip
report_conf.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
reports.push_back(report);
}
// read population information for spike report
int num_populations;
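This parser change is where most of the memory saving in #882 comes from: the GID list now lands directly in the configuration's std::vector<int> instead of passing through a temporary vector and a node-based std::set<int> (roughly 40 bytes per element on a typical 64-bit implementation, against 4 bytes in a contiguous vector). A reduced sketch of the read pattern, with a made-up file name and a simplified layout standing in for the real report.conf format:

```cpp
#include <fstream>
#include <iostream>
#include <limits>
#include <vector>

// Read a count followed by that many raw ints, as the hunk above does for the
// per-report GID list. File name and layout here are illustrative only.
int main() {
    std::ifstream report_conf("report.conf");
    int num_gids = 0;
    report_conf >> num_gids;
    if (!report_conf || num_gids <= 0) {
        return 1;
    }
    std::vector<int> target(num_gids);  // one contiguous allocation, sized up front
    report_conf.ignore(std::numeric_limits<std::streamsize>::max(), '\n');  // skip to the binary block
    report_conf.read(reinterpret_cast<char*>(target.data()), num_gids * sizeof(int));
    std::cout << "read " << target.size() << " gids\n";
    return report_conf ? 0 : 1;
}
```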
4 changes: 2 additions & 2 deletions coreneuron/io/reports/report_event.hpp
@@ -20,15 +20,15 @@ namespace coreneuron {

#if defined(ENABLE_BIN_REPORTS) || defined(ENABLE_SONATA_REPORTS)
struct VarWithMapping {
int id;
uint32_t id;
double* var_value;
VarWithMapping(int id_, double* v_)
: id(id_)
, var_value(v_) {}
};

// mapping the set of variables pointers to report to its gid
using VarsToReport = std::unordered_map<int, std::vector<VarWithMapping>>;
using VarsToReport = std::unordered_map<uint64_t, std::vector<VarWithMapping>>;

class ReportEvent: public DiscreteEvent {
public:
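The map key and mapping id are widened here from int to uint64_t / uint32_t, which lets GIDs and element ids beyond the signed 32-bit range be stored without narrowing. A toy example of filling such a map, using only the two types from the hunk; the voltage array and ids are invented:

```cpp
#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

// The two types below are copied from the hunk; everything else is invented test data.
struct VarWithMapping {
    uint32_t id;
    double* var_value;
    VarWithMapping(int id_, double* v_)
        : id(id_)
        , var_value(v_) {}
};

using VarsToReport = std::unordered_map<uint64_t, std::vector<VarWithMapping>>;

int main() {
    static double voltages[2] = {-65.0, -64.5};  // pretend report buffer
    const uint64_t gid = 3000000000ULL;          // would not fit in a signed 32-bit key

    VarsToReport vars_to_report;
    vars_to_report[gid].emplace_back(0, &voltages[0]);
    vars_to_report[gid].emplace_back(1, &voltages[1]);

    for (const auto& entry: vars_to_report) {
        for (const auto& v: entry.second) {
            std::printf("gid %llu, element %u -> %g mV\n",
                        static_cast<unsigned long long>(entry.first), v.id, *v.var_value);
        }
    }
}
```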
