From 597c800dbe14ba8cdda14f16db3f705451758608 Mon Sep 17 00:00:00 2001 From: Charles Gautier <33026375+chgautier@users.noreply.github.com> Date: Tue, 27 Sep 2022 10:15:29 +0200 Subject: [PATCH] chore(release): merge release 21.10.next into 21.10.x (#386) * enh(broker): cbd with multiargs and robot tests (#306) -enh(broker): cbd with multiargs and robot tests -Validate with tests/broker/command-line.robot -Update README.md -Common.py : fix find in log with timeout REFS: MON-13901 * fix(broker/bam): downtimes on kpi can be more than one * enh(tests/bam): new tests to reproduce a bug * chore(doc): CHANGELOG updated and version set to 21.10.3 * chore(tests): README updated REFS: MON-14091 * fix(broker/engine): grpc api can be changed through configuration. Otherwise it is fixed to localhost (#320) REFS: MON-13904 * fix(broker/rrd): rebuild fixed in 21.10 (#340) * Also an issue due to conan fixed, python 3.8 is needed. REFS: MON-14092 * fix(ci): issues with conan fixed * fix(ci/scripts): conan bad path * fix(broker/bam): overlapping downtimes on kpi service are well handled now. (#341) REFS: MON-14091 * fix(ci/tests): missing dependencies for python38 * fix(broker/lua): lua stream connector accepts empty parameters(21.10) (#359) REFS: MON-13875 * fix(broker/muxer): poller waits at most 5s to send goodbye to broker before shutdown (#360) REFS: MON-14511 Co-authored-by: denliA <91119589+denliA@users.noreply.github.com> Co-authored-by: David Boucher Co-authored-by: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> --- CHANGELOG.md | 32 + CMakeLists.txt | 4 +- centreon-broker/bam/src/ba.cc | 12 +- centreon-broker/bam/src/kpi_service.cc | 16 +- centreon-broker/bam/test/ba/kpi_service.cc | 1 + .../inc/com/centreon/broker/config/state.hh | 3 + .../core/inc/com/centreon/broker/io/stream.hh | 2 + .../centreon/broker/processing/acceptor.hh | 1 + .../centreon/broker/processing/endpoint.hh | 2 + .../centreon/broker/processing/failover.hh | 1 + .../com/centreon/broker/processing/feeder.hh | 2 + .../broker/processing/stat_visitable.hh | 2 + .../core/src/config/applier/endpoint.cc | 5 + centreon-broker/core/src/config/parser.cc | 25 +- centreon-broker/core/src/config/state.cc | 25 +- centreon-broker/core/src/io/stream.cc | 15 + centreon-broker/core/src/main.cc | 77 +-- .../core/src/processing/acceptor.cc | 16 + .../core/src/processing/failover.cc | 10 +- centreon-broker/core/src/processing/feeder.cc | 10 +- centreon-broker/core/test/config/parser.cc | 109 ++++ centreon-broker/lua/src/factory.cc | 20 +- .../inc/com/centreon/broker/rrd/backend.hh | 3 +- .../rrd/inc/com/centreon/broker/rrd/cached.hh | 7 +- .../inc/com/centreon/broker/rrd/creator.hh | 3 +- .../rrd/inc/com/centreon/broker/rrd/lib.hh | 17 +- .../rrd/inc/com/centreon/broker/rrd/output.hh | 6 +- centreon-broker/rrd/src/connector.cc | 23 +- centreon-broker/rrd/src/creator.cc | 99 +-- centreon-broker/rrd/src/lib.cc | 13 +- centreon-broker/rrd/src/output.cc | 41 +- .../com/centreon/broker/storage/rebuilder.hh | 3 +- .../inc/com/centreon/broker/storage/stream.hh | 1 + centreon-broker/storage/src/rebuilder.cc | 13 + centreon-broker/storage/src/stream.cc | 8 + .../tcp/inc/com/centreon/broker/tcp/stream.hh | 1 + .../com/centreon/broker/tcp/tcp_connection.hh | 3 + centreon-broker/tcp/src/stream.cc | 15 + centreon-broker/tcp/src/tcp_connection.cc | 16 + .../centreon/engine/configuration/state.hh | 3 + centreon-engine/src/configuration/state.cc | 46 +- centreon-engine/src/main.cc | 6 +- .../configuration/applier/applier-global.cc | 36 ++ 
.../Dockerfile.collect-centos7-dependencies | 3 +- ci/scripts/collect-sonar-scanner-common.sh | 4 +- ci/scripts/collect-sources-analysis.sh | 1 + ci/scripts/collect-test-robot.sh | 2 +- ci/scripts/collect-unit-tests.sh | 9 +- tests/README.md | 225 ++----- tests/bam/inherited_downtime.robot | 82 +++ tests/broker/command-line.robot | 71 +++ tests/init-sql.sh | 16 +- tests/resources/Broker.py | 557 +++++++++-------- tests/resources/Common.py | 333 +++++----- tests/resources/Engine.py | 570 ++++++++++++++---- tests/resources/db_conf.py | 55 +- tests/resources/db_variables.robot | 9 + tests/resources/resources.robot | 18 +- 58 files changed, 1822 insertions(+), 886 deletions(-) create mode 100644 tests/broker/command-line.robot create mode 100644 tests/resources/db_variables.robot diff --git a/CHANGELOG.md b/CHANGELOG.md index 98b7ab9e1cc..1449fb358bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## 21.10.3 + +### centreon-broker + +#### Enhancements + +*grpc* + +The gRPC API now listens on localhost by default, and it can be configured +with the Broker configuration file. + +#### Fixes + +*rrd* + +The rebuild of RRD graphs has been fixed. + +*bam* + +If a service with two overlapping downtimes is a BA KPI and the first downtime +is cancelled from the service, it was as if all the downtimes were removed from +the KPI. This new version fixes this issue. + +### centreon-engine + +#### Enhancements + +*grpc* + +The gRPC API now listens on localhost by default, and it can be configured +with the Engine configuration file. + ## 21.10.2 ### centreon-broker diff --git a/CMakeLists.txt b/CMakeLists.txt index 3a66d6c180e..676e9dc122b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ ## -## Copyright 2009-2021 Centreon +## Copyright 2009-2022 Centreon ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ endif () # Version. set(COLLECT_MAJOR 21) set(COLLECT_MINOR 10) -set(COLLECT_PATCH 2) +set(COLLECT_PATCH 3) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") # add_definitions(-DCENTREON_BROKER_VERSION=\"${COLLECT_VERSION}\") diff --git a/centreon-broker/bam/src/ba.cc b/centreon-broker/bam/src/ba.cc index 0b480eeac1e..b5e6270a861 100644 --- a/centreon-broker/bam/src/ba.cc +++ b/centreon-broker/bam/src/ba.cc @@ -1,5 +1,5 @@ /* -** Copyright 2014-2016, 2021 Centreon +** Copyright 2014-2016, 2021-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. @@ -72,10 +72,12 @@ ba::ba(uint32_t id, _host_id(host_id), _service_id(service_id), _generate_virtual_status(generate_virtual_status), - _computed_soft_state(source == configuration::ba::state_source_best ? - ba::state::state_critical : ba::state::state_ok), - _computed_hard_state(source == configuration::ba::state_source_best ? - ba::state::state_critical : ba::state::state_ok), + _computed_soft_state(source == configuration::ba::state_source_best + ? ba::state::state_critical + : ba::state::state_ok), + _computed_hard_state(source == configuration::ba::state_source_best + ?
ba::state::state_critical + : ba::state::state_ok), _num_soft_critical_childs{0.f}, _num_hard_critical_childs{0.f}, _acknowledgement_hard(0.0), diff --git a/centreon-broker/bam/src/kpi_service.cc b/centreon-broker/bam/src/kpi_service.cc index 1f2ab93dbea..f3aa2238b8c 100644 --- a/centreon-broker/bam/src/kpi_service.cc +++ b/centreon-broker/bam/src/kpi_service.cc @@ -1,5 +1,5 @@ /* -** Copyright 2014-2015, 2021 Centreon +** Copyright 2014-2015, 2021-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. @@ -182,7 +182,7 @@ bool kpi_service::is_acknowledged() const { * @param[out] visitor Object that will receive events. */ void kpi_service::service_update( - std::shared_ptr const& status, + const std::shared_ptr& status, io::stream* visitor) { if (status && status->host_id == _host_id && status->service_id == _service_id) { @@ -252,18 +252,21 @@ void kpi_service::service_update( * @param[in] dt * @param[out] visitor Object that will receive events. */ -void kpi_service::service_update(std::shared_ptr const& dt, +void kpi_service::service_update(const std::shared_ptr& dt, io::stream* visitor) { assert(dt && dt->host_id == _host_id && dt->service_id == _service_id); // Update information. - _downtimed = dt->was_started && dt->actual_end_time.is_null(); - if (_downtime_ids.contains(dt->internal_id) && !dt->was_cancelled) { + bool downtimed = dt->was_started && dt->actual_end_time.is_null(); + if (!_downtimed && downtimed) + _downtimed = true; + + if (_downtime_ids.contains(dt->internal_id) && dt->deletion_time.is_null()) { log_v2::bam()->trace("Downtime {} already handled in this kpi service", dt->internal_id); return; } - if (_downtimed) { + if (downtimed) { log_v2::bam()->trace("adding in kpi service the impacting downtime {}", dt->internal_id); _downtime_ids.insert(dt->internal_id); @@ -271,6 +274,7 @@ void kpi_service::service_update(std::shared_ptr const& dt, log_v2::bam()->trace("removing from kpi service the impacting downtime {}", dt->internal_id); _downtime_ids.erase(dt->internal_id); + _downtimed = !_downtime_ids.empty(); } if (!_event || _event->in_downtime != _downtimed) { diff --git a/centreon-broker/bam/test/ba/kpi_service.cc b/centreon-broker/bam/test/ba/kpi_service.cc index 7b484cbd576..02d472056ad 100644 --- a/centreon-broker/bam/test/ba/kpi_service.cc +++ b/centreon-broker/bam/test/ba/kpi_service.cc @@ -971,6 +971,7 @@ TEST_F(BamBA, KpiServiceDt) { std::cout << "service_update 1" << std::endl; kpis[0]->service_update(dt, _visitor.get()); + dt->deletion_time = now + 2 + 10 * i + 5; dt->actual_end_time = now + 2 + 10 * i + 5; dt->was_cancelled = true; std::cout << "service_update 2" << std::endl; diff --git a/centreon-broker/core/inc/com/centreon/broker/config/state.hh b/centreon-broker/core/inc/com/centreon/broker/config/state.hh index bde6fabe72a..2913186bbc0 100644 --- a/centreon-broker/core/inc/com/centreon/broker/config/state.hh +++ b/centreon-broker/core/inc/com/centreon/broker/config/state.hh @@ -42,6 +42,7 @@ namespace config { class state { int _broker_id; uint16_t _rpc_port; + std::string _listen_address; std::string _broker_name; std::string _cache_directory; std::string _command_file; @@ -75,6 +76,8 @@ class state { int broker_id() const noexcept; void rpc_port(uint16_t port) noexcept; uint16_t rpc_port(void) const noexcept; + void listen_address(const std::string& listen_address) noexcept; + const std::string& listen_address() const noexcept; void 
broker_name(std::string const& name); const std::string& broker_name() const noexcept; void cache_directory(std::string const& dir); diff --git a/centreon-broker/core/inc/com/centreon/broker/io/stream.hh b/centreon-broker/core/inc/com/centreon/broker/io/stream.hh index e95698e1afb..f6912904f67 100644 --- a/centreon-broker/core/inc/com/centreon/broker/io/stream.hh +++ b/centreon-broker/core/inc/com/centreon/broker/io/stream.hh @@ -77,6 +77,8 @@ class stream { bool validate(std::shared_ptr const& d, std::string const& error); virtual int write(std::shared_ptr const& d) = 0; const std::string& get_name() const { return _name; } + + virtual bool wait_for_all_events_written(unsigned ms_timeout); }; } // namespace io diff --git a/centreon-broker/core/inc/com/centreon/broker/processing/acceptor.hh b/centreon-broker/core/inc/com/centreon/broker/processing/acceptor.hh index 634a6f0596f..1e4991b60ce 100644 --- a/centreon-broker/core/inc/com/centreon/broker/processing/acceptor.hh +++ b/centreon-broker/core/inc/com/centreon/broker/processing/acceptor.hh @@ -88,6 +88,7 @@ class acceptor : public endpoint { void set_read_filters(std::unordered_set const& filters); void set_retry_interval(time_t retry_interval); void set_write_filters(std::unordered_set const& filters); + bool wait_for_all_events_written(unsigned ms_timeout) override; }; } // namespace processing diff --git a/centreon-broker/core/inc/com/centreon/broker/processing/endpoint.hh b/centreon-broker/core/inc/com/centreon/broker/processing/endpoint.hh index ec93df06065..5d1add72888 100644 --- a/centreon-broker/core/inc/com/centreon/broker/processing/endpoint.hh +++ b/centreon-broker/core/inc/com/centreon/broker/processing/endpoint.hh @@ -36,6 +36,8 @@ class endpoint : public stat_visitable { virtual void update() {} virtual void start() = 0; virtual void exit() = 0; + + virtual bool wait_for_all_events_written(unsigned) { return true; } }; } // namespace processing diff --git a/centreon-broker/core/inc/com/centreon/broker/processing/failover.hh b/centreon-broker/core/inc/com/centreon/broker/processing/failover.hh index 8c854e6ff72..efbe39d4ddf 100644 --- a/centreon-broker/core/inc/com/centreon/broker/processing/failover.hh +++ b/centreon-broker/core/inc/com/centreon/broker/processing/failover.hh @@ -79,6 +79,7 @@ class failover : public endpoint { void set_failover(std::shared_ptr fo); void set_retry_interval(time_t retry_interval); void update() override; + bool wait_for_all_events_written(unsigned ms_timeout) override; protected: // From stat_visitable diff --git a/centreon-broker/core/inc/com/centreon/broker/processing/feeder.hh b/centreon-broker/core/inc/com/centreon/broker/processing/feeder.hh index 39dad121275..430a51de590 100644 --- a/centreon-broker/core/inc/com/centreon/broker/processing/feeder.hh +++ b/centreon-broker/core/inc/com/centreon/broker/processing/feeder.hh @@ -79,6 +79,8 @@ class feeder : public stat_visitable { feeder& operator=(const feeder&) = delete; bool is_finished() const noexcept; const char* get_state() const; + + bool wait_for_all_events_written(unsigned ms_timeout); }; } // namespace processing diff --git a/centreon-broker/core/inc/com/centreon/broker/processing/stat_visitable.hh b/centreon-broker/core/inc/com/centreon/broker/processing/stat_visitable.hh index a9a6d728fde..cfbe665b7eb 100644 --- a/centreon-broker/core/inc/com/centreon/broker/processing/stat_visitable.hh +++ b/centreon-broker/core/inc/com/centreon/broker/processing/stat_visitable.hh @@ -54,6 +54,8 @@ class stat_visitable { virtual void 
_forward_statistic(nlohmann::json& tree); public: + static constexpr unsigned idle_microsec_wait_idle_thread_delay = 100000; + stat_visitable(std::string const& name = std::string()); virtual ~stat_visitable() noexcept = default; stat_visitable(stat_visitable const& other) = delete; diff --git a/centreon-broker/core/src/config/applier/endpoint.cc b/centreon-broker/core/src/config/applier/endpoint.cc index 540d055895a..9f449636e27 100644 --- a/centreon-broker/core/src/config/applier/endpoint.cc +++ b/centreon-broker/core/src/config/applier/endpoint.cc @@ -210,6 +210,9 @@ void endpoint::_discard() { _discarding = true; log_v2::config()->debug("endpoint applier: destruction"); + // wait for failover and feeder to push endloop event + ::usleep(processing::stat_visitable::idle_microsec_wait_idle_thread_delay + + 100000); // Exit threads. { log_v2::config()->debug("endpoint applier: requesting threads termination"); @@ -219,6 +222,7 @@ void endpoint::_discard() { // We begin with feeders for (auto it = _endpoints.begin(); it != _endpoints.end();) { if (it->second->is_feeder()) { + it->second->wait_for_all_events_written(5000); log_v2::config()->trace( "endpoint applier: send exit signal to endpoint '{}'", it->second->get_name()); @@ -244,6 +248,7 @@ void endpoint::_discard() { // We continue with failovers for (auto it = _endpoints.begin(); it != _endpoints.end();) { + it->second->wait_for_all_events_written(5000); log_v2::config()->trace( "endpoint applier: send exit signal on endpoint '{}'", it->second->get_name()); diff --git a/centreon-broker/core/src/config/parser.cc b/centreon-broker/core/src/config/parser.cc index 198bf6e8463..43624db123e 100644 --- a/centreon-broker/core/src/config/parser.cc +++ b/centreon-broker/core/src/config/parser.cc @@ -130,10 +130,27 @@ state parser::parse(std::string const& file) { &json::is_number, &json::get)) ; else if (it.key() == "grpc" && it.value().is_object()) { - if (json_document["centreonBroker"]["grpc"]["rpc_port"].is_number()) { - retval.rpc_port(static_cast( - json_document["centreonBroker"]["grpc"]["rpc_port"] - .get())); + if (json_document["centreonBroker"]["grpc"].contains("rpc_port")) { + if (json_document["centreonBroker"]["grpc"]["rpc_port"] + .is_number()) { + retval.rpc_port(static_cast( + json_document["centreonBroker"]["grpc"]["rpc_port"] + .get())); + } else + throw msg_fmt( + "The rpc_port value in the grpc object should be an integer"); + } + if (json_document["centreonBroker"]["grpc"].contains( + "listen_address")) { + if (json_document["centreonBroker"]["grpc"]["listen_address"] + .is_string()) { + retval.listen_address( + json_document["centreonBroker"]["grpc"]["listen_address"] + .get()); + } else + throw msg_fmt( + "The listen_address value in the grpc object should be a " + "string"); } } else if (get_conf({it.key(), it.value()}, "broker_name", retval, &state::broker_name, diff --git a/centreon-broker/core/src/config/state.cc b/centreon-broker/core/src/config/state.cc index 0063214ffdc..48104a1754d 100644 --- a/centreon-broker/core/src/config/state.cc +++ b/centreon-broker/core/src/config/state.cc @@ -41,6 +41,7 @@ state::state() state::state(const state& other) : _broker_id(other._broker_id), _rpc_port(other._rpc_port), + _listen_address{other._listen_address}, _broker_name(other._broker_name), _cache_directory(other._cache_directory), _command_file(other._command_file), @@ -70,6 +71,7 @@ state& state::operator=(state const& other) { if (this != &other) { _broker_id = other._broker_id; _rpc_port = other._rpc_port; + 
_listen_address = other._listen_address; _broker_name = other._broker_name; _cache_directory = other._cache_directory; _command_file = other._command_file; @@ -92,6 +94,7 @@ state& state::operator=(state const& other) { void state::clear() { _broker_id = 0; _rpc_port = 0; + _listen_address.resize(0); _broker_name.clear(); _cache_directory.clear(); _command_file.clear(); @@ -360,10 +363,30 @@ std::string const& state::poller_name() const noexcept { void state::rpc_port(uint16_t port) noexcept { _rpc_port = port; } -uint16_t state::rpc_port(void) const noexcept { +uint16_t state::rpc_port() const noexcept { return _rpc_port; } +/** + * @brief Set the interface address the gRPC API listens on. + * + * @param listen_address An address or a hostname ("127.0.0.1", "localhost", + * ...) + */ +void state::listen_address(const std::string& listen_address) noexcept { + _listen_address = listen_address; +} + +/** + * @brief Access to the configured listen address, or an empty string if not + * defined. In the latter case, broker listens on localhost. + * + * @return The listen address for the gRPC API. + */ +const std::string& state::listen_address() const noexcept { + return _listen_address; +} + state::log& state::log_conf() { return _log_conf; } diff --git a/centreon-broker/core/src/io/stream.cc b/centreon-broker/core/src/io/stream.cc index 54ff74ea25b..ab341bed86b 100644 --- a/centreon-broker/core/src/io/stream.cc +++ b/centreon-broker/core/src/io/stream.cc @@ -106,3 +106,18 @@ bool stream::validate(std::shared_ptr<io::data> const& d, } return true; } + +/** + * @brief If this stream has a substream, wait until the substream has sent + * all its data on the wire. + * + * @param ms_timeout + * @return true all data was sent + * @return false the timeout expired + */ +bool stream::wait_for_all_events_written(unsigned ms_timeout) { + if (_substream) { + return _substream->wait_for_all_events_written(ms_timeout); + } + return true; +} diff --git a/centreon-broker/core/src/main.cc b/centreon-broker/core/src/main.cc index 6615de76d5e..55efd55bbf6 100644 --- a/centreon-broker/core/src/main.cc +++ b/centreon-broker/core/src/main.cc @@ -35,7 +35,11 @@ #include "com/centreon/broker/log_v2.hh" #include "com/centreon/broker/misc/diagnostic.hh" +#include "absl/strings/numbers.h" +#include "com/centreon/exceptions/msg_fmt.hh" + using namespace com::centreon::broker; +using namespace com::centreon::exceptions; // Main config file. static std::vector<std::string> gl_mainconfigfiles; static std::atomic_bool gl_term{false}; static struct option long_options[] = {{"pool_size", required_argument, 0, 's'}, {"check", no_argument, 0, 'c'}, - {"debug", no_argument, 0, 'd'}, {"diagnose", no_argument, 0, 'D'}, {"version", no_argument, 0, 'v'}, {"help", no_argument, 0, 'h'}, @@ -132,6 +135,7 @@ int main(int argc, char* argv[]) { int opt, option_index = 0, n_thread = 0; std::string broker_name{"unknown"}; uint16_t default_port{51000}; + std::string default_listen_address{"localhost"}; // Set configuration update handler. if (signal(SIGHUP, hup_handler) == SIG_ERR) { @@ -155,33 +159,35 @@ int main(int argc, char* argv[]) { try { // Check the command line.
bool check(false); - bool debug(false); bool diagnose(false); bool help(false); bool version(false); - opt = getopt_long(argc, argv, "t:cdDvh", long_options, &option_index); - switch (opt) { - case 't': - n_thread = atoi(optarg); - break; - case 'c': - check = true; - break; - case 'd': - debug = true; - break; - case 'D': - diagnose = true; - break; - case 'h': - help = true; - break; - case 'v': - version = true; - break; - default: - break; + while ((opt = getopt_long(argc, argv, "s:cDvh", long_options, + &option_index)) != -1) { + switch (opt) { + case 's': + if (!absl::SimpleAtoi(optarg, &n_thread)) { + throw msg_fmt("The option -s expects a positive integer"); + } + break; + case 'c': + check = true; + break; + case 'D': + diagnose = true; + break; + case 'h': + help = true; + break; + case 'v': + version = true; + break; + default: + throw msg_fmt( + "Allowed options: [-s <poolsize>] [-c] [-D] [-h] [-v]"); + break; + } } if (optind < argc) @@ -199,14 +205,13 @@ int main(int argc, char* argv[]) { diag.generate(gl_mainconfigfiles); } else if (help) { log_v2::core()->info( - "USAGE: {} [-t] [-c] [-d] [-D] [-h] [-v] [<configfile>]", argv[0]); - - log_v2::core()->info(" -t Set x threads."); - log_v2::core()->info(" -c Check configuration file."); - log_v2::core()->info(" -d Enable debug mode."); - log_v2::core()->info(" -D Generate a diagnostic file."); - log_v2::core()->info(" -h Print this help."); - log_v2::core()->info(" -v Print Centreon Broker version."); + "USAGE: {} [-s <poolsize>] [-c] [-D] [-h] [-v] [<configfile>]", + argv[0]); + log_v2::core()->info(" '-s' Set the thread pool size."); + log_v2::core()->info(" '-c' Check configuration file."); + log_v2::core()->info(" '-D' Generate a diagnostic file."); + log_v2::core()->info(" '-h' Print this help."); + log_v2::core()->info(" '-v' Print Centreon Broker version."); log_v2::core()->info("Centreon Broker {}", CENTREON_BROKER_VERSION); log_v2::core()->info("Copyright 2009-2021 Centreon"); log_v2::core()->info( @@ -217,7 +222,8 @@ int main(int argc, char* argv[]) { retval = 0; } else if (gl_mainconfigfiles.empty()) { log_v2::core()->error( - "USAGE: {} [-c] [-d] [-D] [-h] [-v] [<configfile>]\n\n", argv[0]); + "USAGE: {} [-s <poolsize>] [-c] [-D] [-h] [-v] [<configfile>]\n\n", + argv[0]); return 1; } else { log_v2::core()->info("Centreon Broker {}", CENTREON_BROKER_VERSION); @@ -248,12 +254,15 @@ int main(int argc, char* argv[]) { gl_state = conf; } + if (!gl_state.listen_address().empty()) + default_listen_address = gl_state.listen_address(); + if (gl_state.rpc_port() == 0) default_port += gl_state.broker_id(); else default_port = gl_state.rpc_port(); std::unique_ptr<brokerrpc, std::function<void(brokerrpc*)>> rpc( - new brokerrpc("0.0.0.0", default_port, broker_name), + new brokerrpc(default_listen_address, default_port, broker_name), [](brokerrpc* rpc) { rpc->shutdown(); delete rpc; diff --git a/centreon-broker/core/src/processing/acceptor.cc b/centreon-broker/core/src/processing/acceptor.cc index 9e3fd459cfb..ff1d87fe0c4 100644 --- a/centreon-broker/core/src/processing/acceptor.cc +++ b/centreon-broker/core/src/processing/acceptor.cc @@ -248,3 +248,19 @@ void acceptor::_callback() noexcept { _state = acceptor::finished; _state_cv.notify_all(); } + +/** + * @brief Wait at most ms_timeout ms for all events to be sent + * + * @param ms_timeout + * @return true all events were sent + * @return false the timeout expired + */ +bool acceptor::wait_for_all_events_written(unsigned ms_timeout) { + std::lock_guard<std::mutex> lock(_stat_mutex); + bool ret = true; + for (processing::feeder* to_wait : _feeders) { + ret &= to_wait->wait_for_all_events_written(ms_timeout); + } + return ret; +} diff --git 
a/centreon-broker/core/src/processing/failover.cc b/centreon-broker/core/src/processing/failover.cc index 31a901141f4..8b27720a517 100644 --- a/centreon-broker/core/src/processing/failover.cc +++ b/centreon-broker/core/src/processing/failover.cc @@ -345,7 +345,7 @@ void failover::_run() { we = _stream->flush(); } _subscriber->get_muxer().ack_events(we); - ::usleep(100000); + ::usleep(idle_microsec_wait_idle_thread_delay); } } } @@ -599,3 +599,11 @@ void failover::start() { bool failover::should_exit() const { return _should_exit; } + +bool failover::wait_for_all_events_written(unsigned ms_timeout) { + std::lock_guard stream_lock(_stream_m); + if (_stream) { + return _stream->wait_for_all_events_written(ms_timeout); + } + return true; +} diff --git a/centreon-broker/core/src/processing/feeder.cc b/centreon-broker/core/src/processing/feeder.cc index 356231fd747..956d6958c40 100644 --- a/centreon-broker/core/src/processing/feeder.cc +++ b/centreon-broker/core/src/processing/feeder.cc @@ -192,7 +192,7 @@ void feeder::_callback() noexcept { log_v2::processing()->trace( "feeder '{}': timeout on stream and muxer, waiting for 100000µs", _name); - ::usleep(100000); + ::usleep(idle_microsec_wait_idle_thread_delay); } } } catch (exceptions::shutdown const& e) { @@ -255,3 +255,11 @@ const char* feeder::get_state() const { } return "unknown"; } + +bool feeder::wait_for_all_events_written(unsigned ms_timeout) { + misc::read_lock lock(_client_m); + if (_client) { + return _client->wait_for_all_events_written(ms_timeout); + } + return true; +} diff --git a/centreon-broker/core/test/config/parser.cc b/centreon-broker/core/test/config/parser.cc index 85d2d07c066..62c62332e3f 100644 --- a/centreon-broker/core/test/config/parser.cc +++ b/centreon-broker/core/test/config/parser.cc @@ -548,3 +548,112 @@ TEST(parser, logWithNullLoggers) { // Remove temporary file. ::remove(config_file.c_str()); } + +TEST(parser, grpc_full) { + // File name. + std::string config_file(misc::temp_path()); + + // Open file. + FILE* file_stream(fopen(config_file.c_str(), "w")); + if (!file_stream) + throw msg_fmt("could not open '{}'", config_file); + // Data. + std::string data; + data = + "{\n" + " \"centreonBroker\": {\n" + " \"broker_id\": 1,\n" + " \"broker_name\": \"central-broker-master\",\n" + " \"poller_id\": 1,\n" + " \"poller_name\": \"Central\",\n" + " \"module_directory\": " + "\"/etc\",\n" + " \"log_timestamp\": true,\n" + " \"event_queue_max_size\": 100000,\n" + " \"command_file\": \"/var/lib/centreon-broker/command.sock\",\n" + " \"cache_directory\": \"/tmp\",\n" + " \"log\": {\n" + " \"directory\": \"/tmp\"\n" + " },\n" + " \"grpc\": {\n" + " \"rpc_port\": 51001,\n" + " \"listen_address\": \"10.0.2.26\"\n" + " }\n" + " }\n" + "}\n"; + + // Write data. + if (fwrite(data.c_str(), data.size(), 1, file_stream) != 1) + throw msg_fmt("could not write content of '{}'", config_file); + + // Close file. + fclose(file_stream); + + // Parse. + config::parser p; + config::state s{p.parse(config_file)}; + + // Remove temporary file. 
+ ::remove(config_file.c_str()); + + // Check global params + ASSERT_EQ(s.rpc_port(), 51001); + ASSERT_EQ(s.listen_address(), std::string("10.0.2.26")); + ASSERT_EQ(s.broker_id(), 1); + ASSERT_EQ(s.broker_name(), "central-broker-master"); + ASSERT_EQ(s.poller_id(), 1); + ASSERT_EQ(s.module_directory(), "/etc"); + ASSERT_EQ(s.event_queue_max_size(), 100000); + ASSERT_EQ(s.command_file(), "/var/lib/centreon-broker/command.sock"); + ASSERT_EQ(s.cache_directory(), "/tmp/"); + ASSERT_EQ(s.log_conf().directory, "/tmp"); + ASSERT_EQ(s.log_conf().max_size, 0u); +} + +TEST(parser, grpc_in_error) { + // File name. + std::string config_file(misc::temp_path()); + + // Open file. + FILE* file_stream(fopen(config_file.c_str(), "w")); + if (!file_stream) + throw msg_fmt("could not open '{}'", config_file); + // Data. + std::string data; + data = + "{\n" + " \"centreonBroker\": {\n" + " \"broker_id\": 1,\n" + " \"broker_name\": \"central-broker-master\",\n" + " \"poller_id\": 1,\n" + " \"poller_name\": \"Central\",\n" + " \"module_directory\": " + "\"/etc\",\n" + " \"log_timestamp\": true,\n" + " \"event_queue_max_size\": 100000,\n" + " \"command_file\": \"/var/lib/centreon-broker/command.sock\",\n" + " \"cache_directory\": \"/tmp\",\n" + " \"log\": {\n" + " \"directory\": \"/tmp\"\n" + " },\n" + " \"grpc\": {\n" + " \"rpc_port\": \"foo\",\n" + " \"listen_address\": \"10.0.2.26\"\n" + " }\n" + " }\n" + "}\n"; + + // Write data. + if (fwrite(data.c_str(), data.size(), 1, file_stream) != 1) + throw msg_fmt("could not write content of '{}'", config_file); + + // Close file. + fclose(file_stream); + + // Parse. + config::parser p; + ASSERT_THROW(p.parse(config_file), std::exception); + + // Remove temporary file. + ::remove(config_file.c_str()); +} diff --git a/centreon-broker/lua/src/factory.cc b/centreon-broker/lua/src/factory.cc index 9a9e38ab30e..e15adc558e0 100644 --- a/centreon-broker/lua/src/factory.cc +++ b/centreon-broker/lua/src/factory.cc @@ -85,18 +85,14 @@ io::endpoint* factory::new_endpoint( throw msg_fmt("lua: couldn't read a configuration json"); if (js.is_object()) { - json const& name{js["name"]}; - json const& type{js["type"]}; - json const& value{js["value"]}; + json const& name{js.at("name")}; + json const& type{js.at("type")}; + json const& value{js.at("value")}; if (name.get().empty()) throw msg_fmt( "lua: couldn't read a configuration field because" " its name is empty"); - if (value.get().empty()) - throw msg_fmt( - "lua: couldn't read a configuration field because" - "' configuration field because its value is empty"); std::string t((type.get().empty()) ? "string" : type.get()); if (t == "string" || t == "password") @@ -133,18 +129,14 @@ io::endpoint* factory::new_endpoint( } } else if (js.is_array()) { for (json const& obj : js) { - json const& name{obj["name"]}; - json const& type{obj["type"]}; - json const& value{obj["value"]}; + json const& name{obj.at("name")}; + json const& type{obj.at("type")}; + json const& value{obj.at("value")}; if (name.get().empty()) throw msg_fmt( "lua: couldn't read a configuration field because" " its name is empty"); - if (value.get().empty()) - throw msg_fmt( - "lua: couldn't read a configuration field because" - " its value is empty"); std::string t((type.get().empty()) ? 
"string" : type.get()); diff --git a/centreon-broker/rrd/inc/com/centreon/broker/rrd/backend.hh b/centreon-broker/rrd/inc/com/centreon/broker/rrd/backend.hh index c1e564581f5..70e64d23d96 100644 --- a/centreon-broker/rrd/inc/com/centreon/broker/rrd/backend.hh +++ b/centreon-broker/rrd/inc/com/centreon/broker/rrd/backend.hh @@ -52,7 +52,8 @@ class backend { uint32_t length, time_t from, uint32_t step, - short value_type = 0) = 0; + short value_type = 0, + bool without_cache = false) = 0; virtual void remove(std::string const& filename) = 0; virtual void update(time_t t, std::string const& value) = 0; }; diff --git a/centreon-broker/rrd/inc/com/centreon/broker/rrd/cached.hh b/centreon-broker/rrd/inc/com/centreon/broker/rrd/cached.hh index b295f051715..65109013fff 100644 --- a/centreon-broker/rrd/inc/com/centreon/broker/rrd/cached.hh +++ b/centreon-broker/rrd/inc/com/centreon/broker/rrd/cached.hh @@ -79,7 +79,8 @@ class cached : public backend { uint32_t length, time_t from, uint32_t step, - short value_type = 0) { + short value_type = 0, + bool without_cache = false) { // Close previous file. this->close(); @@ -89,7 +90,7 @@ class cached : public backend { /* We are unfortunately forced to use librrd to create RRD file as ** rrdcached does not support RRD file creation. */ - _lib.open(filename, length, from, step, value_type); + _lib.open(filename, length, from, step, value_type, without_cache); } /** @@ -130,7 +131,7 @@ class cached : public backend { * * @param[in] command Command to send. */ - void _send_to_cached(std::string const& command) { + void _send_to_cached(const std::string& command) { std::error_code err; asio::write(_socket, asio::buffer(command), asio::transfer_all(), err); diff --git a/centreon-broker/rrd/inc/com/centreon/broker/rrd/creator.hh b/centreon-broker/rrd/inc/com/centreon/broker/rrd/creator.hh index 178dac83413..1d16c012781 100644 --- a/centreon-broker/rrd/inc/com/centreon/broker/rrd/creator.hh +++ b/centreon-broker/rrd/inc/com/centreon/broker/rrd/creator.hh @@ -47,7 +47,8 @@ class creator { uint32_t length, time_t from, uint32_t step, - short value_type); + short value_type, + bool without_cache = false); private: struct tmpl_info { diff --git a/centreon-broker/rrd/inc/com/centreon/broker/rrd/lib.hh b/centreon-broker/rrd/inc/com/centreon/broker/rrd/lib.hh index 4e8cd310df0..0d584f7ecfd 100644 --- a/centreon-broker/rrd/inc/com/centreon/broker/rrd/lib.hh +++ b/centreon-broker/rrd/inc/com/centreon/broker/rrd/lib.hh @@ -41,18 +41,19 @@ class lib : public backend { lib(lib const& l) = delete; ~lib() = default; lib& operator=(lib const& l) = delete; - void begin(); - void clean(); - void close(); - void commit(); - void open(std::string const& filename); + void begin() override; + void clean() override; + void close() override; + void commit() override; + void open(std::string const& filename) override; void open(std::string const& filename, uint32_t length, time_t from, uint32_t step, - short value_type = 0); - void remove(std::string const& filename); - void update(time_t t, std::string const& value); + short value_type = 0, + bool without_cache = false) override; + void remove(std::string const& filename) override; + void update(time_t t, std::string const& value) override; private: creator _creator; diff --git a/centreon-broker/rrd/inc/com/centreon/broker/rrd/output.hh b/centreon-broker/rrd/inc/com/centreon/broker/rrd/output.hh index 55c2a255012..cf14ca0936a 100644 --- a/centreon-broker/rrd/inc/com/centreon/broker/rrd/output.hh +++ 
b/centreon-broker/rrd/inc/com/centreon/broker/rrd/output.hh @@ -41,11 +41,9 @@ namespace rrd { */ template class output : public io::stream { - public: - typedef std::unordered_map>> - rebuild_cache; + using rebuild_cache = + std::unordered_map>>; - private: bool _ignore_update_errors; std::string _metrics_path; rebuild_cache _metrics_rebuild; diff --git a/centreon-broker/rrd/src/connector.cc b/centreon-broker/rrd/src/connector.cc index 59601d91b3c..36dcbcd529a 100644 --- a/centreon-broker/rrd/src/connector.cc +++ b/centreon-broker/rrd/src/connector.cc @@ -28,12 +28,6 @@ using namespace com::centreon::broker; using namespace com::centreon::broker::rrd; -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Default constructor. */ @@ -58,17 +52,18 @@ connector::~connector() {} std::unique_ptr connector::open() { std::unique_ptr retval; if (!_cached_local.empty()) - retval.reset(new output>( - _metrics_path, _status_path, _cache_size, _ignore_update_errors, - _cached_local, _write_metrics, _write_status)); + retval = + std::make_unique>>( + _metrics_path, _status_path, _cache_size, _ignore_update_errors, + _cached_local, _write_metrics, _write_status); else if (_cached_port) - retval.reset(new output>( + retval = std::make_unique>>( _metrics_path, _status_path, _cache_size, _ignore_update_errors, - _cached_port, _write_metrics, _write_status)); + _cached_port, _write_metrics, _write_status); else - retval.reset(new output(_metrics_path, _status_path, _cache_size, - _ignore_update_errors, _write_metrics, - _write_status)); + retval = std::make_unique>(_metrics_path, _status_path, + _cache_size, _ignore_update_errors, + _write_metrics, _write_status); return retval; } diff --git a/centreon-broker/rrd/src/creator.cc b/centreon-broker/rrd/src/creator.cc index 58742ae4359..69262e82127 100644 --- a/centreon-broker/rrd/src/creator.cc +++ b/centreon-broker/rrd/src/creator.cc @@ -84,67 +84,73 @@ void creator::clear() { * @param[in] step Specifies the base interval in seconds with * which data will be fed into the RRD. * @param[in] value_type Type of the metric. + * @param[in] without_cache We force the creation of the file (needed by the + * rebuild). */ void creator::create(std::string const& filename, uint32_t length, time_t from, uint32_t step, - short value_type) { + short value_type, + bool without_cache) { // Fill template informations. if (!step) step = 5 * 60; // Default to every 5 minutes. if (!length) length = 31 * 24 * 60 * 60; // Default to one month long. - tmpl_info info; - info.length = length; - info.step = step; - info.value_type = value_type; + if (!without_cache) { + tmpl_info info; + info.length = length; + info.step = step; + info.value_type = value_type; - // Find fd informations. - std::map::const_iterator it(_fds.find(info)); - // Is in the cache, just duplicate file. - if (it != _fds.end()) - _duplicate(filename, it->second); - // Not is the cache, but we have enough space in the cache. - // Create new entry. - else if (_fds.size() < _cache_size) { - std::string tmpl_filename(fmt::format("{}/tmpl_{}_{}_{}.rrd", _tmpl_path, - length, step, value_type)); + // Find fd informations. + std::map::const_iterator it(_fds.find(info)); + // Is in the cache, just duplicate file. + if (it != _fds.end()) + _duplicate(filename, it->second); + // Not in the cache, but we have enough space in the cache. + // Create new entry. 
+ else if (_fds.size() < _cache_size) { + std::string tmpl_filename(fmt::format("{}/tmpl_{}_{}_{}.rrd", _tmpl_path, + length, step, value_type)); - // Create new template. - _open(tmpl_filename, length, from, step, value_type); + // Create new template. + _open(tmpl_filename, length, from, step, value_type); - // Get template file size. - struct stat s; - if (stat(tmpl_filename.c_str(), &s) < 0) { - char const* msg(strerror(errno)); - throw exceptions::open( - "RRD: could not create template file '{}" - "': {}", - tmpl_filename, msg); - } + // Get template file size. + struct stat s; + if (stat(tmpl_filename.c_str(), &s) < 0) { + char const* msg(strerror(errno)); + throw exceptions::open( + "RRD: could not create template file '{}" + "': {}", + tmpl_filename, msg); + } - // Get template file fd. - int in_fd(open(tmpl_filename.c_str(), O_RDONLY)); - if (in_fd < 0) { - char const* msg(strerror(errno)); - throw exceptions::open( - "RRD: could not open template file '{}" - "': {}", - tmpl_filename, msg); - } + // Get template file fd. + int in_fd(open(tmpl_filename.c_str(), O_RDONLY)); + if (in_fd < 0) { + char const* msg(strerror(errno)); + throw exceptions::open( + "RRD: could not open template file '{}" + "': {}", + tmpl_filename, msg); + } - // Store fd informations into the cache. - fd_info fdinfo; - fdinfo.fd = in_fd; - fdinfo.size = s.st_size; - _fds[info] = fdinfo; + // Store fd informations into the cache. + fd_info fdinfo; + fdinfo.fd = in_fd; + fdinfo.size = s.st_size; + _fds[info] = fdinfo; - _duplicate(filename, fdinfo); - } - // No more space in the cache, juste create rrd file. - else - _open(filename, length, from, step, value_type); + _duplicate(filename, fdinfo); + } + // No more space in the cache, juste create rrd file. + else + _open(filename, length, from - 1, step, value_type); + } else + _open(filename, length, from - 1, step, value_type); } /** @@ -154,9 +160,6 @@ void creator::create(std::string const& filename, * @param[in] in_fd The fd informations to duplicate file. */ void creator::_duplicate(std::string const& filename, fd_info const& in_fd) { - // Remove previous file. - remove(filename.c_str()); - int out_fd(open(filename.c_str(), O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH)); if (out_fd < 0) { diff --git a/centreon-broker/rrd/src/lib.cc b/centreon-broker/rrd/src/lib.cc index ba261d35832..f95bf64f403 100644 --- a/centreon-broker/rrd/src/lib.cc +++ b/centreon-broker/rrd/src/lib.cc @@ -24,8 +24,6 @@ #include #include -#include -#include #include "com/centreon/broker/log_v2.hh" #include "com/centreon/broker/rrd/exceptions/open.hh" @@ -35,12 +33,6 @@ using namespace com::centreon::broker; using namespace com::centreon::broker::rrd; -/************************************** - * * - * Public Methods * - * * - **************************************/ - /** * Constructor. * @@ -109,13 +101,14 @@ void lib::open(std::string const& filename, uint32_t length, time_t from, uint32_t step, - short value_type) { + short value_type, + bool without_cache) { // Close previous file. this->close(); // Remember informations for further operations. 
_filename = filename; - _creator.create(filename, length, from, step, value_type); + _creator.create(filename, length, from, step, value_type, without_cache); } /** diff --git a/centreon-broker/rrd/src/output.cc b/centreon-broker/rrd/src/output.cc index fb6b3e2818c..971349f2af5 100644 --- a/centreon-broker/rrd/src/output.cc +++ b/centreon-broker/rrd/src/output.cc @@ -1,5 +1,5 @@ /* -** Copyright 2011-2015,2017 Centreon +** Copyright 2011-2015,2017, 2020-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. @@ -18,10 +18,10 @@ #include "com/centreon/broker/rrd/output.hh" +#include #include #include #include -#include #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/io/events.hh" @@ -189,8 +189,8 @@ int output::write(std::shared_ptr const& d) { std::string metric_path( fmt::format("{}{}.rrd", _metrics_path, e->metric_id)); - // Check that metric is not being rebuild. - rebuild_cache::iterator it(_metrics_rebuild.find(metric_path)); + // Check that metric is not being rebuilt. + rebuild_cache::iterator it = _metrics_rebuild.find(metric_path); if (e->is_for_rebuild || it == _metrics_rebuild.end()) { // Write metrics RRD. try { @@ -201,39 +201,39 @@ int output::write(std::shared_ptr const& d) { _backend.open(metric_path, e->rrd_len, e->ctime - 1, interval, e->value_type); } - std::ostringstream oss; + std::string v; switch (e->value_type) { case storage::perfdata::gauge: - oss << std::fixed << e->value; + v = fmt::format("{:f}", e->value); log_v2::rrd()->trace( "RRD: update metric {} of type GAUGE with {}", e->metric_id, - oss.str()); + v); break; case storage::perfdata::counter: - oss << static_cast(e->value); + v = fmt::format("{}", static_cast(e->value)); log_v2::rrd()->trace( "RRD: update metric {} of type COUNTER with {}", e->metric_id, - oss.str()); + v); break; case storage::perfdata::derive: - oss << static_cast(e->value); + v = fmt::format("{}", static_cast(e->value)); log_v2::rrd()->trace( "RRD: update metric {} of type DERIVE with {}", e->metric_id, - oss.str()); + v); break; case storage::perfdata::absolute: - oss << static_cast(e->value); + v = fmt::format("{}", static_cast(e->value)); log_v2::rrd()->trace( "RRD: update metric {} of type ABSOLUTE with {}", - e->metric_id, oss.str()); + e->metric_id, v); break; default: - oss << std::fixed << e->value; + v = fmt::format("{:f}", e->value); log_v2::rrd()->trace("RRD: update metric {} of type {} with {}", - e->metric_id, e->value_type, oss.str()); + e->metric_id, e->value_type, v); break; } - _backend.update(e->ctime, oss.str()); + _backend.update(e->ctime, v); } else // Cache value. it->second.push_back(d); @@ -261,7 +261,11 @@ int output::write(std::shared_ptr const& d) { } catch (exceptions::open const& b) { time_t interval(e->interval ? e->interval : 60); assert(e->rrd_len); - _backend.open(status_path, e->rrd_len, e->ctime - 1, interval); + /* In case of a rebuild, we must not use the cache of rrd files, + * otherwise there is a risk its creation date is too recent. + * That's why, we use this last argument e->is_for_rebuild. 
*/ + _backend.open(status_path, e->rrd_len, e->ctime - 1, interval, + e->is_for_rebuild); } std::string value; if (e->state == 0) @@ -313,7 +317,7 @@ int output::write(std::shared_ptr const& d) { } else { it = _metrics_rebuild.find(path); if (it != _metrics_rebuild.end()) { - l = it->second; + l = std::move(it->second); _metrics_rebuild.erase(it); } } @@ -327,6 +331,7 @@ int output::write(std::shared_ptr const& d) { } } break; case storage::remove_graph::static_type(): { + log_v2::rrd()->info("storage::remove_graph"); // Debug message. std::shared_ptr e( std::static_pointer_cast(d)); diff --git a/centreon-broker/storage/inc/com/centreon/broker/storage/rebuilder.hh b/centreon-broker/storage/inc/com/centreon/broker/storage/rebuilder.hh index 29de8488257..b8190b54231 100644 --- a/centreon-broker/storage/inc/com/centreon/broker/storage/rebuilder.hh +++ b/centreon-broker/storage/inc/com/centreon/broker/storage/rebuilder.hh @@ -1,5 +1,5 @@ /* -** Copyright 2012-2015,2017-2021 Centreon +** Copyright 2012-2015,2017-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. @@ -93,6 +93,7 @@ class rebuilder { ~rebuilder(); rebuilder(const rebuilder&) = delete; rebuilder& operator=(const rebuilder&) = delete; + void force_check_rebuild_index(); }; } // namespace storage diff --git a/centreon-broker/storage/inc/com/centreon/broker/storage/stream.hh b/centreon-broker/storage/inc/com/centreon/broker/storage/stream.hh index c3a18ea2e06..b6990b0e7f7 100644 --- a/centreon-broker/storage/inc/com/centreon/broker/storage/stream.hh +++ b/centreon-broker/storage/inc/com/centreon/broker/storage/stream.hh @@ -89,6 +89,7 @@ class stream : public io::stream { stream& operator=(const stream&) = delete; int32_t stop() override; int32_t flush() override; + void update() override; bool read(std::shared_ptr& d, time_t deadline) override; void statistics(nlohmann::json& tree) const override; int32_t write(std::shared_ptr const& d) override; diff --git a/centreon-broker/storage/src/rebuilder.cc b/centreon-broker/storage/src/rebuilder.cc index 8d3ff34c33d..464fa3708f5 100644 --- a/centreon-broker/storage/src/rebuilder.cc +++ b/centreon-broker/storage/src/rebuilder.cc @@ -122,6 +122,7 @@ void rebuilder::_run(asio::error_code ec) { info.host_id, info.service_id); std::promise promise; std::future future(promise.get_future()); + log_v2::sql()->trace("rebuilder: fetch check interval <<{}>>", query); ms.run_query_and_get_result(query, std::move(promise)); database::mysql_result res(future.get()); if (ms.fetch_row(res)) @@ -147,6 +148,8 @@ void rebuilder::_run(asio::error_code ec) { std::promise promise; std::future future(promise.get_future()); + log_v2::sql()->trace("rebuilder: fetch metrics to rebuild <<{}>>", + query); ms.run_query_and_get_result(query, std::move(promise)); try { database::mysql_result res(future.get()); @@ -409,3 +412,13 @@ void rebuilder::_set_index_rebuild(mysql& ms, uint64_t index_id, short state) { state, index_id)); ms.run_query(query, database::mysql_error::update_index_state, false); } + +/** + * @brief Force a check of index to rebuild. This method is called on cbd + * reload. 
+ */ +void rebuilder::force_check_rebuild_index() { + log_v2::sql()->info("rebuilder: force check of rebuild indexes"); + _timer.expires_after(std::chrono::seconds(1)); + _timer.async_wait(std::bind(&rebuilder::_run, this, std::placeholders::_1)); +} diff --git a/centreon-broker/storage/src/stream.cc b/centreon-broker/storage/src/stream.cc index 1a0c650d1bb..92aa8aad51a 100644 --- a/centreon-broker/storage/src/stream.cc +++ b/centreon-broker/storage/src/stream.cc @@ -172,3 +172,11 @@ void stream::_update_status(std::string const& status) { std::lock_guard lock(_statusm); _status = status; } + +/** + * @brief Method called each time cbd is reloaded. + */ +void stream::update() { + log_v2::sql()->info("storage stream update"); + _rebuilder.force_check_rebuild_index(); +} diff --git a/centreon-broker/tcp/inc/com/centreon/broker/tcp/stream.hh b/centreon-broker/tcp/inc/com/centreon/broker/tcp/stream.hh index 118d2fbb673..578da77d0ba 100644 --- a/centreon-broker/tcp/inc/com/centreon/broker/tcp/stream.hh +++ b/centreon-broker/tcp/inc/com/centreon/broker/tcp/stream.hh @@ -59,6 +59,7 @@ class stream : public io::stream { int32_t flush() override; int32_t stop() override; int32_t write(std::shared_ptr const& d) override; + bool wait_for_all_events_written(unsigned ms_timeout) override; }; } // namespace tcp diff --git a/centreon-broker/tcp/inc/com/centreon/broker/tcp/tcp_connection.hh b/centreon-broker/tcp/inc/com/centreon/broker/tcp/tcp_connection.hh index eebb4a6e9d7..0f9980a1914 100644 --- a/centreon-broker/tcp/inc/com/centreon/broker/tcp/tcp_connection.hh +++ b/centreon-broker/tcp/inc/com/centreon/broker/tcp/tcp_connection.hh @@ -41,6 +41,7 @@ class tcp_connection : public std::enable_shared_from_this { std::queue> _write_queue; std::atomic_bool _write_queue_has_events; std::atomic_bool _writing; + std::condition_variable _writing_cv; std::atomic _acks; std::atomic_bool _reading; @@ -83,6 +84,8 @@ class tcp_connection : public std::enable_shared_from_this { const std::string peer() const; const std::string& address() const; uint16_t port() const; + + bool wait_for_all_events_written(unsigned ms_timeout); }; } // namespace tcp diff --git a/centreon-broker/tcp/src/stream.cc b/centreon-broker/tcp/src/stream.cc index e5696afbcc3..01fd2e26804 100644 --- a/centreon-broker/tcp/src/stream.cc +++ b/centreon-broker/tcp/src/stream.cc @@ -199,3 +199,18 @@ int32_t stream::write(std::shared_ptr const& d) { } return 1; } + +/** + * @brief wait for connection write queue empty + * + * @param ms_timeout + * @return true queue is empty + * @return false timeout expired + */ +bool stream::wait_for_all_events_written(unsigned ms_timeout) { + if (_connection->is_closed()) { + return true; + } + + return _connection->wait_for_all_events_written(ms_timeout); +} diff --git a/centreon-broker/tcp/src/tcp_connection.cc b/centreon-broker/tcp/src/tcp_connection.cc index 83ca6201cbf..3f3428c79f1 100644 --- a/centreon-broker/tcp/src/tcp_connection.cc +++ b/centreon-broker/tcp/src/tcp_connection.cc @@ -200,6 +200,21 @@ int32_t tcp_connection::write(const std::vector& v) { return retval; } +/** + * @brief wait for all events sent on the wire + * + * @param ms_timeout + * @return true if all events are sent + * @return false if timeout expires + */ +bool tcp_connection::wait_for_all_events_written(unsigned ms_timeout) { + log_v2::tcp()->trace("wait_for_all_events_written _writing={}", _writing); + std::mutex dummy; + std::unique_lock l(dummy); + return _writing_cv.wait_for(l, std::chrono::milliseconds(ms_timeout), + [this]() { 
return _writing == false; }); +} + /** * @brief Execute the real writing on the socket. Infact, this function: * * checks if the _write_queue is empty, and then exchanges its content with @@ -217,6 +232,7 @@ void tcp_connection::writing() { } if (!_write_queue_has_events) { _writing = false; + _writing_cv.notify_all(); return; } diff --git a/centreon-engine/inc/com/centreon/engine/configuration/state.hh b/centreon-engine/inc/com/centreon/engine/configuration/state.hh index 7fc93e413e2..c5a64583b0f 100644 --- a/centreon-engine/inc/com/centreon/engine/configuration/state.hh +++ b/centreon-engine/inc/com/centreon/engine/configuration/state.hh @@ -297,6 +297,8 @@ class state { void poller_id(uint32_t value) noexcept; uint16_t rpc_port() const noexcept; void rpc_port(uint16_t value) noexcept; + const std::string& rpc_listen_address() const noexcept; + void rpc_listen_address(const std::string& listen_address) noexcept; bool process_performance_data() const noexcept; void process_performance_data(bool value); std::list const& resource_file() const noexcept; @@ -563,6 +565,7 @@ class state { std::string _poller_name; uint32_t _poller_id; uint16_t _rpc_port; + std::string _rpc_listen_address; bool _process_performance_data; std::list _resource_file; unsigned long _retained_contact_host_attribute_mask; diff --git a/centreon-engine/src/configuration/state.cc b/centreon-engine/src/configuration/state.cc index 185e5c0206e..477be1fce73 100644 --- a/centreon-engine/src/configuration/state.cc +++ b/centreon-engine/src/configuration/state.cc @@ -173,6 +173,7 @@ std::unordered_map const state::_setters{ {"poller_name", SETTER(std::string const&, poller_name)}, {"poller_id", SETTER(uint32_t, poller_id)}, {"rpc_port", SETTER(uint16_t, rpc_port)}, + {"rpc_listen_address", SETTER(const std::string&, rpc_listen_address)}, {"precached_object_file", SETTER(std::string const&, _set_precached_object_file)}, {"process_performance_data", SETTER(bool, process_performance_data)}, @@ -362,6 +363,7 @@ static bool const default_use_setpgid(true); static bool const default_use_syslog(true); static std::string const default_use_timezone(""); static bool const default_use_true_regexp_matching(false); +static const std::string default_rpc_listen_address("localhost"); /** * Default constructor. @@ -448,6 +450,7 @@ state::state() _poller_name{"unknown"}, _poller_id{0}, _rpc_port{0}, + _rpc_listen_address{default_rpc_listen_address}, _process_performance_data(default_process_performance_data), _retained_contact_host_attribute_mask( default_retained_contact_host_attribute_mask), @@ -605,6 +608,7 @@ state& state::operator=(state const& right) { _poller_name = right._poller_name; _poller_id = right._poller_id; _rpc_port = right._rpc_port; + _rpc_listen_address = right._rpc_listen_address; _process_performance_data = right._process_performance_data; _retained_contact_host_attribute_mask = right._retained_contact_host_attribute_mask; @@ -754,6 +758,7 @@ bool state::operator==(state const& right) const noexcept { _perfdata_timeout == right._perfdata_timeout && _poller_name == right._poller_name && _poller_id == right._poller_id && _rpc_port == right._rpc_port && + _rpc_listen_address == right._rpc_listen_address && _process_performance_data == right._process_performance_data && _retained_contact_host_attribute_mask == right._retained_contact_host_attribute_mask && @@ -2113,8 +2118,8 @@ void state::host_perfdata_file_mode(perfdata_file_mode value) { * * @return The host_perfdata_file_processing_command value. 
*/ -std::string const& state::host_perfdata_file_processing_command() const - noexcept { +std::string const& state::host_perfdata_file_processing_command() + const noexcept { return _host_perfdata_file_processing_command; } @@ -2739,7 +2744,7 @@ uint16_t state::rpc_port() const noexcept { } /** - * Set poller_id value. + * Set the rpc port value. * * @param[in] value The new rpc_port value. */ @@ -2747,6 +2752,24 @@ void state::rpc_port(uint16_t value) noexcept { _rpc_port = value; } +/** + * Get rpc_listen_address value. + * + * @return The grpc api listen address value. + */ +const std::string& state::rpc_listen_address() const noexcept { + return _rpc_listen_address; +} + +/** + * Set grpc api listen_address value. + * + * @param[in] listen_address The new grpc api listen address. + */ +void state::rpc_listen_address(const std::string& listen_address) noexcept { + _rpc_listen_address = listen_address; +} + /** * Get process_performance_data value. * @@ -3143,8 +3166,8 @@ void state::service_freshness_check_interval(unsigned int value) { * * @return The service_inter_check_delay_method value. */ -state::inter_check_delay state::service_inter_check_delay_method() const - noexcept { +state::inter_check_delay state::service_inter_check_delay_method() + const noexcept { return _service_inter_check_delay_method; } @@ -3162,8 +3185,8 @@ void state::service_inter_check_delay_method(inter_check_delay value) { * * @return The service_interleave_factor_method value. */ -state::interleave_factor state::service_interleave_factor_method() const - noexcept { +state::interleave_factor state::service_interleave_factor_method() + const noexcept { return _service_interleave_factor_method; } @@ -3235,8 +3258,8 @@ void state::service_perfdata_file_mode(perfdata_file_mode value) { * * @return The service_perfdata_file_processing_command value. */ -std::string const& state::service_perfdata_file_processing_command() const - noexcept { +std::string const& state::service_perfdata_file_processing_command() + const noexcept { return _service_perfdata_file_processing_command; } @@ -3508,8 +3531,8 @@ void state::translate_passive_host_checks(bool value) { * * @return The users resources list. */ -std::unordered_map<std::string, std::string> const& state::user() const - noexcept { +std::unordered_map<std::string, std::string> const& state::user() + const noexcept { return _users; } @@ -3547,7 +3570,6 @@ void state::user(unsigned int key, std::string const& value) { _users[string::from(key)] = value; } - /** * Set use_aggressive_host_checking value. This function is still there just * to warn the user. It should be removed soon. 
diff --git a/centreon-engine/src/main.cc b/centreon-engine/src/main.cc index db99e519112..15d651815b9 100644 --- a/centreon-engine/src/main.cc +++ b/centreon-engine/src/main.cc @@ -40,6 +40,7 @@ #include "com/centreon/engine/configuration/state.hh" #include "com/centreon/engine/diagnostic.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" +#include "com/centreon/engine/enginerpc.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/logging.hh" @@ -56,7 +57,6 @@ #include "com/centreon/engine/version.hh" #include "com/centreon/io/directory_entry.hh" #include "com/centreon/logging/engine.hh" -#include "com/centreon/engine/enginerpc.hh" using namespace com::centreon::engine; @@ -336,8 +336,10 @@ int main(int argc, char* argv[]) { port = dis(gen); } + const std::string& listen_address = config.rpc_listen_address(); + std::unique_ptr > rpc( - new enginerpc("0.0.0.0", port), [](enginerpc* rpc) { + new enginerpc(listen_address, port), [](enginerpc* rpc) { rpc->shutdown(); delete rpc; }); diff --git a/centreon-engine/tests/configuration/applier/applier-global.cc b/centreon-engine/tests/configuration/applier/applier-global.cc index 2cd91df0d37..8df5466a0c8 100644 --- a/centreon-engine/tests/configuration/applier/applier-global.cc +++ b/centreon-engine/tests/configuration/applier/applier-global.cc @@ -88,3 +88,39 @@ TEST_F(ApplierGlobal, RpcPort) { ASSERT_EQ(st.rpc_port(), 42u); } + +TEST_F(ApplierGlobal, RpcListenAddress) { + configuration::parser parser; + configuration::state st; + + ASSERT_EQ(st.rpc_port(), 0u); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "rpc_listen_address=10.11.12.13" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", st); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.rpc_listen_address(), "10.11.12.13"); +} + +TEST_F(ApplierGlobal, NotDefinedRpcListenAddress) { + configuration::parser parser; + configuration::state st; + + ASSERT_EQ(st.rpc_port(), 0u); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "rpc_port=42" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", st); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.rpc_listen_address(), "localhost"); +} diff --git a/ci/docker/Dockerfile.collect-centos7-dependencies b/ci/docker/Dockerfile.collect-centos7-dependencies index 985f47ced4d..5cd6cf47657 100644 --- a/ci/docker/Dockerfile.collect-centos7-dependencies +++ b/ci/docker/Dockerfile.collect-centos7-dependencies @@ -48,11 +48,10 @@ RUN yum -y install devtoolset-9 \ unzip \ ShellCheck RUN ln -s /usr/bin/cmake3 /usr/bin/cmake -RUN pip3 install conan --prefix=/usr --upgrade -RUN rm -rf ~/.conan/profiles/default COPY conanfile.txt . RUN cat conanfile.txt RUN source /opt/rh/devtoolset-9/enable && source /opt/rh/rh-python38/enable &&\ + pip3 install conan --upgrade &&\ conan install . -s compiler.libcxx=libstdc++11 --build='*' RUN unzip -q sonar-scanner-cli-4.7.0.2747-linux.zip diff --git a/ci/scripts/collect-sonar-scanner-common.sh b/ci/scripts/collect-sonar-scanner-common.sh index 4a980d83786..e9560f937a8 100755 --- a/ci/scripts/collect-sonar-scanner-common.sh +++ b/ci/scripts/collect-sonar-scanner-common.sh @@ -45,7 +45,9 @@ get_cache() { fi fi tar xzf "$TAR_NAME" - mkdir build + if [ ! 
-d build ] ; then + mkdir build + fi mv cache build/ rm -rf "$TAR_NAME" } diff --git a/ci/scripts/collect-sources-analysis.sh b/ci/scripts/collect-sources-analysis.sh index 997895d06fa..5f25a81b25c 100755 --- a/ci/scripts/collect-sources-analysis.sh +++ b/ci/scripts/collect-sources-analysis.sh @@ -22,6 +22,7 @@ fi DISTRIB=$( lsb_release -rs | cut -f1 -d. ) if [[ "$DISTRIB" = "7" ]] ; then source /opt/rh/devtoolset-9/enable + source /opt/rh/rh-python38/enable fi # Prepare compilation diff --git a/ci/scripts/collect-test-robot.sh b/ci/scripts/collect-test-robot.sh index b25bc2c4a81..1169c612f97 100755 --- a/ci/scripts/collect-test-robot.sh +++ b/ci/scripts/collect-test-robot.sh @@ -39,7 +39,7 @@ rpm -i centreon*.el7.x86_64.rpm echo "########################### install robot framework ############################" cd /src/tests/ -pip3 install -U robotframework robotframework-databaselibrary pymysql +pip3 install -U robotframework robotframework-databaselibrary pymysql python-dateutil yum install "Development Tools" python3-devel -y diff --git a/ci/scripts/collect-unit-tests.sh b/ci/scripts/collect-unit-tests.sh index ef142c37aac..4ef2c0a3f86 100755 --- a/ci/scripts/collect-unit-tests.sh +++ b/ci/scripts/collect-unit-tests.sh @@ -8,16 +8,17 @@ cd /src/build/ DISTRIB=$(lsb_release -rs | cut -f1 -d.) if [ "$DISTRIB" = "7" ] ; then source /opt/rh/devtoolset-9/enable -fi + source /opt/rh/rh-python38/enable +fi conan install .. -s compiler.libcxx=libstdc++11 --build=missing if [ $(cat /etc/issue | awk '{print $1}') = "Debian" ] ; then CXXFLAGS="-Wall -Wextra" cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../centreon-clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On .. -else +else CXXFLAGS="-Wall -Wextra" cmake3 -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../centreon-clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On .. fi #Build -make -j9 +make -j9 make -j9 install #Test @@ -33,7 +34,7 @@ test/ut-clib --gtest_output=xml:/src/clib-ut.xml cd .. # cd centreon-connector # ./ut_connector --gtest_output=xml:/src/connector-ut.xml -# cd .. +# cd .. 
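One note on the test dependencies above: python-dateutil is added next to the Robot Framework packages, presumably because the test helpers compare broker log timestamps against a start date when scanning logs. A small sketch of that kind of check (the log format and helper name are assumptions, not the repository's actual Common.py):

from dateutil import parser

def line_is_after(log_line: str, start_iso: str) -> bool:
    # Assume lines start with a bracketed timestamp, e.g.
    # "[2022-09-27T10:15:29] [core] [info] ...".
    try:
        stamp = parser.parse(log_line.split("]")[0].lstrip("["))
    except (ValueError, OverflowError):
        return False  # ignore lines without a parsable timestamp
    return stamp >= parser.parse(start_iso)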
diff --git a/tests/README.md b/tests/README.md index ad47a04518f..7b8340efbe0 100644 --- a/tests/README.md +++ b/tests/README.md @@ -55,22 +55,15 @@ Here is the list of the currently implemented tests: ### Bam - [x] **BEBAMIDT1**: A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. The downtime is removed from the service, the inherited downtime is then deleted. - [x] **BEBAMIDT2**: A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. Engine is restarted. Broker is restarted. The two downtimes are still there with no duplicates. The downtime is removed from the service, the inherited downtime is then deleted. +- [x] **BEBAMIGNDT1**: A BA of type 'worst' with two services is configured. The downtime policy on this BA is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is OK because of the policy on indicators. A first downtime is cancelled, the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. ### Broker -- [x] **BFC1**: Start broker with invalid filters but one filter ok -- [x] **BFC2**: Start broker with only invalid filters on an output -- [x] **BLDIS1**: Start broker with core logs 'disabled' -- [x] **BSS1**: Start-Stop two instances of broker and no coredump -- [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump -- [x] **BSS3**: Start-Stop one instance of broker and no coredump -- [x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump -- [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock -- [x] **BSSU1**: Start-Stop with unified_sql two instances of broker and no coredump -- [x] **BSSU2**: Start/Stop with unified_sql 10 times broker with 300ms interval and no coredump -- [x] **BSSU3**: Start-Stop with unified_sql one instance of broker and no coredump -- [x] **BSSU4**: Start/Stop with unified_sql 10 times broker with 1sec interval and no coredump -- [x] **BSSU5**: Start-Stop with unified_sql with reversed connection on TCP acceptor with only one instance and no deadlock +- [x] **BCL1**: Starting broker with option '-s foobar' should return an error +- [x] **BCL2**: Starting broker with option '-s5' should work +- [x] **BCL3**: Starting broker with option '-D' should work and activate diagnose mode +- [x] **BCL4**: Starting broker with options '-s2' and '-D' should work.
- [x] **BDB1**: Access denied when database name exists but is not the good one for sql output +- [x] **BDB10**: connection should be established when user password is good for sql/perfdata - [x] **BDB2**: Access denied when database name exists but is not the good one for storage output - [x] **BDB3**: Access denied when database name does not exist for sql output - [x] **BDB4**: Access denied when database name does not exist for storage and sql outputs @@ -79,212 +72,80 @@ Here is the list of the currently implemented tests: - [x] **BDB7**: access denied when database user password is wrong for perfdata/sql - [x] **BDB8**: access denied when database user password is wrong for perfdata/sql - [x] **BDB9**: access denied when database user password is wrong for sql -- [x] **BDB10**: connection should be established when user password is good for sql/perfdata +- [x] **BDBM1**: start broker/engine and then start MariaDB => connection is established - [x] **BEDB2**: start broker/engine and then start MariaDB => connection is established - [x] **BEDB3**: start broker/engine, then stop MariaDB and then start it again. The gRPC API should give information about SQL connections. - [x] **BEDB4**: start broker/engine, then stop MariaDB and then start it again. The gRPC API should give information about SQL connections. -- [x] **BDBM1**: start broker/engine and then start MariaDB => connection is established -- [x] **BDBU1**: Access denied when database name exists but is not the good one for unified sql output -- [x] **BDBU3**: Access denied when database name does not exist for unified sql output -- [x] **BDBU5**: cbd does not crash if the unified sql db_host is wrong -- [x] **BDBU7**: Access denied when database user password is wrong for unified sql -- [x] **BDBU10**: Connection should be established when user password is good for unified sql -- [x] **BDBMU1**: start broker/engine with unified sql and then start MariaDB => connection is established -- [x] **BGRPCSS1**: Start-Stop two instances of broker configured with grpc stream and no coredump -- [x] **BGRPCSS2**: Start/Stop 10 times broker configured with grpc stream with 300ms interval and no coredump -- [x] **BGRPCSS3**: Start-Stop one instance of broker configured with grpc stream and no coredump -- [x] **BGRPCSS4**: Start/Stop 10 times broker configured with grpc stream with 1sec interval and no coredump -- [x] **BGRPCSS5**: Start-Stop with reversed connection on grpc acceptor with only one instance and no deadlock -- [x] **BGRPCSSU1**: Start-Stop with unified_sql two instances of broker with grpc stream and no coredump -- [x] **BGRPCSSU2**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 300ms interval and no coredump -- [x] **BGRPCSSU3**: Start-Stop with unified_sql one instance of broker configured with grpc and no coredump -- [x] **BGRPCSSU4**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 1sec interval and no coredump -- [x] **BGRPCSSU5**: Start-Stop with unified_sql with reversed connection on grpc acceptor with only one instance and no deadlock -- [x] **BCL1**: Starting broker with option '-s foobar' should return an error -- [x] **BCL2**: Starting broker with option '-s 5' should work +- [x] **BFC1**: Start broker with invalid filters but one filter ok +- [x] **BFC2**: Start broker with only invalid filters on an output +- [x] **BLDIS1**: Start broker with core logs 'disabled' +- [x] **BSS1**: Start-Stop two instances of broker and no coredump +- [x] **BSS2**:
Start/Stop 10 times broker with 300ms interval and no coredump +- [x] **BSS3**: Start-Stop one instance of broker and no coredump +- [x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump +- [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock ### Broker/database +- [x] **NetworkDBFail6**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s +- [x] **NetworkDBFail7**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s - [x] **NetworkDbFail1**: network failure test between broker and database (shutting down connection for 100ms) - [x] **NetworkDbFail2**: network failure test between broker and database (shutting down connection for 1s) - [x] **NetworkDbFail3**: network failure test between broker and database (shutting down connection for 10s) - [x] **NetworkDbFail4**: network failure test between broker and database (shutting down connection for 30s) - [x] **NetworkDbFail5**: network failure test between broker and database (shutting down connection for 60s) -- [x] **NetworkDBFail6**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s -- [x] **NetworkDBFailU6**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s (with unified_sql) -- [x] **NetworkDBFail7**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s -- [x] **NetworkDBFailU7**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s (with unified_sql) ### Broker/engine -- [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. -- [x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage -- [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. -- [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. -- [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. 
- [x] **BECC1**: Broker/Engine communication with compression between central and poller -- [x] **EBNHG1**: New host group with several pollers and connections to DB -- [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup -- [x] **EBNHGU4**: New host group with several pollers and connections to DB with broker and rename this hostgroup -- [x] **LOGV2EB1**: log-v2 enabled old log disabled check broker sink -- [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink -- [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink -- [x] **LOGV2EB2**: log-v2 enabled old log enabled check broker sink -- [x] **LOGV2EF1**: log-v2 enabled old log disabled check logfile sink -- [x] **LOGV2DF1**: log-v2 disabled old log enabled check logfile sink -- [x] **LOGV2DF2**: log-v2 disabled old log disabled check logfile sink -- [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink -- [x] **LOGV2BE2**: log-v2 enabled old log enabled check broker sink is equal -- [x] **LOGV2FE2**: log-v2 enabled old log enabled check logfile sink -- [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -- [x] **BEHS1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -- [x] **BRGC1**: Broker good reverse connection -- [x] **BRCTS1**: Broker reverse connection too slow -- [x] **BRCS1**: Broker reverse connection stopped -- [x] **BRRDDM1**: RRD metrics deletion from metric ids. -- [x] **BRRDDID1**: RRD metrics deletion from index ids. -- [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes -- [x] **BRRDDMU1**: RRD metric deletion on table metric with unified sql output -- [x] **BRRDDIDU1**: RRD metrics deletion from index ids with unified sql output. 
-- [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes -- [x] **BRRDRM1**: RRD metric rebuild with gRPC API and unified sql -- [x] **BRRDRMU1**: RRD metric rebuild with gRPC API and unified sql -- [x] **ENRSCHE1**: check next check of reschedule is last_check+interval_check -- [x] **EBNSG1**: New service group with several pollers and connections to DB -- [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNSVC1**: New services with several pollers +- [x] **BECT1**: Broker/Engine communication with anonymous TLS between central and poller +- [x] **BECT2**: Broker/Engine communication with TLS between central and poller with key/cert +- [x] **BECT3**: Broker/Engine communication with anonymous TLS and ca certificate +- [x] **BECT4**: Broker/Engine communication with TLS between central and poller with key/cert and hostname forced +- [x] **BEDTMASS1**: New services with several pollers +- [x] **BEEXTCMD10**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo2.0 +- [x] **BEEXTCMD12**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo2.0 +- [x] **BEEXTCMD16**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo2.0 +- [x] **BEEXTCMD18**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo2.0 +- [x] **BEEXTCMD2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 +- [x] **BEEXTCMD4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 +- [x] **BEEXTCMD6**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo2.0 +- [x] **BEEXTCMD8**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo2.0 - [x] **BERD1**: Starting/stopping Broker does not create duplicated events. - [x] **BERD2**: Starting/stopping Engine does not create duplicated events. 
- [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases -- [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql - [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases -- [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql -- [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -- [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -- [x] **BEDTMASS1**: New services with several pollers -- [x] **BEDTMASS2**: New services with several pollers - [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first - [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first - [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped first - [x] **BESS4**: Start-Stop Broker/Engine - Engine started first - Broker stopped first - [x] **BESS5**: Start-Stop Broker/engine - Engine debug level is set to all, it should not hang -- [x] **BESS_GRPC1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first -- [x] **BESS_GRPC2**: Start-Stop grpc version Broker/Engine - Broker started first - Engine stopped first -- [x] **BESS_GRPC3**: Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first -- [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first -- [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang -- [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first compression activated -- [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. -- [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -- [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. -- [x] **BEUTAG2**: Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. -- [x] **BEUTAG3**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -- [x] **BEUTAG4**: Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. -- [x] **BEUTAG5**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. -- [x] **BEUTAG6**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. -- [x] **BEUTAG7**: some services are configured and deleted with tags on two pollers. -- [x] **BEUTAG8**: Services have tags provided by templates. -- [x] **BEUTAG9**: hosts have tags provided by templates. 
-- [x] **BEUTAG10**: some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. -- [x] **BEUTAG11**: some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. -- [x] **BEUTAG12**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. -- [x] **BECT1**: Broker/Engine communication with anonymous TLS between central and poller -- [x] **BECT2**: Broker/Engine communication with TLS between central and poller with key/cert -- [x] **BECT3**: Broker/Engine communication with anonymous TLS and ca certificate -- [x] **BECT4**: Broker/Engine communication with TLS between central and poller with key/cert and hostname forced -- [x] **BECT_GRPC1**: Broker/Engine communication with anonymous TLS between central and poller -- [x] **BECT_GRPC2**: Broker/Engine communication with TLS between central and poller with key/cert -- [x] **BECT_GRPC3**: Broker/Engine communication with anonymous TLS and ca certificate -- [x] **BECT_GRPC4**: Broker/Engine communication with TLS between central and poller with key/cert and hostname forced -- [x] **BEEXTCMD1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 -- [x] **BEEXTCMD2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 -- [x] **BEEXTCMD3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 -- [x] **BEEXTCMD4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 -- [x] **BEEXTCMD5**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo3.0 -- [x] **BEEXTCMD6**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo2.0 -- [x] **BEEXTCMD7**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo3.0 -- [x] **BEEXTCMD8**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo2.0 -- [x] **BEEXTCMD9**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo3.0 -- [x] **BEEXTCMD10**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo2.0 -- [x] **BEEXTCMD11**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo3.0 -- [x] **BEEXTCMD12**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo2.0 -- [x] **BEEXTCMD13**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo3.0 -- [x] **BEEXTCMD14**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo2.0 -- [x] **BEEXTCMD15**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo3.0 -- [x] **BEEXTCMD16**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo2.0 -- [x] **BEEXTCMD17**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo3.0 -- [x] **BEEXTCMD18**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo2.0 -- [x] **BEEXTCMD19**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo3.0 -- [x] **BEEXTCMD20**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo2.0 -- [x] **BEEXTCMD21**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo3.0 -- [x] **BEEXTCMD22**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo2.0 -- [x] **BEEXTCMD23**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo3.0 -- [x] **BEEXTCMD24**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo2.0 -- [x] **BEEXTCMD25**: external 
command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo3.0 -- [x] **BEEXTCMD26**: external command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo2.0 -- [x] **BEEXTCMD27**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo3.0 -- [x] **BEEXTCMD28**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo2.0 -- [x] **BEEXTCMD29**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo3.0 -- [x] **BEEXTCMD30**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo2.0 -- [x] **BEEXTCMD31**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo3.0 -- [x] **BEEXTCMD32**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo2.0 -- [x] **BEEXTCMD33**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo3.0 -- [x] **BEEXTCMD34**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo2.0 -- [x] **BEEXTCMD35**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo3.0 -- [x] **BEEXTCMD36**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo2.0 -- [x] **BEEXTCMD37**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo3.0 -- [x] **BEEXTCMD38**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo2.0 -- [x] **BEEXTCMD39**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo3.0 -- [x] **BEEXTCMD40**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo2.0 -- [x] **BEEXTCMD41**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo3.0 -- [x] **BEEXTCMD42**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo2.0 -- [x] **BEEXTCMD_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and grpc -- [x] **BEEXTCMD_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc -- [x] **BEEXTCMD_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc -- [x] **BEEXTCMD_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc -- [x] **BEEXTCMD_REVERSE_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and reversed gRPC -- [x] **BEEXTCMD_REVERSE_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc reversed -- [x] **BEEXTCMD_REVERSE_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc reversed -- [x] **BEEXTCMD_REVERSE_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc reversed -- [x] **EBSNU1**: New services with notes_url with more than 2000 characters +- [x] **BRCS1**: Broker reverse connection stopped +- [x] **BRCTS1**: Broker reverse connection too slow +- [x] **BRGC1**: Broker good reverse connection +- [x] **EBNHG1**: New host group with several pollers and connections to DB +- [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup +- [x] **EBNSG1**: New service group with several pollers and connections to DB +- [x] **EBNSVC1**: New services with several pollers - [x] **EBSAU2**: New services with action_url with more than 2000 characters - [x] **EBSN3**: New services with notes with more than 500 characters +- [x] **EBSNU1**: New services with notes_url with more than 2000 characters +- [x] 
**ENRSCHE1**: check next check of reschedule is last_check+interval_check ### Connector perl - [x] **test use connector perl exist script**: test exist script -- [x] **test use connector perl unknown script**: test unknown script - [x] **test use connector perl multiple script**: test script multiple +- [x] **test use connector perl unknown script**: test unknown script ### Connector ssh -- [x] **TestBadUser**: test unknown user -- [x] **TestBadPwd**: test bad password - [x] **Test6Hosts**: as 127.0.0.x points to the localhost address we will simulate checks on 6 hosts +- [x] **TestBadPwd**: test bad password +- [x] **TestBadUser**: test unknown user ### Engine - [x] **EFHC1**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 - [x] **EFHC2**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 -- [x] **EFHCU1**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. resources table is cleared before starting broker. -- [x] **EFHCU2**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. +- [x] **EPC1**: Check with perl connector - [x] **ESS1**: Start-Stop (0s between start/stop) 5 times one instance of engine and no coredump - [x] **ESS2**: Start-Stop (300ms between start/stop) 5 times one instance of engine and no coredump - [x] **ESS3**: Start-Stop (0s between start/stop) 5 times three instances of engine and no coredump - [x] **ESS4**: Start-Stop (300ms between start/stop) 5 times three instances of engine and no coredump -- [x] **EPC1**: Check with perl connector - -### Migration -- [x] **MIGRATION**: Migration bbdo2 => sql/storage => unified_sql => bbdo3 - -### Severities -- [x] **BEUHSEV1**: Four hosts have a severity added. Then we remove the severity from host 1. Then we change severity 10 to severity8 for host 3. -- [x] **BEUHSEV2**: Seven hosts are configured with a severity on two pollers. Then we remove severities from the first and second hosts of the first poller but only the severity from the first host of the second poller. -- [x] **BETUHSEV1**: Hosts have severities provided by templates. -- [x] **BESEV1**: Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Broker is started before. -- [x] **BESEV2**: Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Engine is started before. -- [x] **BEUSEV1**: Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Broker is started before. -- [x] **BEUSEV2**: Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Engine is started before. -- [x] **BEUSEV3**: Four services have a severity added. Then we remove the severity from service 1. Then we change severity 11 to severity7 for service 3. -- [x] **BEUSEV4**: Seven services are configured with a severity on two pollers. Then we remove severities from the first and second services of the first poller but only the severity from the first service of the second poller. Then only severities no more used should be removed from the database. -- [x] **BETUSEV1**: Services have severities provided by templates.
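Several suites listed above, and the BCL tests added below, rely on a "find in log with timeout" helper that polls a broker log until the expected strings show up. A minimal sketch of such a helper, assuming a plain-text log file and substring matching (the repository's actual implementation lives in the test resources and may differ):

import time

def find_in_log_with_timeout(log_path: str, expected: list, timeout: int = 30) -> bool:
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with open(log_path) as f:
                content = f.read()
        except FileNotFoundError:
            content = ""  # the broker may not have created the log yet
        if all(s in content for s in expected):
            return True
        time.sleep(1)
    return False

Usage mirroring BCL2: find_in_log_with_timeout(central_log, ["Starting the TCP thread pool of 5 threads"], 30).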
diff --git a/tests/bam/inherited_downtime.robot b/tests/bam/inherited_downtime.robot index e507c6c22de..211741455e0 100644 --- a/tests/bam/inherited_downtime.robot +++ b/tests/bam/inherited_downtime.robot @@ -127,3 +127,85 @@ BEBAMIDT2 Stop Engine Stop Broker + +BEBAMIGNDT1 + [Documentation] A BA of type 'worst' with two services is configured. The downtime policy on this BA is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is OK because of the policy on indicators. A first downtime is cancelled, the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. + [Tags] broker downtime engine bam + Clear Commands Status + Config Broker module + Config Broker central + Broker Config Log central bam trace + Config Broker rrd + Config Engine ${1} + + Clone Engine Config To DB + Add Bam Config To Engine + + @{svc}= Set Variable ${{ [("host_16", "service_313"), ("host_16", "service_314")] }} + Create BA With Services test worst ${svc} ignore + Add Bam Config To Broker central + # The command of service_313 is set to ok, the one of service_314 to critical + ${cmd_1}= Get Command Id 313 + Log To Console service_313 has command id ${cmd_1} + Set Command Status ${cmd_1} 0 + ${cmd_2}= Get Command Id 314 + Log To Console service_314 has command id ${cmd_2} + Set Command Status ${cmd_2} 2 + Start Broker + Start Engine + Sleep 5s + + # KPI set to ok + Repeat Keyword 3 times Process Service Check Result host_16 service_313 0 output critical for 313 + ${result}= Check Service Status With Timeout host_16 service_313 0 60 + Should Be True ${result} msg=The service (host_16,service_313) is not OK as expected + + # KPI set to critical + Repeat Keyword 3 times Process Service Check Result host_16 service_314 2 output critical for 314 + ${result}= Check Service Status With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16,service_314) is not CRITICAL as expected + + # The BA should become critical + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The BA ba_1 is not CRITICAL as expected + Log To Console The BA is critical. + + # Two downtimes are applied on service_314 + Schedule Service Downtime host_16 service_314 3600 + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log To Console One downtime applied to service_314. + + Schedule Service Downtime host_16 service_314 1800 + ${result}= Check Service Downtime With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log To Console Two downtimes applied to service_314. + + ${result}= Check Service Downtime With Timeout _Module_BAM_1 ba_1 0 60 + Should Be True ${result} msg=The BA ba_1 is in downtime but should not be + Log To Console The BA is configured to ignore KPIs in downtime + + ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The service in downtime should be ignored while computing the state of this BA. + Log To Console The BA is OK, since the critical service is in downtime.
+ + # The first downtime is deleted + Delete Service Downtime host_16 service_314 + + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) does not have exactly one downtime as it should + Log To Console Still one downtime applied to service_314. + + ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The BA is not OK whereas service_314 is still in downtime. + Log To Console The BA is still OK + + # The second downtime is deleted + Delete Service Downtime host_16 service_314 + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The critical service is no longer in downtime, the BA should be critical. + Log To Console The BA is now critical (no more downtime) + + Stop Engine + Kindly Stop Broker + diff --git a/tests/broker/command-line.robot b/tests/broker/command-line.robot new file mode 100644 index 00000000000..93aa9ae5d80 --- /dev/null +++ b/tests/broker/command-line.robot @@ -0,0 +1,71 @@ +*** Settings *** +Resource ../resources/resources.robot +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes + +Documentation Centreon Broker command-line start/stop tests +Library Process +Library OperatingSystem +Library ../resources/Broker.py +Library DateTime + + +*** Test Cases *** +BCL1 + [Documentation] Starting broker with option '-s foobar' should return an error + [Tags] Broker start-stop + Config Broker central + Start Broker With Args -s foobar + ${result}= Wait For Broker + ${expected}= Evaluate "The option -s expects a positive integer" in """${result}""" + Should Be True ${expected} msg=expected error 'The option -s expects a positive integer' + +BCL2 + [Documentation] Starting broker with option '-s5' should work + [Tags] Broker start-stop + Config Broker central + ${start}= Get Current Date exclude_millis=True + Sleep 1s + Start Broker With Args -s5 /etc/centreon-broker/central-broker.json + ${table}= Create List Starting the TCP thread pool of 5 threads + ${logger_res}= Find In Log With Timeout ${centralLog} ${start} ${table} 30 + Should Be True ${logger_res} msg=Didn't find 5 threads in /var/log/centreon-broker/central-broker-master.log + Stop Broker With Args + +BCL3 + [Documentation] Starting broker with option '-D' should work and activate diagnose mode + [Tags] Broker start-stop + Config Broker central + ${start}= Get Current Date exclude_millis=True + Sleep 1s + Start Broker With Args -D /etc/centreon-broker/central-broker.json + ${result}= Wait For Broker + ${expected}= Evaluate "diagnostic:" in """${result}""" + Should Be True ${expected} msg=diagnostic mode didn't launch + +BCL4 + [Documentation] Starting broker with options '-s2' and '-D' should work.
+ [Tags] Broker start-stop + Config Broker central + Start Broker With Args -s2 -D /etc/centreon-broker/central-broker.json + ${result}= Wait For Broker + ${expected}= Evaluate "diagnostic:" in """${result}""" + Should Be True ${expected} msg=diagnostic mode didn't launch + +*** Keywords *** +Start Broker With Args + [Arguments] @{options} + Log To Console @{options} + Start Process /usr/sbin/cbd @{options} alias=b1 stdout=/tmp/output.txt + +Wait For Broker + Wait For Process b1 + ${result}= Get File /tmp/output.txt + Remove File /tmp/output.txt + [Return] ${result} + +Stop Broker With Args + Send Signal To Process SIGTERM b1 + ${result}= Wait For Process b1 timeout=60s on_timeout=kill + Should Be Equal As Integers ${result.rc} 0 diff --git a/tests/init-sql.sh b/tests/init-sql.sh index 514503abd03..f22037423d8 100755 --- a/tests/init-sql.sh +++ b/tests/init-sql.sh @@ -1,4 +1,16 @@ #!/bin/bash -mysql -u root -pcentreon -e "drop database centreon" -mysql -u root -pcentreon < resources/centreon.sql +DBUserRoot=$(awk '($1=="${DBUserRoot}") {print $2}' resources/db_variables.robot) +DBPassRoot=$(awk '($1=="${DBPassRoot}") {print $2}' resources/db_variables.robot) + +if [ -z "$DBUserRoot" ] ; then + DBUserRoot="root" +fi + +if [ -z "$DBPassRoot" ] ; then + DBPassRoot="centreon" +fi + +mysql --user="$DBUserRoot" --password="$DBPassRoot" -e "drop database centreon" +mysql --user="$DBUserRoot" --password="$DBPassRoot" < ../resources/centreon.sql +mysql --user="$DBUserRoot" --password="$DBPassRoot" < ../resources/centreon_storage.sql diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index c6b5e81e2cd..eefe85ae5dc 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -3,7 +3,9 @@ import pymysql.cursors import time import shutil -import socket, sys, time +import socket +import sys +import time from datetime import datetime from subprocess import getoutput import subprocess as subp @@ -17,15 +19,18 @@ from google.protobuf import empty_pb2 from robot.libraries.BuiltIn import BuiltIn -db_name = BuiltIn().get_variable_value("${DBName}") -db_host = BuiltIn().get_variable_value("${DBHost}") -db_user = BuiltIn().get_variable_value("${DBUser}") -db_pass = BuiltIn().get_variable_value("${DBPass}") -db_port = BuiltIn().get_variable_value("${DBPort}") TIMEOUT = 30 +BuiltIn().import_resource('db_variables.robot') +DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}") +DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}") +DB_USER = BuiltIn().get_variable_value("${DBUser}") +DB_PASS = BuiltIn().get_variable_value("${DBPass}") +DB_HOST = BuiltIn().get_variable_value("${DBHost}") +DB_PORT = BuiltIn().get_variable_value("${DBPort}") + config = { -"central": """{{ + "central": """{{ "centreonBroker": {{ "broker_id": {0}, "broker_name": "{1}", @@ -74,11 +79,11 @@ "db_type": "mysql", "retry_interval": "5", "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "connections_count": "3", "read_timeout": "1", @@ -88,7 +93,7 @@ "name": "centreon-broker-master-rrd", "port": "5670", "buffering_timeout": "0", - "host": "localhost", + "host": "127.0.0.1", "retry_interval": "5", "protocol": "bbdo", "tls": "no", @@ -104,11 +109,11 @@ "buffering_timeout": "0", "length": "15552000", "db_type": "mysql", - "db_host": "localhost", - "db_port":
"3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "read_timeout": "1", "check_replication": "no", @@ -131,7 +136,7 @@ }} }}""", -"module": """{{ + "module": """{{ "centreonBroker": {{ "broker_id": {}, "broker_name": "{}", @@ -164,7 +169,7 @@ {{ "name": "central-module-master-output", "port": "5669", - "host": "localhost", + "host": "127.0.0.1", "protocol": "bbdo", "tls": "no", "negotiation": "yes", @@ -188,7 +193,7 @@ }} }}""", -"rrd": """{{ + "rrd": """{{ "centreonBroker": {{ "broker_id": {0}, "broker_name": "{1}", @@ -258,7 +263,7 @@ }} }}""", -"central_map": """{{ + "central_map": """{{ "centreonBroker": {{ "broker_id": {0}, "broker_name": "{1}", @@ -284,7 +289,8 @@ "tcp": "error", "tls": "error", "lua": "error", - "bam": "error" + "bam": "error", + "grpc": "error" }} }}, "input": [ @@ -307,11 +313,11 @@ "db_type": "mysql", "retry_interval": "5", "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "connections_count": "3", "read_timeout": "1", @@ -349,11 +355,11 @@ "buffering_timeout": "0", "length": "15552000", "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "read_timeout": "1", "check_replication": "no", @@ -377,9 +383,26 @@ }}""", } + +def _apply_conf(name, callback): + if name == 'central': + filename = "central-broker.json" + elif name.startswith('module'): + filename = "central-{}.json".format(name) + else: + filename = "central-rrd.json" + + f = open("/etc/centreon-broker/{}".format(filename), "r") + buf = f.read() + f.close() + conf = json.loads(buf) + callback(conf) + f = open("/etc/centreon-broker/{}".format(filename), "w") + f.write(json.dumps(conf, indent=2)) + f.close() + + def config_broker(name, poller_inst: int = 1): - if not exists("/var/lib/centreon-broker/"): - makedirs("/var/lib/centreon-broker/") if name == 'central': broker_id = 1 broker_name = "central-broker-master" @@ -398,15 +421,18 @@ def config_broker(name, poller_inst: int = 1): if not exists("/var/lib/centreon/status/"): makedirs("/var/lib/centreon/status/") if not exists("/var/lib/centreon/metrics/tmpl_15552000_300_0.rrd"): - getoutput("rrdcreate /var/lib/centreon/metrics/tmpl_15552000_300_0.rrd DS:value:ABSOLUTE:3000:U:U RRA:AVERAGE:0.5:1:864000") + getoutput( + "rrdcreate /var/lib/centreon/metrics/tmpl_15552000_300_0.rrd DS:value:ABSOLUTE:3000:U:U RRA:AVERAGE:0.5:1:864000") broker_id = 2 broker_name = "central-rrd-master" filename = "central-rrd.json" if name == 'module': for i in range(poller_inst): - broker_name = "/etc/centreon-broker/central-module{}.json".format(i) - buf = config[name].format(broker_id, "central-module-master{}".format(i)) + broker_name = "/etc/centreon-broker/central-module{}.json".format( + i) + buf = config[name].format( + broker_id, "central-module-master{}".format(i)) conf = json.loads(buf) conf["centreonBroker"]["poller_id"] = i + 1 @@ -415,9 +441,36 @@ def config_broker(name, poller_inst: int = 1): f.close() 
else: f = open("/etc/centreon-broker/{}".format(filename), "w") - f.write(config[name].format(broker_id, broker_name)) + f.write(config[name].format(broker_id, broker_name, DB_HOST, DB_PORT, DB_USER, DB_PASS, DB_NAME_STORAGE)) f.close() + +def change_broker_tcp_output_to_grpc(name: str): + def output_to_grpc(conf): + output_dict = conf["centreonBroker"]["output"] + for i, v in enumerate(output_dict): + if v["type"] == "ipv4": + v["type"] = "grpc" + _apply_conf(name, output_to_grpc) + + +def change_broker_tcp_input_to_grpc(name: str): + def input_to_grpc(conf): + input_dict = conf["centreonBroker"]["input"] + for i, v in enumerate(input_dict): + if v["type"] == "ipv4": + v["type"] = "grpc" + _apply_conf(name, input_to_grpc) + + +def change_broker_compression_output(config_name: str, compression_value: str): + def compression_modifier(conf): + output_dict = conf["centreonBroker"]["output"] + for i, v in enumerate(output_dict): + v["compression"] = compression_value + _apply_conf(config_name, compression_modifier) + + def config_broker_sql_output(name, output): if name == 'central': filename = "central-broker.json" @@ -436,66 +489,67 @@ def config_broker_sql_output(name, output): output_dict.pop(i) if output == 'unified_sql': output_dict.append({ - "name" : "central-broker-unified-sql", - "db_type" : "mysql", - "db_host" : "localhost", - "db_port" : "3306", - "db_user" : "centreon", - "db_password" : "centreon", - "db_name" : "centreon_storage", - "interval" : "60", - "length" : "15552000", - "queries_per_transaction" : "20000", - "connections_count" : "4", - "read_timeout" : "60", - "buffering_timeout" : "0", - "retry_interval" : "60", - "check_replication" : "no", - "type" : "unified_sql", - "store_in_data_bin" : "yes", - "insert_in_index_data" : "1" + "name": "central-broker-unified-sql", + "db_type": "mysql", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, + "interval": "60", + "length": "15552000", + "queries_per_transaction": "20000", + "connections_count": "4", + "read_timeout": "60", + "buffering_timeout": "0", + "retry_interval": "60", + "check_replication": "no", + "type": "unified_sql", + "store_in_data_bin": "yes", + "insert_in_index_data": "1" }) elif output == 'sql/perfdata': output_dict.append({ - "name": "central-broker-master-sql", - "db_type": "mysql", - "retry_interval": "5", - "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", - "queries_per_transaction": "1000", - "connections_count": "3", - "read_timeout": "1", - "type": "sql" + "name": "central-broker-master-sql", + "db_type": "mysql", + "retry_interval": "5", + "buffering_timeout": "0", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, + "queries_per_transaction": "1000", + "connections_count": "3", + "read_timeout": "1", + "type": "sql" }) output_dict.append({ - "name": "central-broker-master-perfdata", - "interval": "60", - "retry_interval": "5", - "buffering_timeout": "0", - "length": "15552000", - "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", - "queries_per_transaction": "1000", - "read_timeout": "1", - "check_replication": "no", - "store_in_data_bin": "yes", - "connections_count": "3", - "insert_in_index_data": "1", - "type": "storage" + "name": 
"central-broker-master-perfdata", + "interval": "60", + "retry_interval": "5", + "buffering_timeout": "0", + "length": "15552000", + "db_type": "mysql", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, + "queries_per_transaction": "1000", + "read_timeout": "1", + "check_replication": "no", + "store_in_data_bin": "yes", + "connections_count": "3", + "insert_in_index_data": "1", + "type": "storage" }) f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() -def broker_config_clear_outputs_except(name, ex : list): + +def broker_config_clear_outputs_except(name, ex: list): if name == 'central': filename = "central-broker.json" elif name.startswith('module'): @@ -567,14 +621,15 @@ def broker_config_add_lua_output(name, output, luafile): conf = json.loads(buf) output_dict = conf["centreonBroker"]["output"] output_dict.append({ - "name": output, - "path": luafile, - "type": "lua" + "name": output, + "path": luafile, + "type": "lua" }) f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_output_set(name, output, key, value): if name == 'central': filename = "central-broker.json" @@ -586,12 +641,14 @@ def broker_config_output_set(name, output, key, value): buf = f.read() f.close() conf = json.loads(buf) - output_dict = [elem for i, elem in enumerate(conf["centreonBroker"]["output"]) if elem["name"] == output][0] + output_dict = [elem for i, elem in enumerate( + conf["centreonBroker"]["output"]) if elem["name"] == output][0] output_dict[key] = value f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_output_set_json(name, output, key, value): if name == 'central': filename = "central-broker.json" @@ -603,13 +660,15 @@ def broker_config_output_set_json(name, output, key, value): buf = f.read() f.close() conf = json.loads(buf) - output_dict = [elem for i, elem in enumerate(conf["centreonBroker"]["output"]) if elem["name"] == output][0] + output_dict = [elem for i, elem in enumerate( + conf["centreonBroker"]["output"]) if elem["name"] == output][0] j = json.loads(value) output_dict[key] = j f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_output_remove(name, output, key): if name == 'central': filename = "central-broker.json" @@ -621,13 +680,15 @@ def broker_config_output_remove(name, output, key): buf = f.read() f.close() conf = json.loads(buf) - output_dict = [elem for i, elem in enumerate(conf["centreonBroker"]["output"]) if elem["name"] == output][0] + output_dict = [elem for i, elem in enumerate( + conf["centreonBroker"]["output"]) if elem["name"] == output][0] if key in output_dict: - output_dict.pop(key) + output_dict.pop(key) f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_input_set(name, inp, key, value): if name == 'central': filename = "central-broker.json" @@ -639,12 +700,14 @@ def broker_config_input_set(name, inp, key, value): buf = f.read() f.close() conf = json.loads(buf) - input_dict = [elem for i, elem in enumerate(conf["centreonBroker"]["input"]) if elem["name"] == inp][0] + input_dict = [elem for i, elem in enumerate( + conf["centreonBroker"]["input"]) if elem["name"] == inp][0] input_dict[key] = value f = open("/etc/centreon-broker/{}".format(filename), "w") 
f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_input_remove(name, inp, key): if name == 'central': filename = "central-broker.json" @@ -656,13 +719,15 @@ def broker_config_input_remove(name, inp, key): buf = f.read() f.close() conf = json.loads(buf) - input_dict = [elem for i, elem in enumerate(conf["centreonBroker"]["input"]) if elem["name"] == inp][0] + input_dict = [elem for i, elem in enumerate( + conf["centreonBroker"]["input"]) if elem["name"] == inp][0] if key in input_dict: - input_dict.pop(key) + input_dict.pop(key) f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_log(name, key, value): if name == 'central': filename = "central-broker.json" @@ -680,6 +745,7 @@ def broker_config_log(name, key, value): f.write(json.dumps(conf, indent=2)) f.close() + def broker_config_flush_log(name, value): if name == 'central': filename = "central-broker.json" @@ -697,63 +763,65 @@ def broker_config_flush_log(name, value): f.write(json.dumps(conf, indent=2)) f.close() + def check_broker_stats_exist(name, key1, key2, timeout=TIMEOUT): - limit = time.time() + timeout - while time.time() < limit: - if name == 'central': - filename = "central-broker-master-stats.json" - elif name == 'module': - filename = "central-module-master-stats.json" - else: - filename = "central-rrd-master-stats.json" - retry = True - while retry: - retry = False - f = open("/var/lib/centreon-broker/{}".format(filename), "r") - buf = f.read() - f.close() - - try: - conf = json.loads(buf) - except: + limit = time.time() + timeout + while time.time() < limit: + if name == 'central': + filename = "central-broker-master-stats.json" + elif name == 'module': + filename = "central-module-master-stats.json" + else: + filename = "central-rrd-master-stats.json" retry = True - if key1 in conf: - if key2 in conf[key1]: - return True - time.sleep(1) - return False + while retry: + retry = False + f = open("/var/lib/centreon-broker/{}".format(filename), "r") + buf = f.read() + f.close() + + try: + conf = json.loads(buf) + except: + retry = True + if key1 in conf: + if key2 in conf[key1]: + return True + time.sleep(1) + return False + def get_broker_stats_size(name, key, timeout=TIMEOUT): - limit = time.time() + timeout - retval = 0 - while time.time() < limit: - if name == 'central': - filename = "central-broker-master-stats.json" - elif name == 'module': - filename = "central-module-master-stats.json" - else: - filename = "central-rrd-master-stats.json" - retry = True - while retry: - retry = False - f = open("/var/lib/centreon-broker/{}".format(filename), "r") - buf = f.read() - f.close() - try: - conf = json.loads(buf) - except: + limit = time.time() + timeout + retval = 0 + while time.time() < limit: + if name == 'central': + filename = "central-broker-master-stats.json" + elif name == 'module': + filename = "central-module-master-stats.json" + else: + filename = "central-rrd-master-stats.json" retry = True - - if key in conf: - value = len(conf[key]) - else: - value = 0 - if value > retval: - retval = value - elif retval != 0: - return retval - time.sleep(5) - return retval + while retry: + retry = False + f = open("/var/lib/centreon-broker/{}".format(filename), "r") + buf = f.read() + f.close() + try: + conf = json.loads(buf) + except: + retry = True + + if key in conf: + value = len(conf[key]) + else: + value = 0 + if value > retval: + retval = value + elif retval != 0: + return retval + time.sleep(5) + return retval ## @@ -763,12 +831,12 @@ 
def get_broker_stats_size(name, key, timeout=TIMEOUT): # # @return a list of index ids. # -def get_not_existing_indexes(count:int): +def get_not_existing_indexes(count: int): # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -801,15 +869,16 @@ def get_not_existing_indexes(count:int): # # @return a list of index ids. # -def get_indexes_to_delete(count:int): - files = [os.path.basename(x) for x in glob.glob("/var/lib/centreon/metrics/[0-9]*.rrd")] +def get_indexes_to_delete(count: int): + files = [os.path.basename(x) for x in glob.glob( + "/var/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -840,15 +909,16 @@ def get_indexes_to_delete(count:int): # # @return a list of metric ids. # -def get_not_existing_metrics(count:int): - files = [os.path.basename(x) for x in glob.glob("/var/lib/centreon/metrics/[0-9]*.rrd")] +def get_not_existing_metrics(count: int): + files = [os.path.basename(x) for x in glob.glob( + "/var/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -877,15 +947,16 @@ def get_not_existing_metrics(count:int): # # @return a list of metric ids. # -def get_metrics_to_delete(count:int): - files = [os.path.basename(x) for x in glob.glob("/var/lib/centreon/metrics/[0-9]*.rrd")] +def get_metrics_to_delete(count: int): + files = [os.path.basename(x) for x in glob.glob( + "/var/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -906,15 +977,17 @@ def get_metrics_to_delete(count:int): # @param count:int The number of metrics to create. 
# -def create_metrics(count:int): - files = [os.path.basename(x) for x in glob.glob("/var/lib/centreon/metrics/[0-9]*.rrd")] + +def create_metrics(count: int): + files = [os.path.basename(x) for x in glob.glob( + "/var/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -925,7 +998,7 @@ def create_metrics(count:int): cursor.execute(sql) result = cursor.fetchall() ids_db = [r['metric_id'] for r in result] - if list(set(ids) & set(ids_db)) == [] : + if list(set(ids) & set(ids_db)) == []: sql = "DELETE FROM metrics" cursor.execute(sql) connection.commit() @@ -933,21 +1006,24 @@ def create_metrics(count:int): cursor.execute(sql) result = cursor.fetchall() ids_index = [r['id'] for r in result] - if ids_index == [] : + if ids_index == []: sql = "INSERT INTO index_data (host_id, service_id) VALUES ('1', '1')" cursor.execute(sql) ids_index = cursor.lastrowid - for c in range(count) : - sql = "INSERT INTO metrics (index_id,metric_name,unit_name,warn,warn_low,warn_threshold_mode,crit,crit_low,crit_threshold_mode,min,max,current_value,data_source_type) VALUES ('{}','metric_{}','unit_{}','10','1','0','1','1','0','0','100','25','0')".format(ids_index[0],c,c) + for c in range(count): + sql = "INSERT INTO metrics (index_id,metric_name,unit_name,warn,warn_low,warn_threshold_mode,crit,crit_low,crit_threshold_mode,min,max,current_value,data_source_type) VALUES ('{}','metric_{}','unit_{}','10','1','0','1','1','0','0','100','25','0')".format( + ids_index[0], c, c) cursor.execute(sql) ids_metric = cursor.lastrowid connection.commit() - shutil.copy("/var/lib/centreon/metrics/tmpl_15552000_300_0.rrd", "/var/lib/centreon/metrics/{}.rrd".format(ids_metric)) + shutil.copy("/var/lib/centreon/metrics/tmpl_15552000_300_0.rrd", + "/var/lib/centreon/metrics/{}.rrd".format(ids_metric)) logger.console("create metric file {}".format(ids_metric)) def run_reverse_bam(duration, interval): - subp.Popen("broker/map_client.py {:f}".format(interval), shell=True, stdout=subp.PIPE, stdin=subp.PIPE) + subp.Popen("broker/map_client.py {:f}".format(interval), + shell=True, stdout=subp.PIPE, stdin=subp.PIPE) time.sleep(duration) getoutput("kill -9 $(ps aux | grep map_client.py | awk '{print $2}')") @@ -959,14 +1035,15 @@ def run_reverse_bam(duration, interval): # # @return a list of indexes def get_indexes_to_rebuild(count: int): - files = [os.path.basename(x) for x in glob.glob("/var/lib/centreon/metrics/[0-9]*.rrd")] + files = [os.path.basename(x) for x in glob.glob( + "/var/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) retval = [] @@ -978,15 +1055,18 @@ def get_indexes_to_rebuild(count: int): result = cursor.fetchall() for r in result: if int(r['metric_id']) in ids: - logger.console("building data for metric {}".format(r['metric_id'])) + logger.console( + "building data for metric {}".format(r['metric_id'])) start = int(time.time()) - 24 * 60 * 60 * 
30 # We go back to 30 days with steps of 5 mn value1 = int(r['metric_id']) value2 = 0 value = value1 - cursor.execute("DELETE FROM data_bin WHERE id_metric={} AND ctime >= {}".format(r['metric_id'], start)) + cursor.execute("DELETE FROM data_bin WHERE id_metric={} AND ctime >= {}".format( + r['metric_id'], start)) for i in range(0, 24 * 60 * 60 * 30, 60 * 5): - cursor.execute("INSERT INTO data_bin (id_metric, ctime, value, status) VALUES ({},{},{},'0')".format(r['metric_id'], start + i, value)) + cursor.execute("INSERT INTO data_bin (id_metric, ctime, value, status) VALUES ({},{},{},'0')".format( + r['metric_id'], start + i, value)) if value == value1: value = value2 else: @@ -1010,16 +1090,17 @@ def get_indexes_to_rebuild(count: int): # @return a list of metric ids. def get_metrics_matching_indexes(indexes): # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: # Read a single record - sql = "SELECT `metric_id` FROM `metrics` WHERE `index_id` IN ({})".format(','.join(map(str, indexes))) + sql = "SELECT `metric_id` FROM `metrics` WHERE `index_id` IN ({})".format( + ','.join(map(str, indexes))) cursor.execute(sql) result = cursor.fetchall() retval = [int(r['metric_id']) for r in result] @@ -1029,25 +1110,23 @@ def get_metrics_matching_indexes(indexes): ## # @brief send a gRPC command to remove graphs (by indexes or by metrics) # +# @param port the gRPC port to use to send the command # @param indexes a list of indexes # @param metrics a list of metrics # -#def remove_graphs(indexes, metrics, timeout=10): -# # Connect to the database -# connection = pymysql.connect(host='localhost', -# user='centreon', -# password='centreon', -# database='centreon_storage', -# charset='utf8mb4', -# cursorclass=pymysql.cursors.DictCursor) -# with connection: -# with connection.cursor() as cursor: -# # Read a single record -# sql = "UPDATE metrics SET to_delete='1' WHERE index_id IN ({})".format(','.join(map(str, indexes))) -# cursor.execute(sql) -# result = cursor.fetchall() -# retval = [int(r['metric_id']) for r in result] -# return retval +def remove_graphs(port, indexes, metrics, timeout=10): + limit = time.time() + timeout + while time.time() < limit: + time.sleep(1) + with grpc.insecure_channel("127.0.0.1:{}".format(port)) as channel: + stub = broker_pb2_grpc.BrokerStub(channel) + trm = broker_pb2.ToRemove() + trm.index_ids.extend(indexes) + trm.metric_ids.extend(metrics) + try: + stub.RemoveGraphs(trm) + except: + logger.console("gRPC server not ready") ## @@ -1090,7 +1169,8 @@ def compare_rrd_average_value(metric, value: float): res = float(lst[1].replace(',', '.')) return abs(res - float(value)) < 2 else: - logger.console("It was impossible to get the average value from the file /var/lib/centreon/metrics/{}.rrd from the last 30 days".format(metric)) + logger.console( + "It was impossible to get the average value from the file /var/lib/centreon/metrics/{}.rrd from the last 30 days".format(metric)) return True @@ -1167,34 +1247,33 @@ def add_bam_config_to_broker(name): "cache": "yes", "check_replication": "no", "command_file": "/var/lib/centreon-engine/config0/rw/centengine.cmd", - "db_host": "127.0.0.1", - "db_name": "centreon", - "db_password": "centreon", - "db_port": "3306", + "db_host": 
DB_HOST, + "db_name": DB_NAME_CONF, + "db_password": DB_PASS, + "db_port": DB_PORT, "db_type": "mysql", - "db_user": "centreon", + "db_user": DB_USER, "queries_per_transaction": "0", - "storage_db_name": "centreon_storage", + "storage_db_name": DB_NAME_STORAGE, "type": "bam" - }) + }) output_dict.append({ - "name": "centreon-bam-reporting", - "filters": { - "category": [ - "bam" - ] - }, - "check_replication": "no", - "db_host": "127.0.0.1", - "db_name": "centreon_storage", - "db_password": "centreon", - "db_port": "3306", - "db_type": "mysql", - "db_user": "centreon", - "queries_per_transaction": "0", - "type": "bam_bi" + "name": "centreon-bam-reporting", + "filters": { + "category": [ + "bam" + ] + }, + "check_replication": "no", + "db_host": DB_HOST, + "db_name": DB_NAME_STORAGE, + "db_password": DB_PASS, + "db_port": DB_PORT, + "db_type": "mysql", + "db_user": DB_USER, + "queries_per_transaction": "0", + "type": "bam_bi" }) f = open("/etc/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() - diff --git a/tests/resources/Common.py b/tests/resources/Common.py index 949c34ce684..44410fdb1b5 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -6,21 +6,34 @@ from dateutil import parser from datetime import datetime import pymysql.cursors +from robot.libraries.BuiltIn import BuiltIn + TIMEOUT = 30 +BuiltIn().import_resource('db_variables.robot') +DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}") +DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}") +DB_USER = BuiltIn().get_variable_value("${DBUser}") +DB_PASS = BuiltIn().get_variable_value("${DBPass}") +DB_HOST = BuiltIn().get_variable_value("${DBHost}") +DB_PORT = BuiltIn().get_variable_value("${DBPort}") + def check_connection(port: int, pid1: int, pid2: int): limit = time.time() + TIMEOUT - r = re.compile(r"^ESTAB.*127\.0\.0\.1:{}\s".format(port)) + r = re.compile( + r"^ESTAB.*127\.0\.0\.1\]*:{}\s|^ESTAB.*\[::1\]*:{}\s".format(port, port)) + p = re.compile( + r"127\.0\.0\.1\]*:(\d+)\s+.*127\.0\.0\.1\]*:(\d+)\s+.*,pid=(\d+)") + p_v6 = re.compile( + r"::1\]*:(\d+)\s+.*::1\]*:(\d+)\s+.*,pid=(\d+)") while time.time() < limit: out = getoutput("ss -plant") lst = out.split('\n') estab_port = list(filter(r.match, lst)) if len(estab_port) >= 2: ok = [False, False] - p = re.compile( - r"127\.0\.0\.1:(\d+)\s+127\.0\.0\.1:(\d+)\s+.*,pid=(\d+)") for l in estab_port: m = p.search(l) if m is not None: @@ -28,10 +41,15 @@ def check_connection(port: int, pid1: int, pid2: int): ok[0] = True if pid2 == int(m.group(3)): ok[1] = True + m = p_v6.search(l) + if m is not None: + if pid1 == int(m.group(3)): + ok[0] = True + if pid2 == int(m.group(3)): + ok[1] = True if ok[0] and ok[1]: return True time.sleep(1) - return False @@ -126,19 +144,23 @@ def create_certificate(host: str, cert: str): create_key_and_certificate(host, "", cert) +def run_env(): + return getoutput("echo $RUN_ENV | awk '{print $1}'") + + def start_mysql(): - if not getoutput("echo $RUN_ENV | awk '{print $1}'"): + if not run_env(): getoutput("systemctl start mysql") else: getoutput("mariadbd --user=root > /dev/null 2>&1 &") def stop_mysql(): - if not getoutput("echo $RUN_ENV | awk '{print $1}'"): + if not run_env(): getoutput("systemctl stop mysql") else: getoutput( - "kill -9 $(ps aux | grep 'mariadbd --user=root' | grep -v grep | awk '{print $2}')") + "kill -9 $(ps aux | grep 'mariadbd --user=root' | grep -v grep | awk '{print $2}')") def kill_broker(): @@ -183,7 +205,8 @@ def check_engine_logs_are_duplicated(log: 
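One caveat on the `BuiltIn().get_variable_value()` block added to `Common.py` above: it runs at import time, and `BuiltIn` only works while a Robot Framework run is active, so importing the module from plain Python fails. A defensive variant of the same lookup, as a sketch (the `get_db_setting` helper and its default values are assumptions, not part of the patch):

```python
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError

def get_db_setting(var_name, default):
    # Resolve ${var_name} from db_variables.robot, falling back to a
    # default when no Robot run is active or the variable is unset.
    try:
        value = BuiltIn().get_variable_value("${%s}" % var_name)
    except RobotNotRunningError:
        return default
    return value if value is not None else default

DB_HOST = get_db_setting("DBHost", "localhost")
DB_USER = get_db_setting("DBUser", "centreon")
```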
str, date): idx = find_line_from(lines, date) count_true = 0 count_false = 0 - logs = [] + logs_old = [] + logs_new = [] old_log = re.compile(r"\[[^\]]*\] \[[^\]]*\] ([^\[].*)") new_log = re.compile( r"\[[^\]]*\] \[[^\]]*\] \[[^\]]*\] \[[^\]]*\] (.*)") @@ -191,25 +214,27 @@ def check_engine_logs_are_duplicated(log: str, date): mo = old_log.match(l) mn = new_log.match(l) if mo is not None: - if mo.group(1) in logs: - logs.remove(mo.group(1)) + if mo.group(1) in logs_new: + logs_new.remove(mo.group(1)) else: - logs.append(mo.group(1)) + logs_old.append(mo.group(1)) else: mn = new_log.match(l) if mn is not None: - if mn.group(1) in logs: - logs.remove(mn.group(1)) + if mn.group(1) in logs_old: + logs_old.remove(mn.group(1)) else: - logs.append(mn.group(1)) - if len(logs) <= 1: + logs_new.append(mn.group(1)) + if len(logs_old) <= 1: # It is possible to miss one log because of the initial split of the # file. return True else: - logger.console("Logs not duplicated") - for l in logs: + logger.console( + "{} old logs are not duplicated".format(len(logs_old))) + for l in logs_old: logger.console(l) + # We don't care about new logs not duplicated, in a future, we won't have any old logs except IOError: logger.console("The file '{}' does not exist".format(log)) return False @@ -223,23 +248,25 @@ def find_line_from(lines, date): start = 0 end = len(lines) - 1 idx = start - while end - start > 1: + while end > start: idx = (start + end) // 2 m = p.match(lines[idx]) while m is None: logger.console("Unable to parse the date ({} <= {} <= {}): <<{}>>".format( start, idx, end, lines[idx])) idx -= 1 - if idx >= 0 : + if idx >= 0: m = p.match(lines[idx]) else: logger.console("We are at the first line and no date found") idx_d = get_date(m.group(1)) - if my_date <= idx_d: + if my_date <= idx_d and end != idx: end = idx - elif my_date > idx_d: + elif my_date > idx_d and start != idx: start = idx + else: + break return idx @@ -269,6 +296,7 @@ def check_reschedule(log: str, date, content: str): logger.console("The file '{}' does not exist".format(log)) return False + def check_reschedule_with_timeout(log: str, date, content: str, timeout: int): limit = time.time() + timeout c = "" @@ -311,11 +339,11 @@ def set_command_status(cmd, status): def check_service_status_with_timeout(hostname: str, service_desc: str, status: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, autocommit=True, - database='centreon_storage', + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -324,7 +352,7 @@ def check_service_status_with_timeout(hostname: str, service_desc: str, status: cursor.execute("SELECT s.state FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description=\"{}\" AND h.name=\"{}\"".format( service_desc, hostname)) result = cursor.fetchall() - if result[0]['state'] and int(result[0]['state']) == status: + if result[0]['state'] is not None and int(result[0]['state']) == int(status): return True time.sleep(5) return False @@ -333,10 +361,10 @@ def check_service_status_with_timeout(hostname: str, service_desc: str, status: def check_severity_with_timeout(name: str, level, icon_id, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + 
connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -355,10 +383,10 @@ def check_severity_with_timeout(name: str, level, icon_id, timeout: int): def check_tag_with_timeout(name: str, typ, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -377,10 +405,10 @@ def check_tag_with_timeout(name: str, typ, timeout: int): def check_severities_count(value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -398,10 +426,10 @@ def check_severities_count(value: int, timeout: int): def check_tags_count(value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -419,10 +447,10 @@ def check_tags_count(value: int, timeout: int): def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: @@ -430,7 +458,7 @@ def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): cursor.execute( "SELECT current_status FROM mod_bam WHERE name='{}'".format(ba_name)) result = cursor.fetchall() - if result[0]['current_status'] and int(result[0]['current_status']) == status: + if result[0]['current_status'] is not None and int(result[0]['current_status']) == status: return True time.sleep(5) return False @@ -439,10 +467,10 @@ def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): def check_service_downtime_with_timeout(hostname: str, service_desc: str, enabled, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -459,16 +487,16 @@ def check_service_downtime_with_timeout(hostname: str, service_desc: str, enable def delete_service_downtime(hst: str, svc: str): now = int(time.time()) - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with 
connection: with connection.cursor() as cursor: - cursor.execute("select d.internal_id from downtimes d inner join hosts h on d.host_id=h.host_id inner join services s on d.service_id=s.service_id where d.cancelled='0' and s.scheduled_downtime_depth='1' and s.description='{}' and h.name='{}'".format(svc, hst)) + cursor.execute("select d.internal_id from downtimes d inner join hosts h on d.host_id=h.host_id inner join services s on d.service_id=s.service_id where d.cancelled='0' and s.scheduled_downtime_depth<>'0' and s.description='{}' and h.name='{}' LIMIT 1".format(svc, hst)) result = cursor.fetchall() did = int(result[0]['internal_id']) @@ -479,10 +507,10 @@ def delete_service_downtime(hst: str, svc: str): def number_of_downtimes_is(nb: int): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -495,10 +523,10 @@ def number_of_downtimes_is(nb: int): def clear_db(table: str): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -511,10 +539,10 @@ def clear_db(table: str): def check_service_severity_with_timeout(host_id: int, service_id: int, severity_id, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -532,20 +560,22 @@ def check_service_severity_with_timeout(host_id: int, service_id: int, severity_ time.sleep(1) return False + def check_host_severity_with_timeout(host_id: int, severity_id, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', autocommit=True, cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("select sv.id from resources r left join severities sv ON r.severity_id=sv.severity_id where r.parent_id = 0 and r.id={}".format(host_id)) + cursor.execute( + "select sv.id from resources r left join severities sv ON r.severity_id=sv.severity_id where r.parent_id = 0 and r.id={}".format(host_id)) result = cursor.fetchall() if len(result) > 0: if severity_id == 'None': @@ -556,7 +586,8 @@ def check_host_severity_with_timeout(host_id: int, severity_id, timeout: int): time.sleep(1) return False -def check_resources_tags_with_timeout(parent_id: int, mid: int, typ: str, tag_ids: list, timeout: int): + +def check_resources_tags_with_timeout(parent_id: int, mid: int, typ: str, tag_ids: list, timeout: int, enabled: bool = True): if typ == 'servicegroup': t = 0 elif typ == 'hostgroup': @@ -567,42 +598,63 @@ def check_resources_tags_with_timeout(parent_id: int, mid: int, typ: str, tag_id t = 3 limit = time.time() + timeout while time.time() < limit: - connection = 
pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("select t.id from resources r inner join resources_tags rt on r.resource_id=rt.resource_id inner join tags t on rt.tag_id=t.tag_id WHERE r.id={} and r.parent_id={} and t.type={}".format(mid, parent_id, t)) + logger.console("select t.id from resources r inner join resources_tags rt on r.resource_id=rt.resource_id inner join tags t on rt.tag_id=t.tag_id WHERE r.id={} and r.parent_id={} and t.type={}".format( + mid, parent_id, t)) + cursor.execute("select t.id from resources r inner join resources_tags rt on r.resource_id=rt.resource_id inner join tags t on rt.tag_id=t.tag_id WHERE r.id={} and r.parent_id={} and t.type={}".format( + mid, parent_id, t)) result = cursor.fetchall() - if len(result) > 0: + logger.console(result) + if not enabled: + if len(result) == 0: + return True + else: + for r in result: + if r['id'] in tag_ids: + logger.console( + "id {} is in tag ids".format(r['id'])) + break + return True + elif enabled and len(result) > 0: if len(result) == len(tag_ids): for r in result: if r['id'] not in tag_ids: - logger.console("id {} is not in tag ids".format(r['id'])) + logger.console( + "id {} is not in tag ids".format(r['id'])) break - return True + return True else: - logger.console("different sizes: result:{} and tag_ids:{}".format(len(result), len(tag_ids))) + logger.console("different sizes: result:{} and tag_ids:{}".format( + len(result), len(tag_ids))) + else: + logger.console("result") + logger.console(result) time.sleep(1) return False + def check_host_tags_with_timeout(host_id: int, tag_id: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("SELECT t.id FROM resources_tags rt, tags t WHERE rt.tag_id = t.tag_id and resource_id={} and t.id={}".format(host_id, tag_id)) + cursor.execute( + "SELECT t.id FROM resources_tags rt, tags t WHERE rt.tag_id = t.tag_id and resource_id={} and t.id={}".format(host_id, tag_id)) result = cursor.fetchall() if len(result) > 0: if int(result[0]['id']) == tag_id: @@ -610,62 +662,67 @@ def check_host_tags_with_timeout(host_id: int, tag_id: int, timeout: int): time.sleep(1) return False + def check_number_of_resources_monitored_by_poller_is(poller: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("select count(*) from hosts h where 
instance_id={} and enabled=1".format(poller)) - hresult = cursor.fetchall() - cursor.execute("select count(*) from hosts h left join services s on s.host_id=h.host_id where h.instance_id={} and s.enabled=1".format(poller)) - sresult = cursor.fetchall() - if len(hresult) > 0 and len(sresult) > 0: - if int(hresult[0]['count(*)']) + int(sresult[0]['count(*)']) == value: + cursor.execute( + "SELECT count(*) FROM resources WHERE poller_id={} AND enabled=1".format(poller)) + result = cursor.fetchall() + if len(result) > 0: + if int(result[0]['count(*)']) == value: return True time.sleep(1) return False + def check_number_of_downtimes(expected: int, start, timeout: int): limit = time.time() + timeout d = parser.parse(start).timestamp() while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("SELECT count(*) FROM downtimes WHERE start_time >= {} AND deletion_time IS NULL".format(d)) + cursor.execute( + "SELECT count(*) FROM downtimes WHERE start_time >= {} AND deletion_time IS NULL".format(d)) result = cursor.fetchall() if len(result) > 0: - logger.console("{}/{} active downtimes".format(result[0]['count(*)'], expected)) + logger.console( + "{}/{} active downtimes".format(result[0]['count(*)'], expected)) if int(result[0]['count(*)']) == expected: return True time.sleep(1) return False + def check_number_of_relations_between_hostgroup_and_hosts(hostgroup: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("SELECT count(*) FROM hosts_hostgroups WHERE hostgroup_id={}".format(hostgroup)) + cursor.execute( + "SELECT count(*) FROM hosts_hostgroups WHERE hostgroup_id={}".format(hostgroup)) result = cursor.fetchall() if len(result) > 0: if int(result[0]['count(*)']) == value: @@ -673,19 +730,21 @@ def check_number_of_relations_between_hostgroup_and_hosts(hostgroup: int, value: time.sleep(1) return False + def check_number_of_relations_between_servicegroup_and_services(servicegroup: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("SELECT count(*) FROM services_servicegroups WHERE servicegroup_id={}".format(servicegroup)) + cursor.execute( + "SELECT count(*) FROM services_servicegroups WHERE servicegroup_id={}".format(servicegroup)) result = cursor.fetchall() if len(result) > 0: if 
int(result[0]['count(*)']) == value: @@ -693,26 +752,29 @@ def check_number_of_relations_between_servicegroup_and_services(servicegroup: in time.sleep(1) return False + def check_host_status(host: str, value: int, t: int, in_resources: bool, timeout: int = TIMEOUT): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', - charset='utf8mb4', - cursorclass=pymysql.cursors.DictCursor) + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: key = '' confirmed = '' if in_resources: - cursor.execute("SELECT status, status_confirmed FROM resources WHERE parent_id=0 AND name='{}'".format(host)) + cursor.execute( + "SELECT status, status_confirmed FROM resources WHERE parent_id=0 AND name='{}'".format(host)) key = 'status' confirmed = 'status_confirmed' else: - cursor.execute("SELECT state, state_type FROM hosts WHERE name='{}'".format(host)) + cursor.execute( + "SELECT state, state_type FROM hosts WHERE name='{}'".format(host)) key = 'state' confirmed = 'state_type' result = cursor.fetchall() @@ -720,6 +782,7 @@ def check_host_status(host: str, value: int, t: int, in_resources: bool, timeout if int(result[0][key]) == value and int(result[0][confirmed]) == t: return True else: - logger.console("Host '{}' has status '{}' with confirmed '{}'".format(host, result[0][key], result[0][confirmed])) + logger.console("Host '{}' has status '{}' with confirmed '{}'".format( + host, result[0][key], result[0][confirmed])) time.sleep(1) return False diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 977d885ce5b..cab79b08ba2 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -1,4 +1,4 @@ -from os import makedirs,chmod +from os import makedirs, chmod from os.path import exists, dirname from robot.api import logger import db_conf @@ -9,14 +9,18 @@ import re import stat +import grpc +import engine_pb2 +import engine_pb2_grpc + + CONF_DIR = "/etc/centreon-engine" ENGINE_HOME = "/var/lib/centreon-engine" -ENGINE_LOG = "/var/log/centreon-engine" SCRIPT_DIR: str = dirname(__file__) + "/engine-scripts/" class EngineInstance: - def __init__(self, count: int, hosts: int=50, srv_by_host: int=20): + def __init__(self, count: int, hosts: int = 50, srv_by_host: int = 20): self.last_service_id = 0 self.hosts = [] self.services = [] @@ -83,7 +87,7 @@ def create_centengine(self, id: int, debug_level=0): "log_pid=1\n" "macros_filter=KEY80,KEY81,KEY82,KEY83,KEY84\n" "enable_macros_filter=0\n" - "grpc_port=50001\n" + "rpc_port=50001\n" "postpone_notification_to_timeperiod=0\n" "instance_heartbeat_interval=30\n" "enable_notifications=1\n" @@ -99,14 +103,16 @@ def create_centengine(self, id: int, debug_level=0): "log_host_retries=1\n" "log_event_handlers=1\n" "log_external_commands=1\n" + "log_v2_enabled=1\n" + "log_legacy_enabled=0\n" "log_v2_logger=file\n" - "log_level_functions=info\n" + "log_level_functions=trace\n" "log_level_config=info\n" "log_level_events=info\n" "log_level_checks=info\n" "log_level_notifications=info\n" "log_level_eventbroker=info\n" - "log_level_external_command=info\n" + "log_level_external_command=trace\n" "log_level_commands=info\n" "log_level_downtimes=info\n" "log_level_comments=info\n" @@ -133,7 +139,7 @@ def create_host(self): d = q % 255 retval = { - "config": "define 
host {{\n" "host_name host_{0}\n alias " + "config": "define host {{\n" " host_name host_{0}\n alias " "host_{0}\n address {1}.{2}.{3}.{4}\n check_command " " checkh{0}\n check_period 24x7\n register 1\n " "_KEY{0} VAL{0}\n _SNMPCOMMUNITY public\n " @@ -142,7 +148,7 @@ def create_host(self): "hid": hid} return retval - def create_service(self, host_id: int, cmd_ids:int): + def create_service(self, host_id: int, cmd_ids: int): self.last_service_id += 1 service_id = self.last_service_id command_id = random.randint(cmd_ids[0], cmd_ids[1]) @@ -295,7 +301,7 @@ def create_service_group(id, mbs): return retval @staticmethod - def create_severities(poller:int, nb:int, offset: int): + def create_severities(poller: int, nb: int, offset: int): config_file = "{}/config{}/severities.cfg".format(CONF_DIR, poller) ff = open(config_file, "w+") content = "" @@ -315,7 +321,8 @@ def create_severities(poller:int, nb:int, offset: int): @staticmethod def create_template_file(poller: int, typ: str, what: str, ids): - config_file = "{}/config{}/{}Templates.cfg".format(CONF_DIR, poller, typ) + config_file = "{}/config{}/{}Templates.cfg".format( + CONF_DIR, poller, typ) ff = open(config_file, "w+") content = "" idx = 1 @@ -327,13 +334,13 @@ def create_template_file(poller: int, typ: str, what: str, ids): active_checks_enabled 1 passive_checks_enabled 1 }} -""".format(typ,typ,idx,what, i) +""".format(typ, typ, idx, what, i) idx += 1 ff.write(content) ff.close() @staticmethod - def create_tags(poller:int, nb:int, offset: int): + def create_tags(poller: int, nb: int, offset: int): tt = ["servicegroup", "hostgroup", "servicecategory", "hostcategory"] config_file = "{}/config{}/tags.cfg".format(CONF_DIR, poller) @@ -368,8 +375,7 @@ def build_configs(self, hosts: int, services_by_host: int, debug_level=0): else: nb_hosts = hosts hosts = 0 - if not exists("{}/config{}".format(ENGINE_LOG, inst)): - makedirs("{}/config{}".format(ENGINE_LOG, inst)) + config_dir = "{}/config{}".format(CONF_DIR, inst) makedirs(config_dir) f = open(config_dir + "/centengine.cfg", "w+") @@ -457,7 +463,8 @@ def build_configs(self, hosts: int, services_by_host: int, debug_level=0): for file in ["check.pl", "notif.pl"]: shutil.copyfile("{0}/{1}".format(SCRIPT_DIR, file), "{0}/{1}".format(ENGINE_HOME, file)) - chmod("{0}/{1}".format(ENGINE_HOME, file), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP) + chmod("{0}/{1}".format(ENGINE_HOME, file), + stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP) if not exists(ENGINE_HOME + "/config{}/rw".format(inst)): makedirs(ENGINE_HOME + "/config{}/rw".format(inst)) @@ -571,7 +578,8 @@ def add_host_group(index: int, id_host_group: int, members: list): f.write(engine.create_host_group(id_host_group, mbs)) f.close() -def rename_host_group(index: int, id_host_group: int, name:str, members: list): + +def rename_host_group(index: int, id_host_group: int, name: str, members: list): mbs = [l for l in members if l in engine.hosts] f = open("/etc/centreon-engine/config{}/hostgroups.cfg".format(index), "w") logger.console(mbs) @@ -584,13 +592,15 @@ def rename_host_group(index: int, id_host_group: int, name:str, members: list): """.format(id_host_group, name, ",".join(mbs))) f.close() + def add_service_group(index: int, id_service_group: int, members: list): f = open("/etc/centreon-engine/config{}/servicegroups.cfg".format(index), "a+") logger.console(members) f.write(engine.create_service_group(id_service_group, members)) f.close() -def create_service(index:int, host_id:int, cmd_id:int): + +def create_service(index: int, host_id: 
int, cmd_id: int): f = open("/etc/centreon-engine/config{}/services.cfg".format(index), "a+") svc = engine.create_service(host_id, [1, cmd_id]) lst = svc.split('\n') @@ -599,12 +609,14 @@ def create_service(index:int, host_id:int, cmd_id:int): if m is not None: retval = int(m.group(1)) else: - raise Exception("Impossible to get the service id from '{}'".format(good)) + raise Exception( + "Impossible to get the service id from '{}'".format(good)) m = 0 f.write(svc) f.close() return retval + def engine_log_duplicate(result: list): dup = True for i in result: @@ -624,9 +636,9 @@ def add_bam_config_to_engine(): dbconf.init_bam() -def create_ba_with_services(name: str, typ: str, svc: list): +def create_ba_with_services(name: str, typ: str, svc: list, dt_policy="inherit"): global dbconf - dbconf.create_ba_with_services(name, typ, svc) + dbconf.create_ba_with_services(name, typ, svc, dt_policy) def get_command_id(service: int): @@ -636,61 +648,104 @@ def get_command_id(service: int): return dbconf.command[cmd_name] -def process_service_check_result(hst: str, svc: str, state: int, output: str): +def process_service_check_result(hst: str, svc: str, state: int, output: str, config='config0'): now = int(time.time()) cmd = "[{}] PROCESS_SERVICE_CHECK_RESULT;{};{};{};{}\n".format( now, hst, svc, state, output) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(f"/var/lib/centreon-engine/{config}/rw/centengine.cmd", "w") f.write(cmd) f.close() -def change_normal_svc_check_interval(hst: str, svc: str, check_interval: int): - now = int(time.time()) - cmd = "[{}] CHANGE_NORMAL_SVC_CHECK_INTERVAL;{};{};{}\n".format( - now, hst, svc, check_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() -def change_normal_host_check_interval(hst: str, check_interval: int): - now = int(time.time()) - cmd = "[{}] CHANGE_NORMAL_HOST_CHECK_INTERVAL;{};{}\n".format( - now, hst, check_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() +def change_normal_svc_check_interval(use_grpc: int, hst: str, svc: str, check_interval: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeServiceObjectIntVar(engine_pb2.ChangeObjectInt( + host_name=hst, service_desc=svc, mode=engine_pb2.ChangeObjectInt.Mode.NORMAL_CHECK_INTERVAL, dval=check_interval)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_NORMAL_SVC_CHECK_INTERVAL;{};{};{}\n".format( + now, hst, svc, check_interval) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() -def change_retry_svc_check_interval(hst: str, svc: str, retry_interval: int): - now = int(time.time()) - cmd = "[{}] CHANGE_RETRY_SVC_CHECK_INTERVAL;{};{};{}\n".format( - now, hst, svc, retry_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() -def change_retry_host_check_interval(hst: str, retry_interval: int): - now = int(time.time()) - cmd = "[{}] CHANGE_RETRY_HOST_CHECK_INTERVAL;{};{}\n".format( - now, hst, retry_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() +def change_normal_host_check_interval(use_grpc: int, hst: str, check_interval: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeHostObjectIntVar(engine_pb2.ChangeObjectInt( + 
host_name=hst, mode=engine_pb2.ChangeObjectInt.Mode.NORMAL_CHECK_INTERVAL, dval=check_interval)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_NORMAL_HOST_CHECK_INTERVAL;{};{}\n".format( + now, hst, check_interval) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() -def change_max_svc_check_attempts(hst: str, svc: str, max_check_attempts: int): - now = int(time.time()) - cmd = "[{}] CHANGE_MAX_SVC_CHECK_ATTEMPTS;{};{};{}\n".format( - now, hst, svc, max_check_attempts) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() -def change_max_host_check_attempts(hst: str, max_check_attempts: int): - now = int(time.time()) - cmd = "[{}] CHANGE_MAX_HOST_CHECK_ATTEMPTS;{};{}\n".format( - now, hst, max_check_attempts) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() +def change_retry_svc_check_interval(use_grpc: int, hst: str, svc: str, retry_interval: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeServiceObjectIntVar(engine_pb2.ChangeObjectInt( + host_name=hst, service_desc=svc, mode=engine_pb2.ChangeObjectInt.Mode.RETRY_CHECK_INTERVAL, dval=retry_interval)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_RETRY_SVC_CHECK_INTERVAL;{};{};{}\n".format( + now, hst, svc, retry_interval) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_retry_host_check_interval(use_grpc: int, hst: str, retry_interval: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeHostObjectIntVar(engine_pb2.ChangeObjectInt( + host_name=hst, mode=engine_pb2.ChangeObjectInt.Mode.RETRY_CHECK_INTERVAL, dval=retry_interval)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_RETRY_HOST_CHECK_INTERVAL;{};{}\n".format( + now, hst, retry_interval) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_max_svc_check_attempts(use_grpc: int, hst: str, svc: str, max_check_attempts: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeServiceObjectIntVar(engine_pb2.ChangeObjectInt( + host_name=hst, service_desc=svc, mode=engine_pb2.ChangeObjectInt.Mode.MAX_ATTEMPTS, intval=max_check_attempts)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_MAX_SVC_CHECK_ATTEMPTS;{};{};{}\n".format( + now, hst, svc, max_check_attempts) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_max_host_check_attempts(use_grpc: int, hst: str, max_check_attempts: int): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeHostObjectIntVar(engine_pb2.ChangeObjectInt( + host_name=hst, mode=engine_pb2.ChangeObjectInt.Mode.MAX_ATTEMPTS, intval=max_check_attempts)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_MAX_HOST_CHECK_ATTEMPTS;{};{}\n".format( + now, hst, max_check_attempts) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + def change_host_check_command(hst: str, Check_Command: str): now = int(time.time()) @@ -700,21 +755,306 @@ def change_host_check_command(hst: str, Check_Command: str): f.write(cmd) f.close() -def 
change_host_check_timeperiod(hst: str, check_timeperiod: str): - now = int(time.time()) - cmd = "[{}] CHANGE_HOST_CHECK_TIMEPERIOD;{};{}\n".format( - now, hst, check_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() -def change_host_notification_timeperiod(hst: str, notification_timeperiod: str): - now = int(time.time()) - cmd = "[{}] CHANGE_HOST_NOTIFICATION_TIMEPERIOD;{};{}\n".format( - now, hst, notification_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() +def change_host_check_timeperiod(use_grpc: int, hst: str, check_timeperiod: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeHostObjectCharVar(engine_pb2.ChangeObjectChar( + host_name=hst, mode=engine_pb2.ChangeObjectChar.Mode.CHANGE_CHECK_TIMEPERIOD, charval=check_timeperiod)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_HOST_CHECK_TIMEPERIOD;{};{}\n".format( + now, hst, check_timeperiod) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_host_notification_timeperiod(use_grpc: int, hst: str, notification_timeperiod: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeHostObjectCharVar(engine_pb2.ChangeObjectChar( + host_name=hst, mode=engine_pb2.ChangeObjectChar.Mode.CHANGE_NOTIFICATION_TIMEPERIOD, charval=notification_timeperiod)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_HOST_NOTIFICATION_TIMEPERIOD;{};{}\n".format( + now, hst, notification_timeperiod) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_svc_check_timeperiod(use_grpc: int, hst: str, svc: str, check_timeperiod: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeServiceObjectCharVar(engine_pb2.ChangeObjectChar( + host_name=hst, service_desc=svc, mode=engine_pb2.ChangeObjectChar.Mode.CHANGE_CHECK_TIMEPERIOD, charval=check_timeperiod)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_SVC_CHECK_TIMEPERIOD;{};{};{}\n".format( + now, hst, svc, check_timeperiod) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def change_svc_notification_timeperiod(use_grpc: int, hst: str, svc: str, notification_timeperiod: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.ChangeServiceObjectCharVar(engine_pb2.ChangeObjectChar( + host_name=hst, service_desc=svc, mode=engine_pb2.ChangeObjectChar.Mode.CHANGE_NOTIFICATION_TIMEPERIOD, charval=notification_timeperiod)) + else: + now = int(time.time()) + cmd = "[{}] CHANGE_SVC_NOTIFICATION_TIMEPERIOD;{};{};{}\n".format( + now, hst, svc, notification_timeperiod) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_and_child_notifications(use_grpc: int, hst: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.DisableHostAndChildNotifications( + engine_pb2.HostIdentifier(name=hst)) + else: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_AND_CHILD_NOTIFICATIONS;{}\n".format( + now, hst) + f = 
open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_and_child_notifications(use_grpc: int, hst: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.EnableHostAndChildNotifications( + engine_pb2.HostIdentifier(name=hst)) + else: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_AND_CHILD_NOTIFICATIONS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_check(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_CHECK;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_check(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_CHECK;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_event_handler(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_EVENT_HANDLER;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_event_handler(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_EVENT_HANDLER;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_flap_detection(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_FLAP_DETECTION;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_flap_detection(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_FLAP_DETECTION;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_notifications(use_grpc: int, hst: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.DisableHostNotifications( + engine_pb2.HostIdentifier(name=hst)) + else: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_NOTIFICATIONS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_notifications(use_grpc: int, hst: str): + if use_grpc > 0: + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + stub.EnableHostNotifications( + engine_pb2.HostIdentifier(name=hst)) + else: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_NOTIFICATIONS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_svc_checks(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_SVC_CHECKS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_svc_checks(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_SVC_CHECKS;{}\n".format( + now, hst) + f = 
open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_host_svc_notifications(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_HOST_SVC_NOTIFICATIONS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_host_svc_notifications(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_HOST_SVC_NOTIFICATIONS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_passive_host_checks(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_PASSIVE_HOST_CHECKS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_passive_host_checks(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_PASSIVE_HOST_CHECKS;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def disable_passive_svc_checks(use_grpc: int, hst: str, svc: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] DISABLE_PASSIVE_SVC_CHECKS;{};{}\n".format( + now, hst, svc) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def enable_passive_svc_checks(use_grpc: int, hst: str, svc: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] ENABLE_PASSIVE_SVC_CHECKS;{};{}\n".format( + now, hst, svc) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def start_obsessing_over_host(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] START_OBSESSING_OVER_HOST;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def stop_obsessing_over_host(use_grpc: int, hst: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] STOP_OBSESSING_OVER_HOST;{}\n".format( + now, hst) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def start_obsessing_over_svc(use_grpc: int, hst: str, svc: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] START_OBSESSING_OVER_SVC;{};{}\n".format( + now, hst, svc) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def stop_obsessing_over_svc(use_grpc: int, hst: str, svc: str): + if use_grpc == 0: + now = int(time.time()) + cmd = "[{}] STOP_OBSESSING_OVER_SVC;{};{}\n".format( + now, hst, svc) + f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + def service_ext_commands(hst: str, svc: str, state: int, output: str): now = int(time.time()) @@ -724,6 +1064,7 @@ def service_ext_commands(hst: str, svc: str, state: int, output: str): f.write(cmd) f.close() + def process_host_check_result(hst: str, state: int, output: str): now = int(time.time()) cmd = "[{}] PROCESS_HOST_CHECK_RESULT;{};{};{}\n".format( @@ -759,6 +1100,7 @@ def delete_host_downtimes(poller: int, hst: str): f.write(cmd) f.close() + def schedule_forced_svc_check(host: str, svc: str, pipe: str = "/var/lib/centreon-engine/rw/centengine.cmd"): now = int(time.time()) f = open(pipe, "w") @@ -777,33 +1119,38 @@ def schedule_forced_host_check(host: str, pipe: str 
= "/var/lib/centreon-engine/ time.sleep(0.05) -def create_severities_file(poller: int, nb:int, offset:int = 1): +def create_severities_file(poller: int, nb: int, offset: int = 1): engine.create_severities(poller, nb, offset) -def create_template_file(poller: int, typ: str, what: str, ids:list): + +def create_template_file(poller: int, typ: str, what: str, ids: list): engine.create_template_file(poller, typ, what, ids) -def create_template_file(poller: int, typ: str, what: str, ids:list): + +def create_template_file(poller: int, typ: str, what: str, ids: list): engine.create_template_file(poller, typ, what, ids) -def create_tags_file(poller: int, nb:int, offset:int = 1): + +def create_tags_file(poller: int, nb: int, offset: int = 1): engine.create_tags(poller, nb, offset) -def config_engine_add_cfg_file(poller:int, cfg:str): + +def config_engine_add_cfg_file(poller: int, cfg: str): ff = open("{}/config{}/centengine.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() r = re.compile(r"^\s*cfg_file=") for i in range(len(lines)): if r.match(lines[i]): - lines.insert(i, "cfg_file={}/config{}/{}\n".format(CONF_DIR, poller, cfg)) + lines.insert( + i, "cfg_file={}/config{}/{}\n".format(CONF_DIR, poller, cfg)) break ff = open("{}/config{}/centengine.cfg".format(CONF_DIR, poller), "w+") ff.writelines(lines) ff.close() -def add_severity_to_services(poller:int, severity_id:int, svc_lst): +def add_severity_to_services(poller: int, severity_id: int, svc_lst): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -819,7 +1166,7 @@ def add_severity_to_services(poller:int, severity_id:int, svc_lst): ff.close() -def add_severity_to_hosts(poller:int, severity_id:int, svc_lst): +def add_severity_to_hosts(poller: int, severity_id: int, svc_lst): ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -835,7 +1182,7 @@ def add_severity_to_hosts(poller:int, severity_id:int, svc_lst): ff.close() -def add_template_to_services(poller:int, tmpl:str, svc_lst): +def add_template_to_services(poller: int, tmpl: str, svc_lst): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -843,13 +1190,15 @@ def add_template_to_services(poller:int, tmpl:str, svc_lst): for i in range(len(lines)): m = r.match(lines[i]) if m is not None and m.group(1) in svc_lst: - lines.insert(i + 1, " use {}\n".format(tmpl)) + lines.insert( + i + 1, " use {}\n".format(tmpl)) ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def add_tags_to_services(poller:int, type:str, tag_id:str, svc_lst): + +def add_tags_to_services(poller: int, type: str, tag_id: str, svc_lst): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -857,12 +1206,14 @@ def add_tags_to_services(poller:int, type:str, tag_id:str, svc_lst): for i in range(len(lines)): m = r.match(lines[i]) if m is not None and m.group(1) in svc_lst: - lines.insert(i + 1, " {} {}\n".format(type, tag_id)) + lines.insert( + i + 1, " {} {}\n".format(type, tag_id)) ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def remove_severities_from_services(poller:int): + +def remove_severities_from_services(poller: int): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -872,7 +1223,8 @@ def remove_severities_from_services(poller:int): 
ff.writelines(out) ff.close() -def remove_severities_from_hosts(poller:int): + +def remove_severities_from_hosts(poller: int): ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -915,7 +1267,8 @@ def check_search(debug_file_path: str, str_to_search): return "connector::run not found" return "check_search don t find " + str_to_search -def add_tags_to_hosts(poller:int, type:str, tag_id:str, hst_lst): + +def add_tags_to_hosts(poller: int, type: str, tag_id: str, hst_lst): ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -923,33 +1276,37 @@ def add_tags_to_hosts(poller:int, type:str, tag_id:str, hst_lst): for i in range(len(lines)): m = r.match(lines[i]) if m is not None and m.group(1) in hst_lst: - lines.insert(i + 1, " {} {}\n".format(type, tag_id)) + lines.insert( + i + 1, " {} {}\n".format(type, tag_id)) ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def remove_tags_from_services(poller:int, type:str): + +def remove_tags_from_services(poller: int, type: str): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() - r = re.compile("r\"^\s*{}\s*\d+$\"".format(type)) + r = re.compile(r"^\s*" + type + r"\s*[0-9,]+$") lines = [l for l in lines if not r.match(l)] ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def remove_tags_from_hosts(poller:int, type:str): + +def remove_tags_from_hosts(poller: int, type: str): ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() - r = re.compile("r\"^\s*{}\s*\d+$\"".format(type)) + r = re.compile(r"^\s*" + type + r"\s*[0-9,]+$") lines = [l for l in lines if not r.match(l)] ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def add_template_to_services(poller:int, tmpl:str, svc_lst): + +def add_template_to_services(poller: int, tmpl: str, svc_lst): ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -957,13 +1314,15 @@ def add_template_to_services(poller:int, tmpl:str, svc_lst): for i in range(len(lines)): m = r.match(lines[i]) if m is not None and m.group(1) in svc_lst: - lines.insert(i + 1, " use {}\n".format(tmpl)) + lines.insert( + i + 1, " use {}\n".format(tmpl)) ff = open("{}/config{}/services.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def add_template_to_hosts(poller:int, tmpl:str, hst_lst): + +def add_template_to_hosts(poller: int, tmpl: str, hst_lst): ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() @@ -971,17 +1330,20 @@ def add_template_to_hosts(poller:int, tmpl:str, hst_lst): for i in range(len(lines)): m = r.match(lines[i]) if m is not None and m.group(1) in hst_lst: - lines.insert(i + 1, " use {}\n".format(tmpl)) + lines.insert( + i + 1, " use {}\n".format(tmpl)) ff = open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "w") ff.writelines(lines) ff.close() -def config_engine_remove_cfg_file(poller:int, fic:str): + +def config_engine_remove_cfg_file(poller: int, fic: str): ff = open("{}/config{}/centengine.cfg".format(CONF_DIR, poller), "r") lines = ff.readlines() ff.close() - r = re.compile(r"^\s*cfg_file=/etc/centreon-engine/config{}/{}".format(poller, fic)) + r = re.compile( + r"^\s*cfg_file=/etc/centreon-engine/config{}/{}".format(poller, fic)) linesearch = [l for l in 
diff --git a/tests/resources/db_conf.py b/tests/resources/db_conf.py
index 742764d19ed..11f02e8878d 100755
--- a/tests/resources/db_conf.py
+++ b/tests/resources/db_conf.py
@@ -1,11 +1,21 @@
 #!/usr/bin/python3
 from robot.api import logger
-import sys
 import pymysql.cursors
+from robot.libraries.BuiltIn import BuiltIn
+
 CONF_DIR = "/etc/centreon-engine"
 ENGINE_HOME = "/var/lib/centreon-engine"
+BuiltIn().import_resource('db_variables.robot')
+DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}")
+DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}")
+DB_USER = BuiltIn().get_variable_value("${DBUser}")
+DB_PASS = BuiltIn().get_variable_value("${DBPass}")
+DB_HOST = BuiltIn().get_variable_value("${DBHost}")
+DB_PORT = BuiltIn().get_variable_value("${DBPort}")
+
+
 class DbConf:
     def __init__(self, engine):
         self.last_service_id = 0
@@ -26,10 +36,10 @@ def __init__(self, engine):
     def clear_db(self):
         # Connect to the database
-        connection = pymysql.connect(host='localhost',
-                                     user='centreon',
-                                     password='centreon',
-                                     database='centreon',
+        connection = pymysql.connect(host=DB_HOST,
+                                     user=DB_USER,
+                                     password=DB_PASS,
+                                     database=DB_NAME_CONF,
                                      charset='utf8mb4',
                                      cursorclass=pymysql.cursors.DictCursor)
@@ -56,10 +66,10 @@ def clear_db(self):
         connection.commit()
 
     def init_bam(self):
-        connection = pymysql.connect(host='localhost',
-                                     user='centreon',
-                                     password='centreon',
-                                     database='centreon',
+        connection = pymysql.connect(host=DB_HOST,
+                                     user=DB_USER,
+                                     password=DB_PASS,
+                                     database=DB_NAME_CONF,
                                      charset='utf8mb4',
                                      cursorclass=pymysql.cursors.DictCursor)
@@ -73,10 +83,10 @@ def init_bam(self):
         self.engine.centengine_conf_add_bam()
 
     def create_conf_db(self):
-        connection = pymysql.connect(host='localhost',
-                                     user='centreon',
-                                     password='centreon',
-                                     database='centreon',
+        connection = pymysql.connect(host=DB_HOST,
+                                     user=DB_USER,
+                                     password=DB_PASS,
+                                     database=DB_NAME_CONF,
                                      charset='utf8mb4',
                                      cursorclass=pymysql.cursors.DictCursor)
@@ -160,11 +170,11 @@ def create_conf_db(self):
             hid += 1
         connection.commit()
 
-    def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)]):
-        connection = pymysql.connect(host='localhost',
-                                     user='centreon',
-                                     password='centreon',
-                                     database='centreon',
+    def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)], dt_policy):
+        connection = pymysql.connect(host=DB_HOST,
+                                     user=DB_USER,
+                                     password=DB_PASS,
+                                     database=DB_NAME_CONF,
                                      charset='utf8mb4',
                                      cursorclass=pymysql.cursors.DictCursor)
@@ -174,7 +184,14 @@ def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)]):
         elif typ == 'worst':
             t = 2
         with connection.cursor() as cursor:
-            cursor.execute("INSERT INTO mod_bam (name, state_source, activate,id_reporting_period,level_w,level_c,id_notification_period,notifications_enabled,event_handler_enabled, inherit_kpi_downtimes) VALUES ('{}',{},'1',1, 80, 70, 1,'0', '0','1')".format(name, t))
+            if dt_policy == "inherit":
+                inherit_dt = 1
+            elif dt_policy == "ignore":
+                inherit_dt = 2
+            else:
+                inherit_dt = 0
+
+            cursor.execute("INSERT INTO mod_bam (name, state_source, activate,id_reporting_period,level_w,level_c,id_notification_period,notifications_enabled,event_handler_enabled, inherit_kpi_downtimes) VALUES ('{}',{},'1',1, 80, 70, 1,'0', '0','{}')".format(name, t, inherit_dt))
             id_ba = cursor.lastrowid
             sid = self.engine.create_bam_service("ba_{}".format(id_ba), name, "_Module_BAM_1", "centreon-bam-check!{}".format(id_ba))
             cursor.execute("INSERT INTO service (service_id, service_description, display_name, service_active_checks_enabled, service_passive_checks_enabled,service_register) VALUES ({0}, \"ba_{1}\",\"{2}\",'2','2','2')".format(sid, id_ba, name))
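With this change `db_conf.py` stops hard-coding `localhost`/`centreon`/`centreon` and reads every connection setting from `db_variables.robot` through `BuiltIn()`, which only works while a Robot Framework run is active, since `BuiltIn` needs a live execution context. A sketch of the same pattern with fallback defaults and an explicit port cast; both the defaults and the `port=` argument are additions of this example, not part of the patch:

    from robot.libraries.BuiltIn import BuiltIn
    import pymysql.cursors

    def connect_conf_db():
        b = BuiltIn()
        # get_variable_value returns its second argument when the
        # variable is undefined (e.g. the resource was not imported).
        return pymysql.connect(
            host=b.get_variable_value("${DBHost}", "localhost"),
            user=b.get_variable_value("${DBUser}", "centreon"),
            password=b.get_variable_value("${DBPass}", "centreon"),
            database=b.get_variable_value("${DBNameConf}", "centreon"),
            # Robot variables come back as strings, so cast the port.
            port=int(b.get_variable_value("${DBPort}", "3306")),
            charset="utf8mb4",
            cursorclass=pymysql.cursors.DictCursor)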
cursor.execute("INSERT INTO service (service_id, service_description, display_name, service_active_checks_enabled, service_passive_checks_enabled,service_register) VALUES ({0}, \"ba_{1}\",\"{2}\",'2','2','2')".format(sid, id_ba, name)) diff --git a/tests/resources/db_variables.robot b/tests/resources/db_variables.robot new file mode 100644 index 00000000000..6aa17e4f969 --- /dev/null +++ b/tests/resources/db_variables.robot @@ -0,0 +1,9 @@ +*** Variables *** +${DBName} centreon_storage +${DBNameConf} centreon +${DBHost} localhost +${DBUser} centreon +${DBPass} Fa@V@?L9@4Wfh70u +${DBPort} 3306 +${DBUserRoot} root +${DBPassRoot} Af58MwnP@zJ*@6!5 \ No newline at end of file diff --git a/tests/resources/resources.robot b/tests/resources/resources.robot index 514dd532e37..4aa30b14cca 100644 --- a/tests/resources/resources.robot +++ b/tests/resources/resources.robot @@ -1,4 +1,5 @@ *** Settings *** +Resource ./db_variables.robot Library Process Library OperatingSystem Library Common.py @@ -9,6 +10,10 @@ Clean Before Suite Clear Engine Logs Clear Broker Logs +Clean Grpc Before Suite + set grpc port 0 + Clean Before Suite + Clean After Suite # Remove Files ${ENGINE_LOG}${/}centengine.log ${ENGINE_LOG}${/}centengine.debug # Remove Files ${BROKER_LOG}${/}central-broker-master.log ${BROKER_LOG}${/}central-rrd-master.log ${BROKER_LOG}${/}central-module-master.log @@ -26,6 +31,9 @@ Clear Broker Logs Start Broker Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 Start Process /usr/sbin/cbd /etc/centreon-broker/central-rrd.json alias=b2 +# ${log_pid1}= Get Process Id b1 +# ${log_pid2}= Get Process Id b2 +# Log To Console \npidcentral=${log_pid1} pidrrd=${log_pid2}\n Reload Broker Send Signal To Process SIGHUP b1 @@ -60,6 +68,8 @@ Start Engine Create Directory ${log} Create Directory ${lib} Start Process /usr/sbin/centengine ${conf} alias=${alias} +# ${log_pid1}= Get Process Id ${alias} +# Log To Console \npidengine${idx}=${log_pid1}\n END Start Custom Engine @@ -122,10 +132,4 @@ ${logEngine1} ${ENGINE_LOG}/config1/centengine.log ${logEngine2} ${ENGINE_LOG}/config2/centengine.log ${dbgEngine0} ${ENGINE_LOG}/config0/centengine.debug ${dbgEngine1} ${ENGINE_LOG}/config1/centengine.debug -${dbgEngine2} ${ENGINE_LOG}/config2/centengine.debug - -${DBName} centreon_storage -${DBHost} localhost -${DBUser} centreon -${DBPass} centreon -${DBPort} 3306 +${dbgEngine2} ${ENGINE_LOG}/config2/centengine.debug \ No newline at end of file