From 42ce72b6be1a1d62baff5e27b5595b6376de2358 Mon Sep 17 00:00:00 2001 From: tuntoja Date: Thu, 31 Oct 2024 11:10:35 +0100 Subject: [PATCH 01/12] enh(ci): update pull and push jobs in workflows --- .github/workflows/centreon-collect.yml | 4 ++-- .github/workflows/docker-builder.yml | 4 ++-- .github/workflows/gorgone.yml | 8 ++++---- .github/workflows/libzmq.yml | 8 ++++---- .github/workflows/lua-curl.yml | 4 ++-- .github/workflows/package-collect.yml | 4 ++-- .github/workflows/robot-nightly.yml | 8 ++++---- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index f10bda6c92d..780a9c2b39e 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -81,8 +81,8 @@ jobs: uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} - name: Test ${{ matrix.image }} uses: ./.github/actions/runner-docker diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index cca49cdc610..09a7c744558 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -105,8 +105,8 @@ jobs: uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml index 80beae3bf2c..611b7f53491 100644 --- a/.github/workflows/gorgone.yml +++ b/.github/workflows/gorgone.yml @@ -52,8 +52,8 @@ jobs: veracode_api_id: ${{ secrets.VERACODE_API_ID_GORG }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_GORG }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} package: needs: [get-environment] @@ -85,8 +85,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml index 09025644d24..89f6f764ae7 100644 --- a/.github/workflows/libzmq.yml +++ b/.github/workflows/libzmq.yml @@ -41,8 +41,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} @@ 
-101,8 +101,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index f4bc3b53cad..6f813e07880 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -83,8 +83,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index bb41d9d71fc..35e2873b199 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -76,8 +76,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ inputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index c66c8ae6249..1e6c18717b1 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -44,8 +44,8 @@ jobs: veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} package: needs: [get-environment] @@ -124,8 +124,8 @@ jobs: tests_params: ${{matrix.tests_params}} test_group_name: ${{matrix.test_group_name}} secrets: - registry_username: ${{ secrets.DOCKER_REGISTRY_ID }} - registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + registry_username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + registry_password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} xray_client_id: ${{ secrets.XRAY_CLIENT_ID }} From 5d0d4317b8f513ea7937acde9edb1adb650afba4 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Wed, 20 Nov 2024 16:34:58 +0100 Subject: [PATCH 02/12] fix(broker/unified_sql): typo on hosts_hostgroups (#1881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit REFS: MON-153308 Co-authored-by: Stéphane Duret --- broker/unified_sql/src/stream_sql.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index edd91e546ed..adf4cbe0269 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -1664,7 
+1664,7 @@ void stream::_process_pb_host_group_member(const std::shared_ptr& d) { } std::string query = fmt::format( - "DELETE FROM hosts_hostgroup WHERE host_id={} and hostgroup_id = {}", + "DELETE FROM hosts_hostgroups WHERE host_id={} and hostgroup_id = {}", hgm.host_id(), hgm.hostgroup_id()); _mysql.run_query(query, database::mysql_error::delete_host_group_member, From a5ab52514fda1b12d8edfc684f767efb44896778 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Thu, 21 Nov 2024 08:21:13 +0100 Subject: [PATCH 03/12] fix(broker): perfdata parsing (#1880) * fix(common/perfdata): Fix parsing labels with brackets in their names (but not data source types) REFS: MON-153310 --------- Co-authored-by: Vincent Untz --- common/src/perfdata.cc | 5 ++++- common/tests/perfdata_test.cc | 15 +++++++++++++++ engine/tests/string/string.cc | 11 +++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/common/src/perfdata.cc b/common/src/perfdata.cc index 80945b75950..0d6f5b89af3 100644 --- a/common/src/perfdata.cc +++ b/common/src/perfdata.cc @@ -265,18 +265,21 @@ std::list perfdata::parse_perfdata( /* The label is given by s and finishes at end */ if (*end == ']') { - --end; if (strncmp(s, "a[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::absolute; } else if (strncmp(s, "c[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::counter; } else if (strncmp(s, "d[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::derive; } else if (strncmp(s, "g[", 2) == 0) { s += 2; + --end; p._value_type = perfdata::data_type::gauge; } } diff --git a/common/tests/perfdata_test.cc b/common/tests/perfdata_test.cc index bab234f9522..c64d9fe623a 100644 --- a/common/tests/perfdata_test.cc +++ b/common/tests/perfdata_test.cc @@ -623,3 +623,18 @@ TEST_F(PerfdataParser, BadMetric1) { ++i; } } + +TEST_F(PerfdataParser, ExtractPerfdataBrackets) { + std::string perfdata( + "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80"); + auto lst{common::perfdata::parse_perfdata(0, 0, perfdata.c_str(), _logger)}; + auto it = lst.begin(); + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "xx[aa a aa]"); + ++it; + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "[a aa]"); + ++it; + ASSERT_NE(it, lst.end()); + ASSERT_EQ(it->name(), "aa a]"); +} diff --git a/engine/tests/string/string.cc b/engine/tests/string/string.cc index 3486ba7e15d..e0adeb7217d 100644 --- a/engine/tests/string/string.cc +++ b/engine/tests/string/string.cc @@ -62,6 +62,17 @@ TEST(string_utils, extractPerfdataGaugeDiff) { "d[aa a]=28;13;54;0;80"); } +TEST(string_utils, extractPerfdataBrackets) { + std::string perfdata( + "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80"); + ASSERT_EQ(string::extract_perfdata(perfdata, "xx[aa a aa]"), + "'xx[aa a aa]'=2;3;7;1;9"); + ASSERT_EQ(string::extract_perfdata(perfdata, "[a aa]"), + "'[a aa]'=12;25;50;0;118"); + ASSERT_EQ(string::extract_perfdata(perfdata, "aa a]"), + "'aa a]'=28;13;54;0;80"); +} + TEST(string_utils, removeThresholdsWithoutThresholds) { std::string perfdata("a=2V"); ASSERT_EQ(string::remove_thresholds(perfdata), "a=2V"); From e6420134437cfe0b60d763d7ce5a8cf3b5f75e25 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Fri, 22 Nov 2024 15:30:06 +0100 Subject: [PATCH 04/12] fix(broker/sql): two issues in the mysql object * A possible segfault fixed. 
* An issue on errors raised by mariadb that can have errno=0 REFS: MON-153321 --- broker/core/sql/src/mysql_connection.cc | 27 ++- broker/core/sql/src/mysql_multi_insert.cc | 6 +- engine/modules/opentelemetry/CMakeLists.txt | 21 +-- .../services-and-bulk-stmt.robot | 157 +++++++++++++++++- tests/broker-engine/services-increased.robot | 4 +- tests/resources/Broker.py | 99 ++++++++++- tests/resources/resources.resource | 17 +- 7 files changed, 294 insertions(+), 37 deletions(-) diff --git a/broker/core/sql/src/mysql_connection.cc b/broker/core/sql/src/mysql_connection.cc index 5c6d2548bba..0951a439b21 100644 --- a/broker/core/sql/src/mysql_connection.cc +++ b/broker/core/sql/src/mysql_connection.cc @@ -16,6 +16,7 @@ * For more information : contact@centreon.com */ #include +#include #include "com/centreon/broker/config/applier/init.hh" #include "com/centreon/broker/misc/misc.hh" @@ -460,18 +461,26 @@ void mysql_connection::_statement(mysql_task* t) { "mysql_connection {:p}: execute statement {:x} attempt {}: {}", static_cast(this), task->statement_id, attempts, query); if (mysql_stmt_execute(stmt)) { - std::string err_msg( - fmt::format("{} errno={} {}", mysql_error::msg[task->error_code], - ::mysql_errno(_conn), ::mysql_stmt_error(stmt))); - SPDLOG_LOGGER_ERROR(_logger, - "connection fail to execute statement {:p}: {}", - static_cast(this), err_msg); - if (_server_error(::mysql_stmt_errno(stmt))) { + int32_t err_code = ::mysql_stmt_errno(stmt); + std::string err_msg(fmt::format("{} errno={} {}", + mysql_error::msg[task->error_code], + err_code, ::mysql_stmt_error(stmt))); + if (err_code == 0) { + SPDLOG_LOGGER_TRACE(_logger, + "mysql_connection: errno=0, so we simulate a " + "server error CR_SERVER_LOST"); + err_code = CR_SERVER_LOST; + } else { + SPDLOG_LOGGER_ERROR(_logger, + "connection fail to execute statement {:p}: {}", + static_cast(this), err_msg); + } + if (_server_error(err_code)) { set_error_message(err_msg); break; } - if (mysql_stmt_errno(stmt) != 1213 && - mysql_stmt_errno(stmt) != 1205) // Dead Lock error + if (err_code != ER_LOCK_DEADLOCK && + err_code != ER_LOCK_WAIT_TIMEOUT) // Dead Lock error attempts = MAX_ATTEMPTS; if (mysql_commit(_conn)) { diff --git a/broker/core/sql/src/mysql_multi_insert.cc b/broker/core/sql/src/mysql_multi_insert.cc index cafc020e386..7d375cb82cd 100644 --- a/broker/core/sql/src/mysql_multi_insert.cc +++ b/broker/core/sql/src/mysql_multi_insert.cc @@ -132,7 +132,11 @@ void bulk_or_multi::execute(mysql& connexion, my_error::code ec, int thread_id) { if (_bulk_stmt) { - if (!_bulk_bind->empty()) { + /* If the database connection is lost, we can have this issue */ + if (!_bulk_bind) { + _bulk_bind = _bulk_stmt->create_bind(); + _bulk_bind->reserve(_bulk_row); + } else if (!_bulk_bind->empty()) { _bulk_stmt->set_bind(std::move(_bulk_bind)); connexion.run_statement(*_bulk_stmt, ec, thread_id); _bulk_bind = _bulk_stmt->create_bind(); diff --git a/engine/modules/opentelemetry/CMakeLists.txt b/engine/modules/opentelemetry/CMakeLists.txt index 2da7c0972ec..a5437e61867 100644 --- a/engine/modules/opentelemetry/CMakeLists.txt +++ b/engine/modules/opentelemetry/CMakeLists.txt @@ -34,7 +34,7 @@ foreach(name IN LISTS service_files) COMMAND ${Protobuf_PROTOC_EXECUTABLE} ARGS --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} - --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto --grpc_out=${SRC_DIR} ${proto_file} VERBATIM WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -61,7 +61,7 @@ add_custom_command( 
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # mod_externalcmd target. -add_library(opentelemetry SHARED +add_library(opentelemetry SHARED ${SRC_DIR}/centreon_agent/agent.grpc.pb.cc ${SRC_DIR}/centreon_agent/agent.pb.cc ${SRC_DIR}/centreon_agent/agent_check_result_builder.cc @@ -86,25 +86,26 @@ ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc target_precompile_headers(opentelemetry PRIVATE precomp_inc/precomp.hh) # set(EXTERNALCMD_MODULE "${EXTERNALCMD_MODULE}" PARENT_SCOPE) -target_link_libraries(opentelemetry +target_link_libraries(opentelemetry spdlog::spdlog -L${Boost_LIBRARY_DIR_RELEASE} boost_program_options) -add_dependencies(opentelemetry +add_dependencies(opentelemetry pb_open_telemetry_lib pb_neb_lib + engine_rpc pb_tag_lib) -target_include_directories(opentelemetry PRIVATE - "${MODULE_DIR}/inc/com/centreon/engine/modules/opentelemetry" - "${CMAKE_SOURCE_DIR}/bbdo" +target_include_directories(opentelemetry PRIVATE + "${MODULE_DIR}/inc/com/centreon/engine/modules/opentelemetry" + "${CMAKE_SOURCE_DIR}/bbdo" "${MODULE_DIR}/inc" ${CMAKE_SOURCE_DIR}/common/inc - ${CMAKE_SOURCE_DIR}/common/http/inc - ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/common/http/inc + ${CMAKE_SOURCE_DIR}/common/grpc/inc src - ${PROJECT_SOURCE_DIR}/enginerpc + ${PROJECT_SOURCE_DIR}/enginerpc ${CMAKE_SOURCE_DIR}/common/src ) diff --git a/tests/broker-engine/services-and-bulk-stmt.robot b/tests/broker-engine/services-and-bulk-stmt.robot index 5662fbdba12..8464aaf4eb7 100644 --- a/tests/broker-engine/services-and-bulk-stmt.robot +++ b/tests/broker-engine/services-and-bulk-stmt.robot @@ -35,6 +35,7 @@ EBBPS1 Should Be True ... ${result} ... An Initial service state on host_1:service_1000 should be raised before we can start external commands. + FOR ${i} IN RANGE ${1000} Ctn Process Service Check Result host_1 service_${i+1} 1 warning${i} END @@ -55,6 +56,7 @@ EBBPS1 IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) + Disconnect From Database FOR ${i} IN RANGE ${1000} Ctn Process Service Check Result host_1 service_${i+1} 2 warning${i} @@ -92,6 +94,7 @@ EBBPS1 IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) + Disconnect From Database EBBPS2 [Documentation] 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. @@ -112,7 +115,7 @@ EBBPS2 ${start} Get Current Date ${start_broker} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine ${content} Create List INITIAL SERVICE STATE: host_1;service_1000; ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 30 Should Be True @@ -138,6 +141,7 @@ EBBPS2 IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) + Disconnect From Database FOR ${i} IN RANGE ${1000} Ctn Process Service Check Result host_1 service_${i+1} 2 critical${i} @@ -174,6 +178,7 @@ EBBPS2 IF "${output}" == "((0,),)" BREAK END Should Be Equal As Strings ${output} ((0,),) + Disconnect From Database EBMSSM [Documentation] 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin. 
@@ -194,7 +199,7 @@ EBMSSM Ctn Clear Retention ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Broker Set Sql Manager Stats 51001 5 5 # Let's wait for the external command check start @@ -220,6 +225,7 @@ EBMSSM Sleep 1s END Should Be True ${output[0][0]} >= 100000 + Disconnect From Database EBPS2 [Documentation] 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. @@ -243,7 +249,7 @@ EBPS2 ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine # Let's wait for the external command check start ${content} Create List check_for_external_commands() ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -297,7 +303,7 @@ RLCode ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine ${content} Create List check_for_external_commands() ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -367,7 +373,7 @@ metric_mapping ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine ${content} Create List check_for_external_commands() ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -407,7 +413,7 @@ Services_and_bulks_${id} ${start} Get Current Date Ctn Start Broker - Ctn Start engine + Ctn Start Engine Ctn Broker Set Sql Manager Stats 51001 5 5 # Let's wait for the external command check start @@ -438,6 +444,145 @@ Services_and_bulks_${id} ... 1 1020 ... 2 150 +EBMSSMDBD + [Documentation] 1000 services are configured with 100 metrics each. + ... The rrd output is removed from the broker configuration. + ... While metrics are written in the database, we stop the database and then restart it. + ... Broker must recover its connection to the database and continue to write metrics. + [Tags] broker engine unified_sql MON-153321 + Ctn Clear Metrics + Ctn Config Engine ${1} ${1} ${1000} + # We want all the services to be passive to avoid parasite checks during our test. + Ctn Set Services Passive ${0} service_.* + Ctn Config Broker central + Ctn Config Broker rrd + Ctn Config Broker module ${1} + Ctn Config BBDO3 1 + Ctn Broker Config Log central core error + Ctn Broker Config Log central tcp error + Ctn Broker Config Log central sql debug + Ctn Config Broker Sql Output central unified_sql + Ctn Config Broker Remove Rrd Output central + Ctn Clear Retention + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + + ${content} Create List check_for_external_commands() + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message about check_for_external_commands() should be available. + + ${start} Ctn Get Round Current Date + # Let's wait for one "INSERT INTO data_bin" to appear in stats. + Log To Console Many service checks with 100 metrics each are processed. + FOR ${i} IN RANGE ${1000} + Ctn Process Service Check Result With Metrics host_1 service_${i+1} 1 warning${i} 100 + END + + Log To Console We wait for at least one metric to be written in the database. + # Let's wait for all force checks to be in the storage database. + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + FOR ${i} IN RANGE ${500} + ${output} Query + ... 
SELECT COUNT(s.last_check) FROM metrics m LEFT JOIN index_data i ON m.index_id = i.id LEFT JOIN services s ON s.host_id = i.host_id AND s.service_id = i.service_id WHERE metric_name LIKE "metric_%%" AND s.last_check >= ${start} + IF ${output[0][0]} >= 1 BREAK + Sleep 1s + END + Disconnect From Database + + Log To Console Let's start some database manipulation... + ${start} Get Current Date + + FOR ${i} IN RANGE ${3} + Ctn Stop Mysql + Sleep 10s + Ctn Start Mysql + ${content} Create List could not insert data in data_bin + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 10 + Log To Console ${result} + END + +EBMSSMPART + [Documentation] 1000 services are configured with 100 metrics each. + ... The rrd output is removed from the broker configuration. + ... The data_bin table is configured with two partitions p1 and p2 such + ... that p1 contains old data and p2 contains current data. + ... While metrics are written in the database, we remove the p2 partition. + ... Once the p2 partition is recreated, broker must recover its connection + ... to the database and continue to write metrics. + ... To check that last point, we force a last service check and we check + ... that its metrics are written in the database. + [Tags] broker engine unified_sql MON-153321 + Ctn Clear Metrics + Ctn Config Engine ${1} ${1} ${1000} + # We want all the services to be passive to avoid parasite checks during our test. + Ctn Set Services Passive ${0} service_.* + Ctn Config Broker central + Ctn Config Broker rrd + Ctn Config Broker module ${1} + Ctn Config BBDO3 1 + Ctn Broker Config Log central core error + Ctn Broker Config Log central tcp error + Ctn Broker Config Log central sql trace + Ctn Config Broker Sql Output central unified_sql + Ctn Config Broker Remove Rrd Output central + Ctn Clear Retention + + Ctn Prepare Partitions For Data Bin + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + + Ctn Wait For Engine To Be Ready ${start} 1 + + ${start} Ctn Get Round Current Date + # Let's wait for one "INSERT INTO data_bin" to appear in stats. + Log To Console Many service checks with 100 metrics each are processed. + FOR ${i} IN RANGE ${1000} + Ctn Process Service Check Result With Metrics host_1 service_${i+1} 1 warning${i} 100 + END + + Log To Console We wait for at least one metric to be written in the database. + # Let's wait for all force checks to be in the storage database. + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + FOR ${i} IN RANGE ${500} + ${output} Query + ... SELECT COUNT(s.last_check) FROM metrics m LEFT JOIN index_data i ON m.index_id = i.id LEFT JOIN services s ON s.host_id = i.host_id AND s.service_id = i.service_id WHERE metric_name LIKE "metric_%%" AND s.last_check >= ${start} + IF ${output[0][0]} >= 1 BREAK + Sleep 1s + END + Disconnect From Database + + Log To Console Let's start some database manipulation... + Ctn Remove P2 From Data Bin + ${start} Get Current Date + + ${content} Create List errno= + FOR ${i} IN RANGE ${6} + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 10 + IF ${result} BREAK + END + + Log To Console Let's recreate the p2 partition... + Ctn Add P2 To Data Bin + + ${start} Ctn Get Round Current Date + Ctn Process Service Check Result With Metrics host_1 service_1 0 Last Output OK 100 + + Log To Console Let's wait for the last service check to be in the database... 
+ Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + FOR ${i} IN RANGE ${120} + ${output} Query SELECT count(*) FROM data_bin WHERE ctime >= ${start} - 10 + Log To Console ${output} + IF ${output[0][0]} >= 100 BREAK + Sleep 1s + END + Log To Console ${output} + Should Be True ${output[0][0]} >= 100 + Disconnect From Database + + Ctn Init Data Bin Without Partition + *** Keywords *** Ctn Test Clean diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index 895c6187de0..249e1239573 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -42,7 +42,7 @@ EBNSVC1 ${result} Ctn Check Number Of Resources Monitored By Poller Is ${3} ${nb_res} 30 Should Be True ${result} Poller 3 should monitor ${nb_srv} services and 16 hosts. END - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker Service_increased_huge_check_interval @@ -144,4 +144,4 @@ Service_increased_huge_check_interval ... rra[0].pdp_per_row must be equal to 5400 for metric ${m} END - [Teardown] Run Keywords Ctn Stop engine AND Ctn Kindly Stop Broker + [Teardown] Run Keywords Ctn Stop Engine AND Ctn Kindly Stop Broker diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index 3f4b0068c89..68669c9faa7 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -1689,7 +1689,7 @@ def ctn_get_service_index(host_id: int, service_id: int, timeout: int = 60): my_id = [r['id'] for r in result] if len(my_id) > 0: logger.console( - f"Index data {id} found for service {host_id}:{service_id}") + f"Index data {id} found for service {host_id}:{service_id}") return my_id[0] time.sleep(2) logger.console(f"no index data found for service {host_id}:{service_id}") @@ -2911,3 +2911,100 @@ def ctn_get_broker_log_info(port, log, timeout=TIMEOUT): except: logger.console("gRPC server not ready") return str(res) + + +def ctn_prepare_partitions_for_data_bin(): + """ + Create two partitions for the data_bin table. + The first one named p1 contains data with ctime older than now - 60. + The second one named p2 contains data with ctime older than now + 3600. + """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + now = int(time.time()) + before = now - 60 + after = now + 3600 + with connection: + with connection.cursor() as cursor: + cursor.execute("DROP TABLE IF EXISTS data_bin") + sql = f"""CREATE TABLE `data_bin` ( + `id_metric` int(11) DEFAULT NULL, + `ctime` int(11) DEFAULT NULL, + `value` float DEFAULT NULL, + `status` enum('0','1','2','3','4') DEFAULT NULL, + KEY `index_metric` (`id_metric`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 + PARTITION BY RANGE (`ctime`) +(PARTITION `p1` VALUES LESS THAN ({before}) ENGINE = InnoDB, + PARTITION `p2` VALUES LESS THAN ({after}) ENGINE = InnoDB)""" + cursor.execute(sql) + connection.commit() + + +def ctn_remove_p2_from_data_bin(): + """ + Remove the partition p2 from the data_bin table. + """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + with connection: + with connection.cursor() as cursor: + cursor.execute("ALTER TABLE data_bin DROP PARTITION p2") + connection.commit() + + +def ctn_add_p2_to_data_bin(): + """ + Add the partition p2 the the data_bin table. 
+ """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + after = int(time.time()) + 3600 + with connection: + with connection.cursor() as cursor: + cursor.execute( + f"ALTER TABLE data_bin ADD PARTITION (PARTITION p2 VALUES LESS THAN ({after}))") + connection.commit() + + +def ctn_init_data_bin_without_partition(): + """ + Recreate the data_bin table without partition. + """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + now = int(time.time()) + before = now - 60 + after = now + 3600 + with connection: + with connection.cursor() as cursor: + cursor.execute("DROP TABLE IF EXISTS data_bin") + sql = f"""CREATE TABLE `data_bin` ( + `id_metric` int(11) DEFAULT NULL, + `ctime` int(11) DEFAULT NULL, + `value` float DEFAULT NULL, + `status` enum('0','1','2','3','4') DEFAULT NULL, + KEY `index_metric` (`id_metric`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1""" + cursor.execute(sql) + connection.commit() diff --git a/tests/resources/resources.resource b/tests/resources/resources.resource index 474c70b1d4f..ac2e0ae7d50 100644 --- a/tests/resources/resources.resource +++ b/tests/resources/resources.resource @@ -370,13 +370,14 @@ Ctn Dump Ba On Error Ctn Process Service Result Hard [Arguments] ${host} ${svc} ${state} ${output} - Repeat Keyword - ... 3 times - ... Ctn Process Service Check Result - ... ${host} - ... ${svc} - ... ${state} - ... ${output} + FOR ${idx} IN RANGE 3 + Ctn Process Service Check Result + ... ${host} + ... ${svc} + ... ${state} + ... ${output} + Sleep 1s + END Ctn Wait For Engine To Be Ready [Arguments] ${start} ${nbEngine}=1 @@ -386,7 +387,7 @@ Ctn Wait For Engine To Be Ready ${result} Ctn Find In Log With Timeout ... ${ENGINE_LOG}/config${i}/centengine.log ... ${start} ${content} 60 - ... verbose=False + ... verbose=False Should Be True ... ${result} ... A message telling check_for_external_commands() should be available in config${i}/centengine.log. 
From 61ae9a06739be3c297d96d29b37847a4e858fb5f Mon Sep 17 00:00:00 2001 From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> Date: Wed, 27 Nov 2024 08:16:19 +0100 Subject: [PATCH 05/12] retention.dat is not referenced in centreon-collect.yaml, so it's not deleted on uninstall or upgrade (#1772) (#1824) --- packaging/centreon-collect.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/packaging/centreon-collect.yaml b/packaging/centreon-collect.yaml index 264f42f8640..f1f925d8655 100644 --- a/packaging/centreon-collect.yaml +++ b/packaging/centreon-collect.yaml @@ -30,9 +30,6 @@ contents: owner: centreon-engine group: centreon-engine - - dst: "/var/log/centreon-engine/retention.dat" - type: ghost - - src: "files/empty_file" dst: "/var/log/centreon-engine/status.dat" file_info: From 108bfa7ad997eb00227adc95ffacb736334d267c Mon Sep 17 00:00:00 2001 From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> Date: Wed, 27 Nov 2024 08:22:05 +0100 Subject: [PATCH 06/12] Mon 152090 rename reverse connection to reversed grpc streaming (#1843) * rename reverse_connection to reversed_grpc_streaming * upgrade vcpkg --- agent/src/config.cc | 6 +++--- agent/src/config_win.cc | 2 +- .../process/inc/com/centreon/common/process/process.hh | 2 +- common/process/src/process.cc | 10 +++++----- tests/resources/Agent.py | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/agent/src/config.cc b/agent/src/config.cc index d15de69aead..1616147a029 100644 --- a/agent/src/config.cc +++ b/agent/src/config.cc @@ -37,7 +37,7 @@ const std::string_view config::config_schema(R"( "minLength": 5 }, "endpoint": { - "description": "Endpoint where agent has to connect to on the poller side or listening endpoint on the agent side in case of reverse_connection", + "description": "Endpoint where agent has to connect to on the poller side or listening endpoint on the agent side in case of reversed_grpc_streaming", "type": "string", "pattern": "[\\w\\.:]+:\\w+" }, @@ -61,7 +61,7 @@ const std::string_view config::config_schema(R"( "description": "Name of the SSL certification authority", "type": "string" }, - "reverse_connection": { + "reversed_grpc_streaming": { "description": "Set to true to make Engine connect to the agent. Requires the agent to be configured as a server. 
Default: false", "type": "boolean" }, @@ -144,5 +144,5 @@ config::config(const std::string& path) { if (_host.empty()) { _host = boost::asio::ip::host_name(); } - _reverse_connection = json_config.get_bool("reverse_connection", false); + _reverse_connection = json_config.get_bool("reversed_grpc_streaming", false); } diff --git a/agent/src/config_win.cc b/agent/src/config_win.cc index 9fe35068904..a1315c3697d 100644 --- a/agent/src/config_win.cc +++ b/agent/src/config_win.cc @@ -103,7 +103,7 @@ config::config(const std::string& registry_key) { if (_host.empty()) { _host = boost::asio::ip::host_name(); } - _reverse_connection = get_bool("reverse_connection"); + _reverse_connection = get_bool("reversed_grpc_streaming"); RegCloseKey(h_key); } diff --git a/common/process/inc/com/centreon/common/process/process.hh b/common/process/inc/com/centreon/common/process/process.hh index 06a6799bd3b..9fc6862ecf6 100644 --- a/common/process/inc/com/centreon/common/process/process.hh +++ b/common/process/inc/com/centreon/common/process/process.hh @@ -48,7 +48,7 @@ class mutex {}; template <> class lock { public: - lock(mutex* dummy_mut) {} + lock(mutex* /* dummy_mut*/) {} }; } // namespace detail diff --git a/common/process/src/process.cc b/common/process/src/process.cc index 6036a0fca19..14f827d4310 100644 --- a/common/process/src/process.cc +++ b/common/process/src/process.cc @@ -113,7 +113,7 @@ struct boost_process { boost_process(asio::io_context& io_context, const std::string& exe_path, const std::vector& args, - bool no_stdin) + bool no_stdin [[maybe_unused]]) : stdout_pipe(io_context), stderr_pipe(io_context), stdin_pipe(io_context), @@ -277,9 +277,9 @@ void process::stdin_write_no_lock( try { _write_pending = true; _proc->stdin_pipe.async_write_some( - asio::buffer(*data), - [me = shared_from_this(), caller = _proc, data]( - const boost::system::error_code& err, size_t nb_written) { + asio::buffer(*data), [me = shared_from_this(), caller = _proc, data]( + const boost::system::error_code& err, + size_t nb_written [[maybe_unused]]) { detail::lock l(&me->_protect); if (caller != me->_proc) { return; @@ -438,4 +438,4 @@ template class process; template class process; -} // namespace com::centreon::common \ No newline at end of file +} // namespace com::centreon::common diff --git a/tests/resources/Agent.py b/tests/resources/Agent.py index 4497a4453f3..b5fac6b092a 100644 --- a/tests/resources/Agent.py +++ b/tests/resources/Agent.py @@ -59,7 +59,7 @@ def ctn_config_reverse_centreon_agent(key_path:str = None, cert_path:str = None, makedirs(CONF_DIR, mode=0o777, exist_ok=True) with open(f"{CONF_DIR}/centagent.json", "w") as ff: ff.write(agent_config) - ff.write(",\n \"reverse_connection\":true") + ff.write(",\n \"reversed_grpc_streaming\":true") if key_path is not None or cert_path is not None or ca_path is not None: ff.write(",\n \"encryption\":true") if key_path is not None: From b9ac220ff8533005cb03d9fac014a1b6dbc6cf6d Mon Sep 17 00:00:00 2001 From: Sechkem Date: Wed, 27 Nov 2024 11:16:25 +0100 Subject: [PATCH 07/12] fix(engine): coredump when disabling a host with children relation (#1863) * fix(engine/broker): make parents_host shared ptr & remove relation db when child is deleted * feat(test): add tests EBPN about relation parent child REFS: MON-152645 --- broker/neb/src/callbacks.cc | 14 +- broker/neb/src/initial.cc | 6 +- engine/enginerpc/engine.proto | 3 + engine/enginerpc/engine_impl.cc | 12 +- engine/inc/com/centreon/engine/host.hh | 5 +- engine/src/configuration/applier/host.cc | 17 +- 
engine/src/host.cc | 97 +++---- engine/src/macros/grab_host.cc | 12 +- .../broker-engine/parent_child_relation.robot | 262 ++++++++++++++++++ tests/resources/Engine.py | 100 +++++++ 10 files changed, 446 insertions(+), 82 deletions(-) create mode 100644 tests/broker-engine/parent_child_relation.robot diff --git a/broker/neb/src/callbacks.cc b/broker/neb/src/callbacks.cc index 783db2a2609..f6a8ec4b3ab 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -2908,12 +2908,8 @@ int neb::callback_relation(int callback_type, void* data) { if (relation->hst && relation->dep_hst && !relation->svc && !relation->dep_svc) { // Find host IDs. - int host_id; - int parent_id; - { - host_id = engine::get_host_id(relation->dep_hst->name()); - parent_id = engine::get_host_id(relation->hst->name()); - } + int host_id = relation->dep_hst->host_id(); + int parent_id = relation->hst->host_id(); if (host_id && parent_id) { // Generate parent event. auto new_host_parent{std::make_shared()}; @@ -2964,10 +2960,8 @@ int neb::callback_pb_relation(int callback_type [[maybe_unused]], void* data) { if (relation->hst && relation->dep_hst && !relation->svc && !relation->dep_svc) { // Find host IDs. - int host_id; - int parent_id; - host_id = engine::get_host_id(relation->dep_hst->name()); - parent_id = engine::get_host_id(relation->hst->name()); + int host_id = relation->dep_hst->host_id(); + int parent_id = relation->hst->host_id(); if (host_id && parent_id) { // Generate parent event. auto new_host_parent{std::make_shared()}; diff --git a/broker/neb/src/initial.cc b/broker/neb/src/initial.cc index a8fad65920e..2d2c2cd8a42 100644 --- a/broker/neb/src/initial.cc +++ b/broker/neb/src/initial.cc @@ -336,14 +336,12 @@ static void send_host_parents_list(neb_sender sender = neb::callback_relation) { end{com::centreon::engine::host::hosts.end()}; it != end; ++it) { // Loop through all parents. - for (host_map_unsafe::iterator pit{it->second->parent_hosts.begin()}, - pend{it->second->parent_hosts.end()}; - pit != pend; ++pit) { + for (auto [_, sptr_host] : it->second->parent_hosts) { // Fill callback struct. nebstruct_relation_data nsrd; memset(&nsrd, 0, sizeof(nsrd)); nsrd.type = NEBTYPE_PARENT_ADD; - nsrd.hst = pit->second; + nsrd.hst = sptr_host.get(); nsrd.dep_hst = it->second.get(); // Callback. 
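Note: the neb callbacks above now take the host and parent IDs straight from the host objects instead of resolving them by name; the relation still ends up in the hosts_hosts_parents table, which is what the EBPN tests added later in this patch assert. A minimal manual check along the same lines (table, columns and IDs taken from those tests) could be:

```sql
-- One row (child host_2 -> parent host_1) is expected once the configuration
-- is applied; zero rows once the parent or the child is removed and Engine is
-- reloaded.
SELECT child_id, parent_id
  FROM hosts_hosts_parents
 WHERE child_id = 2
   AND parent_id = 1;
```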
diff --git a/engine/enginerpc/engine.proto b/engine/enginerpc/engine.proto index f11b867dda0..2280e53f9d1 100644 --- a/engine/enginerpc/engine.proto +++ b/engine/enginerpc/engine.proto @@ -356,6 +356,9 @@ message EngineHost { UNREACHABLE = 2; } State current_state = 6; + string display_name = 7; + repeated string parent_hosts = 8; + repeated string child_hosts = 9; } message ContactIdentifier { diff --git a/engine/enginerpc/engine_impl.cc b/engine/enginerpc/engine_impl.cc index 0476f9dc9d0..f32a0aefc5e 100644 --- a/engine/enginerpc/engine_impl.cc +++ b/engine/enginerpc/engine_impl.cc @@ -245,9 +245,19 @@ grpc::Status engine_impl::GetHost(grpc::ServerContext* context [[maybe_unused]], host->set_alias(selectedhost->get_alias()); host->set_address(selectedhost->get_address()); host->set_check_period(selectedhost->check_period()); + host->set_id(selectedhost->host_id()); host->set_current_state( static_cast(selectedhost->get_current_state())); - host->set_id(selectedhost->host_id()); + host->set_display_name(selectedhost->get_display_name()); + + if (!selectedhost->parent_hosts.empty()) + for (const auto& [key, _] : selectedhost->parent_hosts) + host->add_parent_hosts(key); + + if (!selectedhost->child_hosts.empty()) + for (const auto& [key, _] : selectedhost->child_hosts) + host->add_child_hosts(key); + return 0; }); diff --git a/engine/inc/com/centreon/engine/host.hh b/engine/inc/com/centreon/engine/host.hh index c91f28a5e6a..0c97d18c5eb 100644 --- a/engine/inc/com/centreon/engine/host.hh +++ b/engine/inc/com/centreon/engine/host.hh @@ -250,7 +250,7 @@ class host : public notifier { void set_check_command_ptr( const std::shared_ptr& cmd) override; - host_map_unsafe parent_hosts; + host_map parent_hosts; host_map_unsafe child_hosts; static host_map hosts; static host_id_map hosts_by_id; @@ -307,6 +307,7 @@ int number_of_total_parent_hosts(com::centreon::engine::host* hst); std::ostream& operator<<(std::ostream& os, com::centreon::engine::host const& obj); std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj); +std::ostream& operator<<(std::ostream& os, host_map const& obj); namespace com::centreon::engine { @@ -318,6 +319,4 @@ std::string get_host_name(const uint64_t host_id); } // namespace com::centreon::engine -std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj); - #endif // !CCE_HOST_HH diff --git a/engine/src/configuration/applier/host.cc b/engine/src/configuration/applier/host.cc index 8459866d7ac..1e45408b6b0 100644 --- a/engine/src/configuration/applier/host.cc +++ b/engine/src/configuration/applier/host.cc @@ -374,10 +374,8 @@ void applier::host::modify_object(configuration::host const& obj) { if (obj.parents() != obj_old.parents()) { // Delete old parents. 
{ - for (host_map_unsafe::iterator it(it_obj->second->parent_hosts.begin()), - end(it_obj->second->parent_hosts.end()); - it != end; it++) - broker_relation_data(NEBTYPE_PARENT_DELETE, it->second, nullptr, + for (const auto& [_, sptr_host] : it_obj->second->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, it_obj->second.get(), nullptr); } it_obj->second->parent_hosts.clear(); @@ -436,6 +434,11 @@ void applier::host::remove_object(configuration::host const& obj) { for (auto& it_h : it->second->get_parent_groups()) it_h->members.erase(it->second->name()); + // remove any relations + for (const auto& [_, sptr_host] : it->second->parent_hosts) + broker_relation_data(NEBTYPE_PARENT_DELETE, sptr_host.get(), nullptr, + it->second.get(), nullptr); + // Notify event broker. for (auto it_s = it->second->services.begin(); it_s != it->second->services.end(); ++it_s) @@ -470,10 +473,8 @@ void applier::host::resolve_object(configuration::host const& obj) { // It is necessary to do it only once to prevent the removal // of valid child backlinks. if (obj == *config->hosts().begin()) { - for (host_map::iterator it(engine::host::hosts.begin()), - end(engine::host::hosts.end()); - it != end; ++it) - it->second->child_hosts.clear(); + for (const auto& [_, sptr_host] : engine::host::hosts) + sptr_host->child_hosts.clear(); } // Find host. diff --git a/engine/src/host.cc b/engine/src/host.cc index ca62b79de61..f6f2b63e693 100644 --- a/engine/src/host.cc +++ b/engine/src/host.cc @@ -593,6 +593,19 @@ std::ostream& operator<<(std::ostream& os, host_map_unsafe const& obj) { return os; } +std::ostream& operator<<(std::ostream& os, host_map const& obj) { + bool first = true; + for (const auto& [key, _] : obj) { + if (first) { + first = false; + } else { + os << ", "; + } + os << key; + } + return os; +} + /** * Dump host content into the stream. * @@ -1036,8 +1049,7 @@ int is_host_immediate_child_of_host(com::centreon::engine::host* parent_host, } // Mid-level/bottom hosts. else { - host_map_unsafe::const_iterator it{ - child_host->parent_hosts.find(parent_host->name())}; + auto it{child_host->parent_hosts.find(parent_host->name())}; return it != child_host->parent_hosts.end(); } @@ -1856,8 +1868,8 @@ int host::run_async_check(int check_options, try { // Run command. 
get_check_command_ptr()->run(processed_cmd, *macros, - config->host_check_timeout(), - check_result_info); + config->host_check_timeout(), + check_result_info); } catch (com::centreon::exceptions::interruption const& e) { retry = true; } catch (std::exception const& e) { @@ -3157,17 +3169,15 @@ int host::process_check_result_3x(enum host::host_state new_state, SPDLOG_LOGGER_DEBUG(checks_logger, "Propagating checks to parent host(s)..."); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; - if (it->second->get_current_state() != host::state_up) { + if (sptr_host->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) - << "Check of parent host '" << it->first << "' queued."; + << "Check of parent host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, - "Check of parent host '{}' queued.", it->first); - check_hostlist.push_back(it->second); + "Check of parent host '{}' queued.", key); + check_hostlist.push_back(sptr_host.get()); } } @@ -3280,24 +3290,21 @@ int host::process_check_result_3x(enum host::host_state new_state, "** WARNING: Max attempts = 1, so we have to run serial " "checks of all parent hosts!"); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; has_parent = true; engine_logger(dbg_checks, more) - << "Running serial check parent host '" << it->first << "'..."; - SPDLOG_LOGGER_DEBUG(checks_logger, - "Running serial check parent host '{}'...", - it->first); + << "Running serial check parent host '" << key << "'..."; + SPDLOG_LOGGER_DEBUG( + checks_logger, "Running serial check parent host '{}'...", key); /* run an immediate check of the parent host */ - it->second->run_sync_check_3x(&parent_state, check_options, - use_cached_result, - check_timestamp_horizon); + sptr_host->run_sync_check_3x(&parent_state, check_options, + use_cached_result, + check_timestamp_horizon); /* bail out as soon as we find one parent host that is UP */ if (parent_state == host::state_up) { @@ -3392,17 +3399,15 @@ int host::process_check_result_3x(enum host::host_state new_state, "Propagating checks to immediate parent hosts that " "are UP..."); - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (it->second == nullptr) + for (const auto& [key, sptr_host] : parent_hosts) { + if (sptr_host == nullptr) continue; - if (it->second->get_current_state() == host::state_up) { - check_hostlist.push_back(it->second); + if (sptr_host->get_current_state() == host::state_up) { + check_hostlist.push_back(sptr_host.get()); engine_logger(dbg_checks, more) - << "Check of host '" << it->first << "' queued."; + << "Check of host '" << key << "' queued."; SPDLOG_LOGGER_DEBUG(checks_logger, "Check of host '{}' queued.", - it->first); + key); } } @@ -3644,22 +3649,20 @@ enum host::host_state host::determine_host_reachability( /* check all parent hosts to see if we're DOWN or UNREACHABLE */ else { - for (host_map_unsafe::iterator it{parent_hosts.begin()}, - end{parent_hosts.end()}; - it != end; it++) { - if (!it->second) + for (const auto& [key, sptr_host] : parent_hosts) { + if (!sptr_host) continue; /* bail out as soon as we find one parent host that is UP */ - if (it->second->get_current_state() == host::state_up) { + if 
(sptr_host->get_current_state() == host::state_up) { is_host_present = true; /* set the current state */ state = host::state_down; - engine_logger(dbg_checks, most) << "At least one parent (" << it->first - << ") is up, so host is DOWN."; + engine_logger(dbg_checks, most) + << "At least one parent (" << key << ") is up, so host is DOWN."; SPDLOG_LOGGER_DEBUG(checks_logger, "At least one parent ({}) is up, so host is DOWN.", - it->first); + key); break; } } @@ -3984,22 +3987,20 @@ void host::resolve(int& w, int& e) { } /* check all parent parent host */ - for (host_map_unsafe::iterator it(parent_hosts.begin()), - end(parent_hosts.end()); - it != end; it++) { - host_map::const_iterator it_host{host::hosts.find(it->first)}; + for (auto& [key, sptr_host] : parent_hosts) { + host_map::const_iterator it_host{host::hosts.find(key)}; if (it_host == host::hosts.end() || !it_host->second) { - engine_logger(log_verification_error, basic) << "Error: '" << it->first + engine_logger(log_verification_error, basic) << "Error: '" << key << "' is not a " "valid parent for host '" << name() << "'!"; config_logger->error("Error: '{}' is not a valid parent for host '{}'!", - it->first, name()); + key, name()); errors++; } else { - it->second = it_host->second.get(); - it_host->second->add_child_host( - this); // add a reverse (child) link to make searches faster later on + sptr_host = it_host->second; + it_host->second->add_child_host(this); // add a reverse (child) link to + // make searches faster later on } } diff --git a/engine/src/macros/grab_host.cc b/engine/src/macros/grab_host.cc index c611278d665..22cf6899213 100644 --- a/engine/src/macros/grab_host.cc +++ b/engine/src/macros/grab_host.cc @@ -184,12 +184,10 @@ std::string get_host_total_services(host& hst, nagios_macros* mac) { static std::string get_host_parents(host& hst, nagios_macros* mac) { (void)mac; std::string retval; - for (host_map_unsafe::const_iterator it(hst.parent_hosts.begin()), - end(hst.parent_hosts.end()); - it != end; it++) { + for (const auto& [key, _] : hst.parent_hosts) { if (!retval.empty()) retval.append(","); - retval.append(it->first); + retval.append(key); } return retval; } @@ -205,12 +203,10 @@ static std::string get_host_parents(host& hst, nagios_macros* mac) { static std::string get_host_children(host& hst, nagios_macros* mac) { (void)mac; std::string retval; - for (host_map_unsafe::const_iterator it(hst.child_hosts.begin()), - end(hst.child_hosts.end()); - it != end; it++) { + for (const auto& [key, _] : hst.child_hosts) { if (!retval.empty()) retval.append(","); - retval.append(it->first); + retval.append(key); } return retval; } diff --git a/tests/broker-engine/parent_child_relation.robot b/tests/broker-engine/parent_child_relation.robot new file mode 100644 index 00000000000..2a123df2f5e --- /dev/null +++ b/tests/broker-engine/parent_child_relation.robot @@ -0,0 +1,262 @@ +*** Settings *** +Documentation Centreon Engine/Broker verify relation parent child host. + +Resource ../resources/import.resource + +Suite Setup Ctn Clean Before Suite +Suite Teardown Ctn Clean After Suite +Test Setup Ctn Stop Processes +Test Teardown Ctn Save Logs If Failed + + +*** Test Cases *** + +EBPN0 + [Documentation] Verify if child is in queue when parent is down. 
+ [Tags] broker engine MON-151686 + + Ctn Config Engine ${1} ${5} ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 1 + + Ctn Broker Config Log rrd rrd trace + Ctn Broker Config Log central sql debug + Ctn Broker Config Log rrd core error + Ctn Engine Config Set Value 0 log_level_checks debug + Ctn Config Broker Sql Output central unified_sql 10 + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + + Ctn Clear Retention + Ctn Clear Db resources + + # force the check result to 2 + Ctn Config Host Command Status ${0} checkh1 2 + + # host_1 is parent of host_2 + Ctn Add Parent To Host 0 host_2 host_1 + + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + # check if host_2 is child of host_1 + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + + FOR ${index} IN RANGE 30 + ${output} Query + ... SELECT child_id, parent_id FROM hosts_hosts_parents + Log To Console ${output} + Sleep 1s + IF "${output}" == "((2, 1),)" BREAK + END + Should Be Equal As Strings ${output} ((2, 1),) host parent not inserted + + # check if host_1 is pending + ${result} Ctn Check Host Status host_1 4 1 True + Should Be True ${result} host_1 should be pending + + ${result} Ctn Check Host Status host_2 4 1 True + Should Be True ${result} host_2 should be pending + + ${content} Create List INITIAL HOST STATE: host_1; + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True + ... ${result} + ... An Initial host state on host_1 should be raised before we can start our external commands. + + Ctn Process Host Check Result host_1 0 host_1 UP + + FOR ${i} IN RANGE ${4} + Ctn Schedule Forced Host Check host_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd + Sleep 1s + END + + ${content} Create List + ... EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; + ... HOST ALERT: host_1;DOWN;HARD; + + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} Message about SCHEDULE HOST should be down in log. + + ${result} Ctn Check Host Status host_1 1 1 True + Should Be True ${result} host_1 should be down/hard + + ${content} Create List + ... Check of child host 'host_2' queued. + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} Check of child host 'host_2' should be queued. + + Disconnect From Database + Ctn Stop Engine + Ctn Kindly Stop Broker + +EBPN1 + [Documentation] verify relation parent child when delete parent. 
+ [Tags] broker engine MON-151686 + + Ctn Config Engine ${1} ${5} ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 1 + + Ctn Broker Config Log rrd rrd error + Ctn Broker Config Log rrd core error + Ctn Broker Config Log module0 core error + + Ctn Broker Config Log central sql debug + Ctn Engine Config Set Value 0 log_level_checks error + Ctn Config Broker Sql Output central unified_sql 10 + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + + Ctn Clear Retention + + + # host_1 is parent of host_2 + Ctn Add Parent To Host 0 host_2 host_1 + + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + ${output} Ctn Get Host Info Grpc ${2} + Log To Console parents:${output}[parentHosts] + Should Contain ${output}[parentHosts] host_1 parentHosts + + ${output} Ctn Get Host Info Grpc ${1} + Log To Console childs:${output}[childHosts] + Should Contain ${output}[childHosts] host_2 childHosts + + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + + FOR ${index} IN RANGE 30 + ${output} Query + ... SELECT child_id, parent_id FROM hosts_hosts_parents + Log To Console ${output} + Sleep 1s + IF "${output}" == "((2, 1),)" BREAK + END + Should Be Equal As Strings ${output} ((2, 1),) the parent link not inserted + + Ctn Engine Config Del Block In Cfg ${0} host host_1 hosts.cfg + Ctn Engine Config Del Block In Cfg ${0} service host_1 services.cfg + Ctn Engine Config Delete Value In Hosts ${0} host_2 parents + + ${start} Get Current Date + Ctn Reload Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + ${content} Create List Reload configuration finished + ${result} Ctn Find In Log With Timeout + ... ${ENGINE_LOG}/config0/centengine.log + ... ${start} + ... ${content} + ... 60 + ... verbose=False + Should Be True ${result} Engine is Not Ready after 60s!! + + + ${output} Ctn Get Host Info Grpc ${2} + Log To Console parents:${output}[parentHosts] + Should Be Empty ${output}[parentHosts] + + FOR ${index} IN RANGE 30 + ${output} Query + ... SELECT child_id, parent_id FROM hosts_hosts_parents + Log To Console ${output} + Sleep 1s + IF "${output}" == "()" BREAK + END + Should Be Equal As Strings ${output} () the parent link should be deleted + + Disconnect From Database + Ctn Stop Engine + Ctn Kindly Stop Broker + +EBPN2 + [Documentation] verify relation parent child when delete child. 
+ [Tags] broker engine MON-151686 + + Ctn Config Engine ${1} ${5} ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 1 + + Ctn Broker Config Log rrd rrd error + Ctn Broker Config Log rrd core error + Ctn Broker Config Log module0 core error + + Ctn Broker Config Log central sql debug + Ctn Engine Config Set Value 0 log_level_checks error + Ctn Config Broker Sql Output central unified_sql 10 + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + + Ctn Clear Retention + + # host_1 is parent of host_2 + Ctn Add Parent To Host 0 host_2 host_1 + + ${start} Get Current Date + + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + ${output} Ctn Get Host Info Grpc ${2} + Log To Console parents:${output}[parentHosts] + Should Contain ${output}[parentHosts] host_1 parentHosts + + ${output} Ctn Get Host Info Grpc ${1} + Log To Console childs:${output}[childHosts] + Should Contain ${output}[childHosts] host_2 childHosts + + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + + FOR ${index} IN RANGE 30 + ${output} Query + ... SELECT child_id, parent_id FROM hosts_hosts_parents + Log To Console ${output} + Sleep 1s + IF "${output}" == "((2, 1),)" BREAK + END + Should Be Equal As Strings ${output} ((2, 1),) the parent link not inserted + + Ctn Engine Config Del Block In Cfg ${0} host host_2 hosts.cfg + Ctn Engine Config Del Block In Cfg ${0} service host_2 services.cfg + Ctn Engine Config Delete Value In Hosts ${0} host_2 parents + + ${start} Get Current Date + Ctn Reload Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + ${content} Create List Reload configuration finished + ${result} Ctn Find In Log With Timeout + ... ${ENGINE_LOG}/config0/centengine.log + ... ${start} + ... ${content} + ... 60 + ... verbose=False + Should Be True ${result} Engine is Not Ready after 60s!! + + ${output} Ctn Get Host Info Grpc ${1} + Log To Console childs:${output}[childHosts] + Should Be Empty ${output}[childHosts] + + FOR ${index} IN RANGE 30 + ${output} Query + ... SELECT child_id, parent_id FROM hosts_hosts_parents + Log To Console ${output} + Sleep 1s + IF "${output}" == "()" BREAK + END + Should Be Equal As Strings ${output} () the parent link should be deleted + + Disconnect From Database + Ctn Stop Engine + Ctn Kindly Stop Broker \ No newline at end of file diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 3df8fddf7db..2def5e7ec3c 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -32,6 +32,7 @@ from array import array from dateutil import parser import datetime +from google.protobuf.json_format import MessageToDict from os import makedirs, chmod from os.path import exists, dirname from robot.api import logger @@ -3648,3 +3649,102 @@ def ctn_send_otl_to_engine(port: int, resource_metrics: list): logger.console("gRPC server not ready") +def ctn_engine_config_del_block_in_cfg(idx: int, type: str, key: str, file): + """ + Delete a element in the file given for the Engine configuration idx. + + Args: + idx (int): Index of the Engine configuration (from 0) + type (str): The type (host/service/...). + key (str): The parameter that will be deleted. + file (str): The file to delete the key from. 
+ """ + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/{file}" + + with open(filename, "r") as f: + content = f.read() + + if type == "host": + pattern = rf"define host \{{\s*host_name\s+{re.escape(key)}\b.*?\}}" + elif type == "service": + pattern = rf"define service \{{\s*host_name\s+{re.escape(key)}\b.*?\}}" + + # Use re.sub to remove the matched block + new_content = re.sub(pattern, '', content, flags=re.DOTALL) + new_content = re.sub(r'\n\s*\n', '\n', new_content) + + if content != new_content: + with open(filename, "w") as f: + f.write(new_content) + else: + logger.console(f'\n\033[91mFailed : Cannot delete the block with the type : {type} and the key : {key} in {file}\033[0m') + +def ctn_get_host_info_grpc(id:int): + """ + Retrieve host information via a gRPC call. + + Args: + id: The identifier of the host to retrieve. + + Returns: + A dictionary containing the host informations, if successfully retrieved. + """ + if id is not None: + limit = time.time() + 30 + while time.time() < limit: + time.sleep(1) + with grpc.insecure_channel("127.0.0.1:50001") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + request = engine_pb2.HostIdentifier(id=id) + try: + host = stub.GetHost(request) + host_dict = MessageToDict(host, always_print_fields_with_no_presence=True) + return host_dict + except Exception as e: + logger.console(f"gRPC server not ready {e}") + return {} + +def ctn_engine_config_delete_value_in_hosts(idx: int, desc: str, key: str, file: str = 'hosts.cfg'): + """ + Delete a parameter in the hosts.cfg for the Engine configuration idx. + + Args: + idx (int): Index of the Engine configuration (from 0) + desc (str): host name of the host to modify. + key (str): the parameter that will be deleted. + file (str): The file to modify, default value 'hosts.cfg' + """ + + + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/{file}" + with open(filename, "r") as f: + lines = f.readlines() + + r = re.compile(r"^\s*host_name\s+" + desc + "\s*$") + rbis = re.compile(r"^\s*name\s+" + desc + "\s*$") + found = False + for i in range(len(lines)): + if r.match(lines[i]): + print("here" + lines[i]) + for j in range(i + 1, len(lines)): + if '}' in lines[j]: + break + if key in lines[j]: + del lines[j] + found = True + break + break + + if not found: + for i in range(len(lines)): + if rbis.match(lines[i]): + for j in range(i + 1, len(lines)): + if '}' in lines[j]: + break + if key in lines[j]: + del lines[j] + found = True + break + break + with open(filename, "w") as f: + f.writelines(lines) \ No newline at end of file From 88bf24ad08bb43ce3ba28969d95d1431411a9e23 Mon Sep 17 00:00:00 2001 From: Evan-Adam <152897682+Evan-Adam@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:58:24 +0100 Subject: [PATCH 08/12] fix(gorgone): reduce buffer_size and document it. Refs:MON-1515144 --- gorgone/docs/modules/core/proxy.md | 46 ++++++++++++++++----- gorgone/gorgone/modules/core/proxy/hooks.pm | 2 +- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/gorgone/docs/modules/core/proxy.md b/gorgone/docs/modules/core/proxy.md index 5891cee4fb3..a4947424222 100644 --- a/gorgone/docs/modules/core/proxy.md +++ b/gorgone/docs/modules/core/proxy.md @@ -12,13 +12,30 @@ A SSH client library make routing to non-gorgoned nodes possible. 
 ## Configuration
 
-| Directive            | Description                                                          | Default value |
-| :------------------- | :------------------------------------------------------------------ | :------------ |
-| pool                 | Number of childs to instantiate to process events                    | `5`           |
-| synchistory_time     | Time in seconds between two logs synchronisation                     | `60`          |
-| synchistory_timeout  | Time in seconds before logs synchronisation is considered timed out  | `30`          |
-| ping                 | Time in seconds between two node pings                               | `60`          |
-| pong_discard_timeout | Time in seconds before a node is considered dead                     | `300`         |
+| Directive            | Description                                                                                                                                | Default value |
+|:---------------------|:-------------------------------------------------------------------------------------------------------------------------------------------|:--------------|
+| pool                 | Number of children to instantiate to process events                                                                                        | `5`           |
+| synchistory_time     | Time in seconds between two log synchronisations                                                                                           | `60`          |
+| synchistory_timeout  | Time in seconds before log synchronisation is considered timed out                                                                         | `30`          |
+| ping                 | Time in seconds between two node pings                                                                                                     | `60`          |
+| pong_discard_timeout | Time in seconds before a ping is considered lost                                                                                           | `300`         |
+| buffer_size          | Maximum size of the packets sent from one node to another. This is mainly used by legacycmd to send files from the central to the poller.  | `150000`      |
+
+
+This part of the configuration is only used if some pollers must connect through the pullwss module.
+
+| Directive     | Description                                                                                        | Default value |
+|:--------------|:---------------------------------------------------------------------------------------------------|:--------------|
+| httpserver    | Section containing all the directives below for a pullwss connection                               | no value      |
+| enable        | Whether the HTTP server should be enabled                                                            | `false`       |
+| ssl           | Whether the connection should be made over TLS/SSL                                                   | `false`       |
+| ssl_cert_file | Path to an SSL certificate file. Required if `ssl` is `true`                                          |               |
+| ssl_key_file  | Path to the SSL key file associated with the configured certificate. Required if `ssl` is `true`     |               |
+| passphrase    | Optional passphrase for the SSL key                                                                   |               |
+| token         | Token used to authenticate nodes. Required when the HTTP server is enabled                           |               |
+| address       | Address to listen on. Use 0.0.0.0 to listen on all IPv4 addresses                                    |               |
+| port          | TCP port to listen on                                                                                 |               |
+
 
 #### Example
 
@@ -31,12 +48,19 @@ synchistory_time: 60
 synchistory_timeout: 30
 ping: 60
 pong_discard_timeout: 300
+httpserver: # Only needed if you configure pullwss nodes. To make it work, you also have to add the register module and create a configuration file for it.
+ enable: true + ssl: true + ssl_cert_file: /etc/centreon-gorgone/keys/public.pem + ssl_key_file: /etc/centreon-gorgone/keys/private.pem + token: secure_token + address: "0.0.0.0" ``` ## Events | Event | Description | -| :-------------- | :----------------------------------------------------------------------------- | +|:----------------|:-------------------------------------------------------------------------------| | PROXYREADY | Internal event to notify the core | | REMOTECOPY | Copy files or directories from the server running the daemon to another server | | SETLOGS | Internal event to insert logs into the database | @@ -53,20 +77,20 @@ pong_discard_timeout: 300 ### Copy files or directory to remote server | Endpoint | Method | -| :------------------------- | :----- | +|:---------------------------|:-------| | /api/core/proxy/remotecopy | `POST` | #### Headers | Header | Value | -| :----------- | :--------------- | +|:-------------|:-----------------| | Accept | application/json | | Content-Type | application/json | #### Body | Key | Value | -| :---------- | :------------------------------------------------ | +|:------------|:--------------------------------------------------| | source | Path of the source file or directory | | destination | Path of the destination file or directory | | cache_dir | Path to the cache directory for archiving purpose | diff --git a/gorgone/gorgone/modules/core/proxy/hooks.pm b/gorgone/gorgone/modules/core/proxy/hooks.pm index 1319abad40e..25b5c5dc2e4 100644 --- a/gorgone/gorgone/modules/core/proxy/hooks.pm +++ b/gorgone/gorgone/modules/core/proxy/hooks.pm @@ -1169,7 +1169,7 @@ sub prepare_remote_copy { sysopen(FH, $localsrc, O_RDONLY); binmode(FH); - my $buffer_size = (defined($config->{buffer_size})) ? $config->{buffer_size} : 500_000; + my $buffer_size = (defined($config->{buffer_size})) ? $config->{buffer_size} : 150_000; my $buffer; while (my $bytes = sysread(FH, $buffer, $buffer_size)) { my $action = JSON::XS->new->encode({ From eb82c6cdf2c09d4e026802f1800ef18275038a22 Mon Sep 17 00:00:00 2001 From: pkippes <144150042+pkippes@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:11:16 +0100 Subject: [PATCH 09/12] chore(release) bump collect to 24.04.8 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bd1b441cd9e..69f2ec1541f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -110,7 +110,7 @@ endif() # Version. 
set(COLLECT_MAJOR 24) set(COLLECT_MINOR 04) -set(COLLECT_PATCH 7) +set(COLLECT_PATCH 8) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") From 9991c04b5afdd2700e1c6aaeaa80668a19ab5821 Mon Sep 17 00:00:00 2001 From: pkippes <144150042+pkippes@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:11:30 +0100 Subject: [PATCH 10/12] chore(release) bump collect to 24.04.8 --- .version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.version b/.version index be9d7ead1f6..18adcbafc58 100644 --- a/.version +++ b/.version @@ -1,2 +1,2 @@ MAJOR=24.04 -MINOR=7 +MINOR=8 From e5a68e01c3e9d318f1b8c6e0a5f8bf9af9c17ef4 Mon Sep 17 00:00:00 2001 From: pkippes <144150042+pkippes@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:12:53 +0100 Subject: [PATCH 11/12] chore(release) bump collect to 24.04.5 --- gorgone/.version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gorgone/.version b/gorgone/.version index e03770729bf..1a3ba0012dc 100644 --- a/gorgone/.version +++ b/gorgone/.version @@ -1 +1 @@ -MINOR=4 +MINOR=5 From 08410b8cdd9356e132301b4047b3e6a582a36897 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Wed, 18 Dec 2024 12:09:30 +0100 Subject: [PATCH 12/12] enh(broker): AdaptiveHostStatus and AdaptiveServiceStatus added (#1869) (#1954) * enh(broker/grpc): Improvement of the grpc python generator script * enh(tests): new test on downtimes, to check duplicates in rrd * enh(broker): AdaptiveHostStatus and AdaptiveServiceStatus added * fix(engine): update_status() generalized with an argument * fix(broker): neb::callbacks works better with service_type and internal_id * enh(broker/lua): the lua stream must handle AdaptiveHostStatus and AdaptiveServiceStatus * enh(broker/unified_sql): work on adaptive status in unified_sql * cleanup(collect): headers removed when not needed * enh(cmake): cmake-vcpkg improved * fix(broker/bam): pb_adaptive_service_status are handled by bam now REFS: MON-151251 --- bbdo/bam.proto | 2 +- bbdo/bam/ba_status.hh | 2 - bbdo/bam/dimension_ba_bv_relation_event.hh | 2 - bbdo/bam/dimension_ba_event.hh | 2 - bbdo/bam/dimension_bv_event.hh | 2 - bbdo/bam/dimension_truncate_table_signal.hh | 2 - bbdo/bam/kpi_status.hh | 2 - bbdo/bam/rebuild.hh | 2 - bbdo/events.hh | 4 +- bbdo/neb.proto | 26 + .../com/centreon/broker/bam/bool_service.hh | 5 +- .../com/centreon/broker/bam/service_book.hh | 2 + .../centreon/broker/bam/service_listener.hh | 4 +- broker/bam/src/bool_service.cc | 27 + broker/bam/src/connector.cc | 26 +- broker/bam/src/monitoring_stream.cc | 20 +- broker/bam/src/reporting_stream.cc | 4 +- broker/bam/src/service_book.cc | 19 + broker/bam/src/service_listener.cc | 10 + .../inc/com/centreon/broker/config/parser.hh | 1 - .../inc/com/centreon/broker/config/state.hh | 1 - .../centreon/broker/sql/mysql_connection.hh | 1 - broker/grpc/generate_proto.py | 46 +- .../com/centreon/broker/lua/macro_cache.hh | 15 +- broker/lua/src/macro_cache.cc | 776 ++++++++++-------- broker/lua/test/lua.cc | 228 ++++- .../inc/com/centreon/broker/neb/comment.hh | 5 +- .../broker/neb/custom_variable_status.hh | 5 +- .../inc/com/centreon/broker/neb/downtime.hh | 5 +- .../neb/inc/com/centreon/broker/neb/host.hh | 36 +- .../inc/com/centreon/broker/neb/host_check.hh | 3 - .../broker/neb/instance_configuration.hh | 5 +- .../centreon/broker/neb/instance_status.hh | 3 +- .../inc/com/centreon/broker/neb/internal.hh | 7 + .../com/centreon/broker/neb/service_check.hh | 3 - .../com/centreon/broker/neb/service_status.hh | 3 +- 
.../com/centreon/broker/neb/set_log_data.hh | 1 - broker/neb/precomp_inc/precomp.hpp | 1 + broker/neb/src/broker.cc | 8 + broker/neb/src/callbacks.cc | 593 ++++++------- .../tls/inc/com/centreon/broker/tls/stream.hh | 1 - broker/tls/test/acceptor.cc | 5 +- .../com/centreon/broker/unified_sql/stream.hh | 4 +- broker/unified_sql/src/stream.cc | 4 +- broker/unified_sql/src/stream_sql.cc | 419 +++++++--- clib/inc/com/centreon/logging/temp_logger.hh | 1 - cmake-vcpkg.sh | 4 + engine/enginerpc/engine_impl.cc | 34 +- engine/inc/com/centreon/engine/broker.hh | 9 +- .../com/centreon/engine/commands/command.hh | 1 - .../centreon/engine/commands/processing.hh | 3 - engine/inc/com/centreon/engine/common.hh | 86 +- .../com/centreon/engine/downtimes/downtime.hh | 1 - engine/inc/com/centreon/engine/escalation.hh | 1 - engine/inc/com/centreon/engine/flapping.hh | 1 - engine/inc/com/centreon/engine/globals.hh | 41 +- engine/inc/com/centreon/engine/host.hh | 2 +- engine/inc/com/centreon/engine/macros.hh | 1 - .../com/centreon/engine/macros/grab_host.hh | 1 - .../centreon/engine/macros/grab_service.hh | 1 - engine/inc/com/centreon/engine/nebstructs.hh | 4 +- engine/inc/com/centreon/engine/notifier.hh | 20 +- engine/inc/com/centreon/engine/objects.hh | 2 - engine/inc/com/centreon/engine/sehandlers.hh | 2 - engine/inc/com/centreon/engine/service.hh | 3 +- engine/inc/com/centreon/engine/statusdata.hh | 1 - engine/inc/com/centreon/engine/utils.hh | 2 - engine/src/broker.cc | 10 +- engine/src/commands/commands.cc | 17 +- engine/src/downtimes/host_downtime.cc | 6 +- engine/src/downtimes/service_downtime.cc | 9 +- engine/src/events/loop.cc | 7 +- engine/src/host.cc | 17 +- engine/src/notifier.cc | 7 +- engine/src/service.cc | 16 +- tests/README.md | 644 ++++++++------- tests/bam/inherited_downtime.robot | 36 +- tests/bam/pb_inherited_downtime.robot | 64 +- tests/broker-engine/acknowledgement.robot | 14 +- tests/broker-engine/downtimes.robot | 65 ++ tests/broker-engine/notifications.robot | 1 - tests/resources/Broker.py | 2 +- tests/resources/Common.py | 25 +- tests/resources/resources.resource | 12 +- tests/update-doc.py | 28 +- 85 files changed, 2142 insertions(+), 1401 deletions(-) diff --git a/bbdo/bam.proto b/bbdo/bam.proto index 50e748158bf..2af2a3d0a90 100644 --- a/bbdo/bam.proto +++ b/bbdo/bam.proto @@ -53,7 +53,7 @@ message BaStatus { message BaEvent { uint32 ba_id = 1; double first_level = 2; - int64 end_time = 3; + uint64 end_time = 3; bool in_downtime = 4; uint64 start_time = 5; State status = 6; diff --git a/bbdo/bam/ba_status.hh b/bbdo/bam/ba_status.hh index 209ee02258c..a946473bfd1 100644 --- a/bbdo/bam/ba_status.hh +++ b/bbdo/bam/ba_status.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_BA_STATUS_HH #define CCB_BAM_BA_STATUS_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/dimension_ba_bv_relation_event.hh b/bbdo/bam/dimension_ba_bv_relation_event.hh index 1b79c87d2f1..c512bed7c3d 100644 --- a/bbdo/bam/dimension_ba_bv_relation_event.hh +++ b/bbdo/bam/dimension_ba_bv_relation_event.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_DIMENSION_BA_BV_RELATION_EVENT_HH #define CCB_BAM_DIMENSION_BA_BV_RELATION_EVENT_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include 
"com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/dimension_ba_event.hh b/bbdo/bam/dimension_ba_event.hh index ac1ea83057a..646a95b69b4 100644 --- a/bbdo/bam/dimension_ba_event.hh +++ b/bbdo/bam/dimension_ba_event.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_DIMENSION_BA_EVENT_HH #define CCB_BAM_DIMENSION_BA_EVENT_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/dimension_bv_event.hh b/bbdo/bam/dimension_bv_event.hh index 3ee958b42e9..2a912d0c4fe 100644 --- a/bbdo/bam/dimension_bv_event.hh +++ b/bbdo/bam/dimension_bv_event.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_DIMENSION_BV_EVENT_HH #define CCB_BAM_DIMENSION_BV_EVENT_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/dimension_truncate_table_signal.hh b/bbdo/bam/dimension_truncate_table_signal.hh index adf2107af4c..e8e50491987 100644 --- a/bbdo/bam/dimension_truncate_table_signal.hh +++ b/bbdo/bam/dimension_truncate_table_signal.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_DIMENSION_TRUNCATE_TABLE_SIGNAL_HH #define CCB_BAM_DIMENSION_TRUNCATE_TABLE_SIGNAL_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/kpi_status.hh b/bbdo/bam/kpi_status.hh index 77b2c225aa0..1de9adedc74 100644 --- a/bbdo/bam/kpi_status.hh +++ b/bbdo/bam/kpi_status.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_KPI_STATUS_HH #define CCB_BAM_KPI_STATUS_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/timestamp.hh" diff --git a/bbdo/bam/rebuild.hh b/bbdo/bam/rebuild.hh index b965bcf3ec7..09e3a6e5f09 100644 --- a/bbdo/bam/rebuild.hh +++ b/bbdo/bam/rebuild.hh @@ -19,9 +19,7 @@ #ifndef CCB_BAM_REBUILD_HH #define CCB_BAM_REBUILD_HH -#include "bbdo/events.hh" #include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" diff --git a/bbdo/events.hh b/bbdo/events.hh index 5e392018328..9585bd26055 100644 --- a/bbdo/events.hh +++ b/bbdo/events.hh @@ -152,7 +152,9 @@ enum data_element { de_pb_service_group = 51, de_pb_service_group_member = 52, de_pb_host_parent = 53, - de_pb_instance_configuration = 54 + de_pb_instance_configuration = 54, + de_pb_adaptive_service_status = 55, + de_pb_adaptive_host_status = 56, }; } // namespace neb namespace storage { diff --git a/bbdo/neb.proto b/bbdo/neb.proto index ab06297195c..4528d140fa4 100644 --- a/bbdo/neb.proto +++ b/bbdo/neb.proto @@ -156,6 +156,21 @@ message Service { uint64 icon_id = 87; } +/** + * @brief Message sent in BBDO 3.0.0 to update a service status partially + * changed. For example, it is convenient for downtime changed. 
+ */ +/* io::neb, neb::de_pb_adaptive_service_status, 53 */ +message AdaptiveServiceStatus { + uint64 host_id = 1; + uint64 service_id = 2; + ServiceType type = 3; + uint64 internal_id = 4; + optional int32 scheduled_downtime_depth = 5; + optional AckType acknowledgement_type = 6; + optional int32 notification_number = 7; +} + /** * @brief Message sent in BBDO 3.0.0 instead of neb::service_status */ @@ -406,6 +421,17 @@ message HostStatus { int32 scheduled_downtime_depth = 28; } +/** + * @brief Message sent in BBDO 3.0.0 to update a host status partially + * changed. For example, it is convenient for downtime changed. + */ +/* io::neb, neb::de_pb_adaptive_host_status, 55 */ +message AdaptiveHostStatus { + uint64 host_id = 1; + optional int32 scheduled_downtime_depth = 2; + optional AckType acknowledgement_type = 3; + optional int32 notification_number = 4; +} /** * @brief Message used to send adaptive host configuration. When only one * or two configuration items change, this event is used. diff --git a/broker/bam/inc/com/centreon/broker/bam/bool_service.hh b/broker/bam/inc/com/centreon/broker/bam/bool_service.hh index 03d3c13053b..b39e936131e 100644 --- a/broker/bam/inc/com/centreon/broker/bam/bool_service.hh +++ b/broker/bam/inc/com/centreon/broker/bam/bool_service.hh @@ -21,8 +21,6 @@ #include "com/centreon/broker/bam/bool_value.hh" #include "com/centreon/broker/bam/service_listener.hh" -#include "com/centreon/broker/io/stream.hh" -#include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker::bam { /** @@ -56,6 +54,9 @@ class bool_service : public bool_value, public service_listener { io::stream* visitor = nullptr) override; void service_update(const std::shared_ptr& status, io::stream* visitor = nullptr) override; + void service_update( + const std::shared_ptr& status, + io::stream* visitor = nullptr) override; void service_update(const std::shared_ptr& status, io::stream* visitor = nullptr) override; double value_hard() const override; diff --git a/broker/bam/inc/com/centreon/broker/bam/service_book.hh b/broker/bam/inc/com/centreon/broker/bam/service_book.hh index 5e9b7dfecb3..469698b5bac 100644 --- a/broker/bam/inc/com/centreon/broker/bam/service_book.hh +++ b/broker/bam/inc/com/centreon/broker/bam/service_book.hh @@ -78,6 +78,8 @@ class service_book { io::stream* visitor = nullptr); void update(const std::shared_ptr& t, io::stream* visitor = nullptr); + void update(const std::shared_ptr& t, + io::stream* visitor = nullptr); void save_to_cache(persistent_cache& cache) const; void apply_services_state(const ServicesBookState& state); }; diff --git a/broker/bam/inc/com/centreon/broker/bam/service_listener.hh b/broker/bam/inc/com/centreon/broker/bam/service_listener.hh index 5534f23ef54..449aa22050d 100644 --- a/broker/bam/inc/com/centreon/broker/bam/service_listener.hh +++ b/broker/bam/inc/com/centreon/broker/bam/service_listener.hh @@ -19,7 +19,6 @@ #ifndef CCB_BAM_SERVICE_LISTENER_HH #define CCB_BAM_SERVICE_LISTENER_HH -#include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker { @@ -51,6 +50,9 @@ class service_listener { virtual void service_update(const service_state& s); virtual void service_update(std::shared_ptr const& status, io::stream* visitor = nullptr); + virtual void service_update( + std::shared_ptr const& status, + io::stream* visitor = nullptr); virtual void service_update( std::shared_ptr const& status, io::stream* visitor = nullptr); diff --git a/broker/bam/src/bool_service.cc 
b/broker/bam/src/bool_service.cc index 13f4bae0c72..1a06d86acdd 100644 --- a/broker/bam/src/bool_service.cc +++ b/broker/bam/src/bool_service.cc @@ -167,6 +167,33 @@ void bool_service::service_update( } } +/** + * @brief Notify of a service status update (usually used for downtimes). + * + * @param status The adaptive status of the service. + * @param visitor The visitor to handle events. + */ +void bool_service::service_update( + const std::shared_ptr& status, + io::stream* visitor) { + auto& o = status->obj(); + if (o.has_scheduled_downtime_depth()) { + SPDLOG_LOGGER_TRACE(_logger, + "bool_service: service ({},{}) updated with " + "neb::pb_adaptive_service_status downtime: {}", + o.host_id(), o.service_id(), + o.scheduled_downtime_depth()); + if (o.host_id() == _host_id && o.service_id() == _service_id) { + bool new_in_downtime = o.scheduled_downtime_depth() > 0; + if (_in_downtime != new_in_downtime) { + _in_downtime = new_in_downtime; + _logger->trace("bool_service: updated with downtime: {}", _in_downtime); + notify_parents_of_change(visitor); + } + } + } +} + /** * Get the hard value. * diff --git a/broker/bam/src/connector.cc b/broker/bam/src/connector.cc index 0819f341921..24165c0cd07 100644 --- a/broker/bam/src/connector.cc +++ b/broker/bam/src/connector.cc @@ -35,20 +35,28 @@ #include "com/centreon/broker/neb/acknowledgement.hh" #include "com/centreon/broker/neb/downtime.hh" #include "com/centreon/broker/neb/service.hh" -#include "com/centreon/broker/neb/service_status.hh" using namespace com::centreon::broker; using namespace com::centreon::broker::bam; static constexpr multiplexing::muxer_filter _monitoring_stream_filter = { - neb::service_status::static_type(), neb::pb_service_status::static_type(), - neb::service::static_type(), neb::pb_service::static_type(), - neb::acknowledgement::static_type(), neb::pb_acknowledgement::static_type(), - neb::downtime::static_type(), neb::pb_downtime::static_type(), - bam::ba_status::static_type(), bam::pb_ba_status::static_type(), - bam::kpi_status::static_type(), bam::pb_kpi_status::static_type(), - inherited_downtime::static_type(), pb_inherited_downtime::static_type(), - extcmd::pb_ba_info::static_type(), pb_services_book_state::static_type()}; + neb::service_status::static_type(), + neb::pb_service_status::static_type(), + neb::service::static_type(), + neb::pb_service::static_type(), + neb::acknowledgement::static_type(), + neb::pb_acknowledgement::static_type(), + neb::downtime::static_type(), + neb::pb_downtime::static_type(), + neb::pb_adaptive_service_status::static_type(), + bam::ba_status::static_type(), + bam::pb_ba_status::static_type(), + bam::kpi_status::static_type(), + bam::pb_kpi_status::static_type(), + inherited_downtime::static_type(), + pb_inherited_downtime::static_type(), + extcmd::pb_ba_info::static_type(), + pb_services_book_state::static_type()}; static constexpr multiplexing::muxer_filter _monitoring_forbidden_filter = multiplexing::muxer_filter(_monitoring_stream_filter).reverse(); diff --git a/broker/bam/src/monitoring_stream.cc b/broker/bam/src/monitoring_stream.cc index 4d83c740251..1b7d8b0cf53 100644 --- a/broker/bam/src/monitoring_stream.cc +++ b/broker/bam/src/monitoring_stream.cc @@ -23,23 +23,15 @@ #include "bbdo/bam/ba_status.hh" #include "bbdo/bam/kpi_status.hh" #include "bbdo/bam/rebuild.hh" -#include "bbdo/events.hh" #include "com/centreon/broker/bam/configuration/reader_v2.hh" -#include "com/centreon/broker/bam/configuration/state.hh" #include "com/centreon/broker/bam/event_cache_visitor.hh" 
#include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/exceptions/shutdown.hh" -#include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/misc/fifo_client.hh" -#include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/acknowledgement.hh" #include "com/centreon/broker/neb/downtime.hh" -#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service.hh" -#include "com/centreon/broker/neb/service_status.hh" -#include "com/centreon/broker/timestamp.hh" #include "com/centreon/common/pool.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; @@ -410,6 +402,18 @@ int monitoring_stream::write(const std::shared_ptr& data) { _applier.book_service().update(ss, &ev_cache); ev_cache.commit_to(pblshr); } break; + case neb::pb_adaptive_service_status::static_type(): { + auto ss = std::static_pointer_cast(data); + auto& o = ss->obj(); + SPDLOG_LOGGER_TRACE(_logger, + "BAM: processing pb adaptive service status (host: " + "{}, service: {})", + o.host_id(), o.service_id()); + multiplexing::publisher pblshr; + event_cache_visitor ev_cache; + _applier.book_service().update(ss, &ev_cache); + ev_cache.commit_to(pblshr); + } break; case neb::pb_service::static_type(): { auto s = std::static_pointer_cast(data); auto& o = s->obj(); diff --git a/broker/bam/src/reporting_stream.cc b/broker/bam/src/reporting_stream.cc index e159484af9e..7f478546b80 100644 --- a/broker/bam/src/reporting_stream.cc +++ b/broker/bam/src/reporting_stream.cc @@ -1113,7 +1113,7 @@ void reporting_stream::_process_pb_ba_event( id_start ba_key = std::make_pair(be.ba_id(), be.start_time()); // event exists? if (_ba_event_cache.find(ba_key) != _ba_event_cache.end()) { - if (be.end_time() <= 0) + if (static_cast(be.end_time()) <= 0) _ba_event_update.bind_null_u64(0); else _ba_event_update.bind_value_as_u64(0, be.end_time()); @@ -1135,7 +1135,7 @@ void reporting_stream::_process_pb_ba_event( _ba_full_event_insert.bind_value_as_i32(1, be.first_level()); _ba_full_event_insert.bind_value_as_u64(2, be.start_time()); - if (be.end_time() <= 0) + if (static_cast(be.end_time()) <= 0) _ba_full_event_insert.bind_null_i64(3); else _ba_full_event_insert.bind_value_as_i64(3, be.end_time()); diff --git a/broker/bam/src/service_book.cc b/broker/bam/src/service_book.cc index f80f5c927a4..c737fafd9d4 100644 --- a/broker/bam/src/service_book.cc +++ b/broker/bam/src/service_book.cc @@ -160,6 +160,25 @@ void service_book::update(const std::shared_ptr& t, l->service_update(t, visitor); } +/** + * @brief Propagate events of type neb::service_status to the concerned services + * and then to the corresponding kpi. + * + * @param t The event to handle. + * @param visitor The stream to write into. + */ +void service_book::update( + const std::shared_ptr& t, + io::stream* visitor) { + auto obj = t->obj(); + auto found = _book.find(std::make_pair(obj.host_id(), obj.service_id())); + if (found == _book.end()) + return; + + for (auto l : found->second.listeners) + l->service_update(t, visitor); +} + /** * @brief Propagate events of type pb_service to the * concerned services and then to the corresponding kpi. 
diff --git a/broker/bam/src/service_listener.cc b/broker/bam/src/service_listener.cc index 61f60b9564f..89dd1407d5a 100644 --- a/broker/bam/src/service_listener.cc +++ b/broker/bam/src/service_listener.cc @@ -59,6 +59,16 @@ void service_listener::service_update( const std::shared_ptr& status [[maybe_unused]], io::stream* visitor [[maybe_unused]]) {} +/** + * @brief Notify of a service status update (usually used for downtimes). + * + * @param [[maybe_unused]] + * @param [[maybe_unused]] + */ +void service_listener::service_update( + const std::shared_ptr& status + [[maybe_unused]], + io::stream* visitor [[maybe_unused]]) {} /** * Notify of a protobuf acknowledgement. * diff --git a/broker/core/inc/com/centreon/broker/config/parser.hh b/broker/core/inc/com/centreon/broker/config/parser.hh index e6bbb93bd2a..c40f2f4eb03 100644 --- a/broker/core/inc/com/centreon/broker/config/parser.hh +++ b/broker/core/inc/com/centreon/broker/config/parser.hh @@ -21,7 +21,6 @@ #include -#include #include "com/centreon/broker/config/state.hh" #include "com/centreon/exceptions/msg_fmt.hh" diff --git a/broker/core/inc/com/centreon/broker/config/state.hh b/broker/core/inc/com/centreon/broker/config/state.hh index 2b724d2459e..9165f4229e6 100644 --- a/broker/core/inc/com/centreon/broker/config/state.hh +++ b/broker/core/inc/com/centreon/broker/config/state.hh @@ -19,7 +19,6 @@ #ifndef CCB_CONFIG_STATE_HH #define CCB_CONFIG_STATE_HH -#include #include "bbdo/bbdo/bbdo_version.hh" #include "com/centreon/broker/config/endpoint.hh" diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh index efe6140ab21..6dd4b5147c9 100644 --- a/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh +++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh @@ -19,7 +19,6 @@ #ifndef CCB_MYSQL_CONNECTION_HH #define CCB_MYSQL_CONNECTION_HH -#include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/sql/database_config.hh" #include "com/centreon/broker/sql/mysql_bulk_stmt.hh" #include "com/centreon/broker/sql/mysql_error.hh" diff --git a/broker/grpc/generate_proto.py b/broker/grpc/generate_proto.py index c544b07605f..4c158efcf60 100755 --- a/broker/grpc/generate_proto.py +++ b/broker/grpc/generate_proto.py @@ -195,28 +195,46 @@ class received_protobuf : public io::protobuf { for line in proto_file.readlines(): line_counter += 1 m = re.match(message_parser, line) - if m is not None and io_protobuf_match is not None: - messages.append([m.group(1), io_protobuf_match.group(1), io_protobuf_match.group(2)]) + if m and io_protobuf_match: + # Check that the message and the io_protobuf_match are coherent + # Let's take the message name and remove the de_pb_ prefix if it exists + message_name = io_protobuf_match.group(1).split(',')[ + 1].split('::')[1] + message_name = message_name[3:] if message_name.startswith( + 'de_') else message_name + message_name = message_name[3:] if message_name.startswith( + 'pb_') else message_name + # Let's change the name into SnakeCase + message_name = ''.join(word.title() + for word in message_name.split('_')) + if m.group(1) != message_name: + print( + f"generate_proto.py : Error: Message {{ {m.group(1)} }} does not match the io_protobuf_match {{ {io_protobuf_match[1]} }} : file :{file}:{line_counter}", file=sys.stderr) + exit(2) + messages.append( + [m.group(1), io_protobuf_match.group(1), io_protobuf_match.group(2)]) io_protobuf_match = None flag_ignore = True else: 
io_protobuf_match = re.match(io_protobuf_parser, line) - #check if no bbo message have the comment: Ignore + # check if no bbo message have the comment: Ignore if ignore_message in line: flag_ignore = True - #check if message have comment ignore or it's bbo message - if flag_ignore and m is not None: + # check if message has comment ignore or it's bbdo message + if flag_ignore and m: flag_ignore = False - elif not flag_ignore and m is not None : - print (f"generate_proto.py : Error: Message {{ {m.group(1)} }} has no protobuf id or missing the comment /* Ignore */ : file :{file}:{line_counter}",file=sys.stderr) - print (f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state*/",file=sys.stderr) + elif not flag_ignore and m: + print( + f"generate_proto.py : Error: Message {{ {m.group(1)} }} has no protobuf id or missing the comment /* Ignore */ : file :{file}:{line_counter}", file=sys.stderr) + print( + f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state*/", file=sys.stderr) exit(1) - + if len(messages) > 0: - file_begin_content += f"import \"{file}\";\n" - message_save += messages -#sort the message with index (io_protobuf_match.group(2)) + file_begin_content += f"import \"{file}\";\n" + message_save += messages +# sort the message with index (io_protobuf_match.group(2)) message_save.sort(key=lambda x: int(x[2])) for mess, id, index in message_save: # proto file @@ -240,8 +258,8 @@ class received_protobuf : public io::protobuf { """ -#The following message is not in bbdo protobuff files so we need to add manually. - +# The following message is not in bbdo protobuff files so we need to add manually. + file_message_centreon_event += f" opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest ExportMetricsServiceRequest_ = {one_of_index};\n" cc_file_protobuf_to_event_function += """ diff --git a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh index d9f04f48fe2..1b3f64ba22c 100644 --- a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh +++ b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh @@ -27,7 +27,6 @@ #include "com/centreon/broker/neb/host_group.hh" #include "com/centreon/broker/neb/host_group_member.hh" #include "com/centreon/broker/neb/instance.hh" -#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service.hh" #include "com/centreon/broker/neb/service_group.hh" #include "com/centreon/broker/neb/service_group_member.hh" @@ -42,7 +41,7 @@ namespace com::centreon::broker::lua { class macro_cache { std::shared_ptr _cache; absl::flat_hash_map> _instances; - absl::flat_hash_map> _hosts; + absl::flat_hash_map> _hosts; /* The host groups cache stores also a set with the pollers telling they need * the cache. So if no more poller needs a host group, we can remove it from * the cache. */ @@ -54,7 +53,8 @@ class macro_cache { _host_group_members; absl::flat_hash_map, std::shared_ptr> _custom_vars; - absl::flat_hash_map, std::shared_ptr> + absl::flat_hash_map, + std::shared_ptr> _services; /* The service groups cache stores also a set with the pollers telling they * need the cache. 
So if no more poller needs a service group, we can remove @@ -89,8 +89,8 @@ class macro_cache { const storage::pb_index_mapping& get_index_mapping(uint64_t index_id) const; const std::shared_ptr& get_metric_mapping( uint64_t metric_id) const; - const std::shared_ptr& get_host(uint64_t host_id) const; - const std::shared_ptr& get_service(uint64_t host_id, + const std::shared_ptr& get_host(uint64_t host_id) const; + const std::shared_ptr& get_service(uint64_t host_id, uint64_t service_id) const; const std::string& get_host_name(uint64_t host_id) const; const std::string& get_notes_url(uint64_t host_id, uint64_t service_id) const; @@ -129,6 +129,7 @@ class macro_cache { void _process_host(std::shared_ptr const& data); void _process_pb_host(std::shared_ptr const& data); void _process_pb_host_status(std::shared_ptr const& data); + void _process_pb_adaptive_host_status(const std::shared_ptr& data); void _process_pb_adaptive_host(std::shared_ptr const& data); void _process_host_group(std::shared_ptr const& data); void _process_pb_host_group(std::shared_ptr const& data); @@ -138,7 +139,9 @@ class macro_cache { void _process_pb_custom_variable(std::shared_ptr const& data); void _process_service(std::shared_ptr const& data); void _process_pb_service(std::shared_ptr const& data); - void _process_pb_service_status(std::shared_ptr const& data); + void _process_pb_service_status(const std::shared_ptr& data); + void _process_pb_adaptive_service_status( + const std::shared_ptr& data); void _process_pb_adaptive_service(std::shared_ptr const& data); void _process_service_group(std::shared_ptr const& data); void _process_pb_service_group(std::shared_ptr const& data); diff --git a/broker/lua/src/macro_cache.cc b/broker/lua/src/macro_cache.cc index 1c92d24cb31..139f7c8c37c 100644 --- a/broker/lua/src/macro_cache.cc +++ b/broker/lua/src/macro_cache.cc @@ -18,14 +18,12 @@ #include "com/centreon/broker/lua/macro_cache.hh" #include -#include +#include #include "bbdo/bam/dimension_ba_bv_relation_event.hh" #include "bbdo/bam/dimension_ba_event.hh" #include "bbdo/bam/dimension_bv_event.hh" #include "bbdo/storage/index_mapping.hh" #include "bbdo/storage/metric_mapping.hh" -#include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::exceptions; @@ -101,7 +99,7 @@ macro_cache::get_metric_mapping(uint64_t metric_id) const { * * @return A shared pointer on the service. */ -const std::shared_ptr& macro_cache::get_service( +const std::shared_ptr& macro_cache::get_service( uint64_t host_id, uint64_t service_id) const { auto found = _services.find({host_id, service_id}); @@ -119,7 +117,8 @@ const std::shared_ptr& macro_cache::get_service( * * @return A shared pointer on the host. 
*/ -const std::shared_ptr& macro_cache::get_host(uint64_t host_id) const { +const std::shared_ptr& macro_cache::get_host( + uint64_t host_id) const { auto found = _hosts.find(host_id); if (found == _hosts.end()) @@ -141,13 +140,7 @@ std::string const& macro_cache::get_host_name(uint64_t host_id) const { if (found == _hosts.end()) throw msg_fmt("lua: could not find information on host {}", host_id); - if (found->second->type() == neb::host::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->host_name; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().name(); - } + return found->second->obj().name(); } /** @@ -210,13 +203,8 @@ std::string_view macro_cache::get_check_command(uint64_t host_id, "lua: could not find the check command of the service (host_id: {}, " "service_id: {})", host_id, service_id); - if (found->second->type() == neb::service::static_type()) { - neb::service& s = static_cast(*found->second); - retval = s.check_command; - } else { - neb::pb_service& s = static_cast(*found->second); - retval = s.obj().check_command(); - } + neb::pb_service& s = static_cast(*found->second); + retval = s.obj().check_command(); } /* Case of hosts */ else { @@ -225,13 +213,8 @@ std::string_view macro_cache::get_check_command(uint64_t host_id, throw msg_fmt( "lua: could not find the check command of the host (host_id: {})", host_id); - if (found->second->type() == neb::host::static_type()) { - neb::host& s = static_cast(*found->second); - retval = s.check_command; - } else { - neb::pb_host& s = static_cast(*found->second); - retval = s.obj().check_command(); - } + neb::pb_host& s = static_cast(*found->second); + retval = s.obj().check_command(); } return retval; } @@ -252,26 +235,14 @@ std::string const& macro_cache::get_notes_url(uint64_t host_id, if (found == _services.end()) throw msg_fmt("lua: could not find information on service ({}, {})", host_id, service_id); - if (found->second->type() == neb::service::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->notes_url; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().notes_url(); - } + return found->second->obj().notes_url(); } else { auto found = _hosts.find(host_id); if (found == _hosts.end()) throw msg_fmt("lua: could not find information on host {}", host_id); - if (found->second->type() == neb::host::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->notes_url; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().notes_url(); - } + return found->second->obj().notes_url(); } } @@ -291,26 +262,14 @@ std::string const& macro_cache::get_action_url(uint64_t host_id, if (found == _services.end()) throw msg_fmt("lua: could not find information on service ({}, {})", host_id, service_id); - if (found->second->type() == neb::service::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->action_url; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().action_url(); - } + return found->second->obj().action_url(); } else { auto found = _hosts.find(host_id); if (found == _hosts.end()) throw msg_fmt("lua: could not find information on host {}", host_id); - if (found->second->type() == neb::host::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->action_url; - } else { - auto const& s = std::static_pointer_cast(found->second); - 
return s->obj().action_url(); - } + return found->second->obj().action_url(); } } @@ -330,26 +289,13 @@ std::string const& macro_cache::get_notes(uint64_t host_id, if (found == _services.end()) throw msg_fmt("lua: cound not find information on service ({}, {})", host_id, service_id); - if (found->second->type() == neb::service::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->notes; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().notes(); - } + return found->second->obj().notes(); } else { auto found = _hosts.find(host_id); if (found == _hosts.end()) throw msg_fmt("lua: could not find information on host {}", host_id); - - if (found->second->type() == neb::host::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->notes; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().notes(); - } + return found->second->obj().notes(); } } @@ -396,13 +342,7 @@ std::string const& macro_cache::get_service_description( if (found == _services.end()) throw msg_fmt("lua: could not find information on service ({}, {})", host_id, service_id); - if (found->second->type() == neb::service::static_type()) { - auto const& s = std::static_pointer_cast(found->second); - return s->service_description; - } else { - auto const& s = std::static_pointer_cast(found->second); - return s->obj().description(); - } + return found->second->obj().description(); } /** @@ -529,6 +469,9 @@ void macro_cache::write(std::shared_ptr const& data) { case neb::pb_adaptive_host::static_type(): _process_pb_adaptive_host(data); break; + case neb::pb_adaptive_host_status::static_type(): + _process_pb_adaptive_host_status(data); + break; case neb::host_group::static_type(): _process_host_group(data); break; @@ -550,6 +493,9 @@ void macro_cache::write(std::shared_ptr const& data) { case neb::pb_service_status::static_type(): _process_pb_service_status(data); break; + case neb::pb_adaptive_service_status::static_type(): + _process_pb_adaptive_service_status(data); + break; case neb::pb_adaptive_service::static_type(): _process_pb_adaptive_service(data); break; @@ -632,14 +578,105 @@ void macro_cache::_process_pb_instance(std::shared_ptr const& data) { * * @param h The event. 
*/ -void macro_cache::_process_host(std::shared_ptr const& data) { - std::shared_ptr const& h = +void macro_cache::_process_host(const std::shared_ptr& data) { + const std::shared_ptr& h = std::static_pointer_cast(data); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host '{}' of id {}", h->host_name, h->host_id); - if (h->enabled) - _hosts[h->host_id] = data; - else + if (h->enabled) { + auto found = _hosts.find(h->host_id); + if (found == _hosts.end()) { + auto new_host = std::make_shared(); + _hosts[h->host_id] = new_host; + found = _hosts.find(h->host_id); + } + Host& current_host = + std::static_pointer_cast(found->second)->mut_obj(); + current_host.set_host_id(h->host_id); + current_host.set_acknowledged(h->acknowledged); + current_host.set_acknowledgement_type( + static_cast(h->acknowledgement_type)); + current_host.set_active_checks(h->active_checks_enabled); + current_host.set_enabled(h->enabled); + current_host.set_scheduled_downtime_depth(h->downtime_depth); + current_host.set_check_command(h->check_command); + current_host.set_check_interval(h->check_interval); + current_host.set_check_period(h->check_period); + current_host.set_check_type(static_cast(h->check_type)); + current_host.set_check_attempt(h->current_check_attempt); + current_host.set_state(static_cast(h->current_state)); + current_host.set_event_handler_enabled(h->event_handler_enabled); + current_host.set_event_handler(h->event_handler); + current_host.set_execution_time(h->execution_time); + current_host.set_flap_detection(h->default_flap_detection_enabled); + current_host.set_checked(h->has_been_checked); + current_host.set_flapping(h->is_flapping); + current_host.set_last_check(h->last_check); + current_host.set_last_hard_state( + static_cast(h->last_hard_state)); + current_host.set_last_hard_state_change(h->last_hard_state_change); + current_host.set_last_notification(h->last_notification); + current_host.set_notification_number(h->notification_number); + current_host.set_last_state_change(h->last_state_change); + current_host.set_last_time_down(h->last_time_down); + current_host.set_last_time_unreachable(h->last_time_unreachable); + current_host.set_last_time_up(h->last_time_up); + current_host.set_last_update(h->last_update); + current_host.set_latency(h->latency); + current_host.set_max_check_attempts(h->max_check_attempts); + current_host.set_next_check(h->next_check); + current_host.set_next_host_notification(h->next_notification); + current_host.set_no_more_notifications(h->no_more_notifications); + current_host.set_notify(h->notifications_enabled); + current_host.set_output(h->output); + current_host.set_passive_checks(h->passive_checks_enabled); + current_host.set_percent_state_change(h->percent_state_change); + current_host.set_perfdata(h->perf_data); + current_host.set_retry_interval(h->retry_interval); + current_host.set_should_be_scheduled(h->should_be_scheduled); + current_host.set_obsess_over_host(h->obsess_over); + current_host.set_state_type(static_cast(h->state_type)); + current_host.set_action_url(h->action_url); + current_host.set_address(h->address); + current_host.set_alias(h->alias); + current_host.set_check_freshness(h->check_freshness); + current_host.set_default_active_checks(h->default_active_checks_enabled); + current_host.set_default_event_handler_enabled( + h->default_event_handler_enabled); + current_host.set_default_flap_detection(h->default_flap_detection_enabled); + current_host.set_default_notify(h->default_notifications_enabled); + 
current_host.set_default_passive_checks(h->default_passive_checks_enabled); + current_host.set_display_name(h->display_name); + current_host.set_first_notification_delay(h->first_notification_delay); + current_host.set_flap_detection_on_down(h->flap_detection_on_down); + current_host.set_flap_detection_on_unreachable( + h->flap_detection_on_unreachable); + current_host.set_flap_detection_on_up(h->flap_detection_on_up); + current_host.set_freshness_threshold(h->freshness_threshold); + current_host.set_high_flap_threshold(h->high_flap_threshold); + current_host.set_low_flap_threshold(h->low_flap_threshold); + current_host.set_name(h->host_name); + current_host.set_icon_image(h->icon_image); + current_host.set_icon_image_alt(h->icon_image_alt); + current_host.set_instance_id(h->poller_id); + current_host.set_notes(h->notes); + current_host.set_notes_url(h->notes_url); + current_host.set_notification_interval(h->notification_interval); + current_host.set_notification_period(h->notification_period); + current_host.set_notify_on_down(h->notify_on_down); + current_host.set_notify_on_downtime(h->notify_on_downtime); + current_host.set_notify_on_flapping(h->notify_on_flapping); + current_host.set_notify_on_recovery(h->notify_on_recovery); + current_host.set_notify_on_unreachable(h->notify_on_unreachable); + current_host.set_stalk_on_down(h->stalk_on_down); + current_host.set_stalk_on_unreachable(h->stalk_on_unreachable); + current_host.set_stalk_on_up(h->stalk_on_up); + current_host.set_statusmap_image(h->statusmap_image); + current_host.set_retain_nonstatus_information( + h->retain_nonstatus_information); + current_host.set_retain_status_information(h->retain_status_information); + current_host.set_timezone(h->timezone); + } else _hosts.erase(h->host_id); } @@ -649,12 +686,11 @@ void macro_cache::_process_host(std::shared_ptr const& data) { * @param h The event. 
*/ void macro_cache::_process_pb_host(std::shared_ptr const& data) { - std::shared_ptr const& h = - std::static_pointer_cast(data); + const auto& h = std::static_pointer_cast(data); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host '{}' of id {}", h->obj().name(), h->obj().host_id()); if (h->obj().enabled()) - _hosts[h->obj().host_id()] = data; + _hosts[h->obj().host_id()] = h; else _hosts.erase(h->obj().host_id()); } @@ -676,67 +712,67 @@ void macro_cache::_process_pb_host_status( return; } - if (it->second->type() == make_type(io::neb, neb::de_host)) { - auto& hst = *std::static_pointer_cast(it->second); - hst.has_been_checked = obj.checked(); - hst.check_type = obj.check_type(); - hst.current_state = obj.state(); - hst.state_type = obj.state_type(); - hst.last_state_change = obj.last_state_change(); - hst.last_hard_state = obj.last_hard_state(); - hst.last_hard_state_change = obj.last_hard_state_change(); - hst.last_time_up = obj.last_time_up(); - hst.last_time_down = obj.last_time_down(); - hst.last_time_unreachable = obj.last_time_unreachable(); - hst.output = obj.output(); - hst.perf_data = obj.perfdata(); - hst.is_flapping = obj.flapping(); - hst.percent_state_change = obj.percent_state_change(); - hst.latency = obj.latency(); - hst.execution_time = obj.execution_time(); - hst.last_check = obj.last_check(); - hst.next_check = obj.next_check(); - hst.should_be_scheduled = obj.should_be_scheduled(); - hst.current_check_attempt = obj.check_attempt(); - hst.notification_number = obj.notification_number(); - hst.no_more_notifications = obj.no_more_notifications(); - hst.last_notification = obj.last_notification(); - hst.next_notification = obj.next_host_notification(); - hst.acknowledgement_type = obj.acknowledgement_type(); - hst.downtime_depth = obj.scheduled_downtime_depth(); - } else if (it->second->type() == make_type(io::neb, neb::de_pb_host)) { - auto& hst = std::static_pointer_cast(it->second)->mut_obj(); - hst.set_checked(obj.checked()); - hst.set_check_type(static_cast(obj.check_type())); - hst.set_state(static_cast(obj.state())); - hst.set_state_type(static_cast(obj.state_type())); - hst.set_last_state_change(obj.last_state_change()); - hst.set_last_hard_state(static_cast(obj.last_hard_state())); - hst.set_last_hard_state_change(obj.last_hard_state_change()); - hst.set_last_time_up(obj.last_time_up()); - hst.set_last_time_down(obj.last_time_down()); - hst.set_last_time_unreachable(obj.last_time_unreachable()); - hst.set_output(obj.output()); - hst.set_perfdata(obj.perfdata()); - hst.set_flapping(obj.flapping()); - hst.set_percent_state_change(obj.percent_state_change()); - hst.set_latency(obj.latency()); - hst.set_execution_time(obj.execution_time()); - hst.set_last_check(obj.last_check()); - hst.set_next_check(obj.next_check()); - hst.set_should_be_scheduled(obj.should_be_scheduled()); - hst.set_check_attempt(obj.check_attempt()); - hst.set_notification_number(obj.notification_number()); - hst.set_no_more_notifications(obj.no_more_notifications()); - hst.set_last_notification(obj.last_notification()); - hst.set_next_host_notification(obj.next_host_notification()); - hst.set_acknowledgement_type(obj.acknowledgement_type()); - hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); - } else { - _cache->logger()->error("lua: The host ({}) stored in cache is corrupted", - obj.host_id()); + auto& hst = std::static_pointer_cast(it->second)->mut_obj(); + hst.set_checked(obj.checked()); + hst.set_check_type(static_cast(obj.check_type())); + 
hst.set_state(static_cast(obj.state())); + hst.set_state_type(static_cast(obj.state_type())); + hst.set_last_state_change(obj.last_state_change()); + hst.set_last_hard_state(static_cast(obj.last_hard_state())); + hst.set_last_hard_state_change(obj.last_hard_state_change()); + hst.set_last_time_up(obj.last_time_up()); + hst.set_last_time_down(obj.last_time_down()); + hst.set_last_time_unreachable(obj.last_time_unreachable()); + hst.set_output(obj.output()); + hst.set_perfdata(obj.perfdata()); + hst.set_flapping(obj.flapping()); + hst.set_percent_state_change(obj.percent_state_change()); + hst.set_latency(obj.latency()); + hst.set_execution_time(obj.execution_time()); + hst.set_last_check(obj.last_check()); + hst.set_next_check(obj.next_check()); + hst.set_should_be_scheduled(obj.should_be_scheduled()); + hst.set_check_attempt(obj.check_attempt()); + hst.set_notification_number(obj.notification_number()); + hst.set_no_more_notifications(obj.no_more_notifications()); + hst.set_last_notification(obj.last_notification()); + hst.set_next_host_notification(obj.next_host_notification()); + hst.set_acknowledgement_type(obj.acknowledgement_type()); + hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); +} + +/** + * @brief Process a pb adaptive host event. + * + * @param data An AdaptiveHostStatus event. + */ +void macro_cache::_process_pb_adaptive_host_status( + const std::shared_ptr& data) { + const auto& s = std::static_pointer_cast(data); + const auto& obj = s->obj(); + + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing adaptive host status ({})", + obj.host_id()); + + auto it = _hosts.find(obj.host_id()); + if (it == _hosts.end()) { + _cache->logger()->warn( + "lua: Attempt to update host ({}) in lua cache, but it does not " + "exist. Maybe Engine should be restarted to update the cache.", + obj.host_id()); + return; } + + auto& hst = std::static_pointer_cast(it->second)->mut_obj(); + if (obj.has_scheduled_downtime_depth()) + hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); + if (obj.has_acknowledgement_type()) + hst.set_acknowledgement_type(obj.acknowledgement_type()); + if (obj.has_notification_number()) + hst.set_notification_number(obj.notification_number()); } + /** * Process a pb adaptive host event. 
* @@ -750,71 +786,37 @@ void macro_cache::_process_pb_adaptive_host( auto& ah = h->obj(); auto it = _hosts.find(ah.host_id()); if (it != _hosts.end()) { - if (it->second->type() == make_type(io::neb, neb::de_host)) { - auto& h = *std::static_pointer_cast(it->second); - if (ah.has_notify()) - h.notifications_enabled = ah.notify(); - if (ah.has_active_checks()) - h.active_checks_enabled = ah.active_checks(); - if (ah.has_should_be_scheduled()) - h.should_be_scheduled = ah.should_be_scheduled(); - if (ah.has_passive_checks()) - h.passive_checks_enabled = ah.passive_checks(); - if (ah.has_event_handler_enabled()) - h.event_handler_enabled = ah.event_handler_enabled(); - if (ah.has_flap_detection()) - h.flap_detection_enabled = ah.flap_detection(); - if (ah.has_obsess_over_host()) - h.obsess_over = ah.obsess_over_host(); - if (ah.has_event_handler()) - h.event_handler = ah.event_handler(); - if (ah.has_check_command()) - h.check_command = ah.check_command(); - if (ah.has_check_interval()) - h.check_interval = ah.check_interval(); - if (ah.has_retry_interval()) - h.retry_interval = ah.retry_interval(); - if (ah.has_max_check_attempts()) - h.max_check_attempts = ah.max_check_attempts(); - if (ah.has_check_freshness()) - h.check_freshness = ah.check_freshness(); - if (ah.has_check_period()) - h.check_period = ah.check_period(); - if (ah.has_notification_period()) - h.notification_period = ah.notification_period(); - } else { - auto& h = std::static_pointer_cast(it->second)->mut_obj(); - if (ah.has_notify()) - h.set_notify(ah.notify()); - if (ah.has_active_checks()) - h.set_active_checks(ah.active_checks()); - if (ah.has_should_be_scheduled()) - h.set_should_be_scheduled(ah.should_be_scheduled()); - if (ah.has_passive_checks()) - h.set_passive_checks(ah.passive_checks()); - if (ah.has_event_handler_enabled()) - h.set_event_handler_enabled(ah.event_handler_enabled()); - if (ah.has_flap_detection()) - h.set_flap_detection(ah.flap_detection()); - if (ah.has_obsess_over_host()) - h.set_obsess_over_host(ah.obsess_over_host()); - if (ah.has_event_handler()) - h.set_event_handler(ah.event_handler()); - if (ah.has_check_command()) - h.set_check_command(ah.check_command()); - if (ah.has_check_interval()) - h.set_check_interval(ah.check_interval()); - if (ah.has_retry_interval()) - h.set_retry_interval(ah.retry_interval()); - if (ah.has_max_check_attempts()) - h.set_max_check_attempts(ah.max_check_attempts()); - if (ah.has_check_freshness()) - h.set_check_freshness(ah.check_freshness()); - if (ah.has_check_period()) - h.set_check_period(ah.check_period()); - if (ah.has_notification_period()) - h.set_notification_period(ah.notification_period()); - } + auto& h = it->second->mut_obj(); + if (ah.has_notify()) + h.set_notify(ah.notify()); + if (ah.has_active_checks()) + h.set_active_checks(ah.active_checks()); + if (ah.has_should_be_scheduled()) + h.set_should_be_scheduled(ah.should_be_scheduled()); + if (ah.has_passive_checks()) + h.set_passive_checks(ah.passive_checks()); + if (ah.has_event_handler_enabled()) + h.set_event_handler_enabled(ah.event_handler_enabled()); + if (ah.has_flap_detection()) + h.set_flap_detection(ah.flap_detection()); + if (ah.has_obsess_over_host()) + h.set_obsess_over_host(ah.obsess_over_host()); + if (ah.has_event_handler()) + h.set_event_handler(ah.event_handler()); + if (ah.has_check_command()) + h.set_check_command(ah.check_command()); + if (ah.has_check_interval()) + h.set_check_interval(ah.check_interval()); + if (ah.has_retry_interval()) + 
h.set_retry_interval(ah.retry_interval()); + if (ah.has_max_check_attempts()) + h.set_max_check_attempts(ah.max_check_attempts()); + if (ah.has_check_freshness()) + h.set_check_freshness(ah.check_freshness()); + if (ah.has_check_period()) + h.set_check_period(ah.check_period()); + if (ah.has_notification_period()) + h.set_notification_period(ah.notification_period()); } else SPDLOG_LOGGER_WARN( _cache->logger(), @@ -958,9 +960,137 @@ void macro_cache::_process_service(std::shared_ptr const& data) { SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing service ({}, {}) (description:{})", s->host_id, s->service_id, s->service_description); - if (s->enabled) - _services[{s->host_id, s->service_id}] = data; - else + if (s->enabled) { + auto found = _services.find({s->host_id, s->service_id}); + if (found == _services.end()) { + auto new_service = std::make_shared(); + _services[{s->host_id, s->service_id}] = new_service; + found = _services.find({s->host_id, s->service_id}); + } + Service& current_service = + std::static_pointer_cast(found->second)->mut_obj(); + current_service.set_host_id(s->host_id); + current_service.set_service_id(s->service_id); + current_service.set_acknowledged(s->acknowledged); + current_service.set_acknowledgement_type( + static_cast(s->acknowledgement_type)); + current_service.set_active_checks(s->active_checks_enabled); + current_service.set_enabled(s->enabled); + current_service.set_scheduled_downtime_depth(s->downtime_depth); + current_service.set_check_command(s->check_command); + current_service.set_check_interval(s->check_interval); + current_service.set_check_period(s->check_period); + current_service.set_check_type( + static_cast(s->check_type)); + current_service.set_check_attempt(s->current_check_attempt); + current_service.set_state(static_cast(s->current_state)); + current_service.set_event_handler_enabled(s->event_handler_enabled); + current_service.set_event_handler(s->event_handler); + current_service.set_execution_time(s->execution_time); + current_service.set_flap_detection(s->default_flap_detection_enabled); + current_service.set_checked(s->has_been_checked); + current_service.set_flapping(s->is_flapping); + current_service.set_last_check(s->last_check); + current_service.set_last_hard_state( + static_cast(s->last_hard_state)); + current_service.set_last_hard_state_change(s->last_hard_state_change); + current_service.set_last_notification(s->last_notification); + current_service.set_notification_number(s->notification_number); + current_service.set_last_state_change(s->last_state_change); + current_service.set_last_time_ok(s->last_time_ok); + current_service.set_last_time_warning(s->last_time_warning); + current_service.set_last_time_critical(s->last_time_critical); + current_service.set_last_time_unknown(s->last_time_unknown); + current_service.set_last_update(s->last_update); + current_service.set_latency(s->latency); + current_service.set_max_check_attempts(s->max_check_attempts); + current_service.set_next_check(s->next_check); + current_service.set_next_notification(s->next_notification); + current_service.set_no_more_notifications(s->no_more_notifications); + current_service.set_notify(s->notifications_enabled); + std::string_view long_output = s->output; + std::vector output = + absl::StrSplit(long_output, absl::MaxSplits('\n', 2)); + switch (output.size()) { + case 2: + current_service.set_long_output(std::string(output[1])); + case 1: + current_service.set_output(std::string(output[0])); + break; + } + 
current_service.set_passive_checks(s->passive_checks_enabled); + current_service.set_percent_state_change(s->percent_state_change); + current_service.set_perfdata(s->perf_data); + current_service.set_retry_interval(s->retry_interval); + current_service.set_host_name(s->host_name); + current_service.set_description(s->service_description); + current_service.set_should_be_scheduled(s->should_be_scheduled); + current_service.set_obsess_over_service(s->obsess_over); + current_service.set_state_type( + static_cast(s->state_type)); + current_service.set_action_url(s->action_url); + current_service.set_check_freshness(s->check_freshness); + current_service.set_default_active_checks(s->default_active_checks_enabled); + current_service.set_default_event_handler_enabled( + s->default_event_handler_enabled); + current_service.set_default_flap_detection( + s->default_flap_detection_enabled); + current_service.set_default_notify(s->default_notifications_enabled); + current_service.set_default_passive_checks( + s->default_passive_checks_enabled); + current_service.set_display_name(s->display_name); + current_service.set_first_notification_delay(s->first_notification_delay); + current_service.set_flap_detection_on_critical( + s->flap_detection_on_critical); + current_service.set_flap_detection_on_ok(s->flap_detection_on_ok); + current_service.set_flap_detection_on_unknown(s->flap_detection_on_unknown); + current_service.set_flap_detection_on_warning(s->flap_detection_on_warning); + current_service.set_freshness_threshold(s->freshness_threshold); + current_service.set_high_flap_threshold(s->high_flap_threshold); + current_service.set_low_flap_threshold(s->low_flap_threshold); + current_service.set_icon_image(s->icon_image); + current_service.set_icon_image_alt(s->icon_image_alt); + current_service.set_is_volatile(s->is_volatile); + current_service.set_notes(s->notes); + current_service.set_notes_url(s->notes_url); + current_service.set_notification_interval(s->notification_interval); + current_service.set_notification_period(s->notification_period); + current_service.set_notify_on_critical(s->notify_on_critical); + current_service.set_notify_on_downtime(s->notify_on_downtime); + current_service.set_notify_on_flapping(s->notify_on_flapping); + current_service.set_notify_on_recovery(s->notify_on_recovery); + current_service.set_notify_on_unknown(s->notify_on_unknown); + current_service.set_notify_on_warning(s->notify_on_warning); + current_service.set_stalk_on_critical(s->stalk_on_critical); + current_service.set_stalk_on_ok(s->stalk_on_ok); + current_service.set_stalk_on_unknown(s->stalk_on_unknown); + current_service.set_stalk_on_warning(s->stalk_on_warning); + current_service.set_retain_nonstatus_information( + s->retain_nonstatus_information); + current_service.set_retain_status_information(s->retain_status_information); + if (std::string_view(current_service.host_name().data(), 12) == + "_Module_Meta") { + if (std::string_view(current_service.description().data(), 5) == + "meta_") { + current_service.set_type(METASERVICE); + uint64_t iid; + std::string_view id = + std::string_view(current_service.description()).substr(5); + if (absl::SimpleAtoi(id, &iid)) + current_service.set_internal_id(iid); + } + } else if (std::string_view(current_service.host_name().data(), 11) == + "_Module_BAM") { + if (std::string_view(current_service.description().data(), 3) == "ba_") { + current_service.set_type(BA); + uint64_t iid; + std::string_view id = + std::string_view(current_service.description()).substr(3); + if 
(absl::SimpleAtoi(id, &iid)) + current_service.set_internal_id(iid); + } + } + } else _services.erase({s->host_id, s->service_id}); } @@ -975,11 +1105,44 @@ void macro_cache::_process_pb_service(std::shared_ptr const& data) { _cache->logger(), "lua: processing service ({}, {}) (description:{})", s->obj().host_id(), s->obj().service_id(), s->obj().description()); if (s->obj().enabled()) - _services[{s->obj().host_id(), s->obj().service_id()}] = data; + _services[{s->obj().host_id(), s->obj().service_id()}] = s; else _services.erase({s->obj().host_id(), s->obj().service_id()}); } +/** + * @brief Process a pb adaptive service event. + * + * @param data An AdaptiveServiceStatus event. + */ +void macro_cache::_process_pb_adaptive_service_status( + const std::shared_ptr& data) { + const auto& s = + std::static_pointer_cast(data); + const auto& obj = s->obj(); + + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing adaptive service status ({}, {})", + obj.host_id(), obj.service_id()); + + auto it = _services.find({obj.host_id(), obj.service_id()}); + if (it == _services.end()) { + _cache->logger()->warn( + "lua: Attempt to update service ({}, {}) in lua cache, but it does not " + "exist. Maybe Engine should be restarted to update the cache.", + obj.host_id(), obj.service_id()); + return; + } + + auto& svc = std::static_pointer_cast(it->second)->mut_obj(); + if (obj.has_acknowledgement_type()) + svc.set_acknowledgement_type(obj.acknowledgement_type()); + if (obj.has_scheduled_downtime_depth()) + svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); + if (obj.has_notification_number()) + svc.set_notification_number(obj.notification_number()); +} + void macro_cache::_process_pb_service_status( const std::shared_ptr& data) { const auto& s = std::static_pointer_cast(data); @@ -998,70 +1161,34 @@ void macro_cache::_process_pb_service_status( return; } - if (it->second->type() == make_type(io::neb, neb::de_service)) { - auto& svc = *std::static_pointer_cast(it->second); - svc.has_been_checked = obj.checked(); - svc.check_type = obj.check_type(); - svc.current_state = obj.state(); - svc.state_type = obj.state_type(); - svc.last_state_change = obj.last_state_change(); - svc.last_hard_state = obj.last_hard_state(); - svc.last_hard_state_change = obj.last_hard_state_change(); - svc.last_time_ok = obj.last_time_ok(); - svc.last_time_warning = obj.last_time_warning(); - svc.last_time_critical = obj.last_time_critical(); - svc.last_time_unknown = obj.last_time_unknown(); - svc.output = obj.output(); - svc.perf_data = obj.perfdata(); - svc.is_flapping = obj.flapping(); - svc.percent_state_change = obj.percent_state_change(); - svc.latency = obj.latency(); - svc.execution_time = obj.execution_time(); - svc.last_check = obj.last_check(); - svc.next_check = obj.next_check(); - svc.should_be_scheduled = obj.should_be_scheduled(); - svc.current_check_attempt = obj.check_attempt(); - svc.notification_number = obj.notification_number(); - svc.no_more_notifications = obj.no_more_notifications(); - svc.last_notification = obj.last_notification(); - svc.next_notification = obj.next_notification(); - svc.acknowledgement_type = obj.acknowledgement_type(); - svc.downtime_depth = obj.scheduled_downtime_depth(); - } else if (it->second->type() == make_type(io::neb, neb::de_pb_service)) { - auto& svc = - std::static_pointer_cast(it->second)->mut_obj(); - svc.set_checked(obj.checked()); - svc.set_check_type(static_cast(obj.check_type())); - svc.set_state(static_cast(obj.state())); - 
svc.set_state_type(static_cast(obj.state_type())); - svc.set_last_state_change(obj.last_state_change()); - svc.set_last_hard_state(static_cast(obj.last_hard_state())); - svc.set_last_hard_state_change(obj.last_hard_state_change()); - svc.set_last_time_ok(obj.last_time_ok()); - svc.set_last_time_warning(obj.last_time_warning()); - svc.set_last_time_critical(obj.last_time_critical()); - svc.set_last_time_unknown(obj.last_time_unknown()); - svc.set_output(obj.output()); - svc.set_perfdata(obj.perfdata()); - svc.set_flapping(obj.flapping()); - svc.set_percent_state_change(obj.percent_state_change()); - svc.set_latency(obj.latency()); - svc.set_execution_time(obj.execution_time()); - svc.set_last_check(obj.last_check()); - svc.set_next_check(obj.next_check()); - svc.set_should_be_scheduled(obj.should_be_scheduled()); - svc.set_check_attempt(obj.check_attempt()); - svc.set_notification_number(obj.notification_number()); - svc.set_no_more_notifications(obj.no_more_notifications()); - svc.set_last_notification(obj.last_notification()); - svc.set_next_notification(obj.next_notification()); - svc.set_acknowledgement_type(obj.acknowledgement_type()); - svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); - } else { - _cache->logger()->error( - "lua: The service ({}, {}) stored in cache is corrupted", obj.host_id(), - obj.service_id()); - } + auto& svc = it->second->mut_obj(); + svc.set_checked(obj.checked()); + svc.set_check_type(static_cast(obj.check_type())); + svc.set_state(static_cast(obj.state())); + svc.set_state_type(static_cast(obj.state_type())); + svc.set_last_state_change(obj.last_state_change()); + svc.set_last_hard_state(static_cast(obj.last_hard_state())); + svc.set_last_hard_state_change(obj.last_hard_state_change()); + svc.set_last_time_ok(obj.last_time_ok()); + svc.set_last_time_warning(obj.last_time_warning()); + svc.set_last_time_critical(obj.last_time_critical()); + svc.set_last_time_unknown(obj.last_time_unknown()); + svc.set_output(obj.output()); + svc.set_perfdata(obj.perfdata()); + svc.set_flapping(obj.flapping()); + svc.set_percent_state_change(obj.percent_state_change()); + svc.set_latency(obj.latency()); + svc.set_execution_time(obj.execution_time()); + svc.set_last_check(obj.last_check()); + svc.set_next_check(obj.next_check()); + svc.set_should_be_scheduled(obj.should_be_scheduled()); + svc.set_check_attempt(obj.check_attempt()); + svc.set_notification_number(obj.notification_number()); + svc.set_no_more_notifications(obj.no_more_notifications()); + svc.set_last_notification(obj.last_notification()); + svc.set_next_notification(obj.next_notification()); + svc.set_acknowledgement_type(obj.acknowledgement_type()); + svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth()); } /** @@ -1078,72 +1205,37 @@ void macro_cache::_process_pb_adaptive_service( auto& as = s->obj(); auto it = _services.find({as.host_id(), as.service_id()}); if (it != _services.end()) { - if (it->second->type() == make_type(io::neb, neb::de_service)) { - auto& s = *std::static_pointer_cast(it->second); - if (as.has_notify()) - s.notifications_enabled = as.notify(); - if (as.has_active_checks()) - s.active_checks_enabled = as.active_checks(); - if (as.has_should_be_scheduled()) - s.should_be_scheduled = as.should_be_scheduled(); - if (as.has_passive_checks()) - s.passive_checks_enabled = as.passive_checks(); - if (as.has_event_handler_enabled()) - s.event_handler_enabled = as.event_handler_enabled(); - if (as.has_flap_detection_enabled()) - s.flap_detection_enabled = 
as.flap_detection_enabled(); - if (as.has_obsess_over_service()) - s.obsess_over = as.obsess_over_service(); - if (as.has_event_handler()) - s.event_handler = as.event_handler(); - if (as.has_check_command()) - s.check_command = as.check_command(); - if (as.has_check_interval()) - s.check_interval = as.check_interval(); - if (as.has_retry_interval()) - s.retry_interval = as.retry_interval(); - if (as.has_max_check_attempts()) - s.max_check_attempts = as.max_check_attempts(); - if (as.has_check_freshness()) - s.check_freshness = as.check_freshness(); - if (as.has_check_period()) - s.check_period = as.check_period(); - if (as.has_notification_period()) - s.notification_period = as.notification_period(); - } else { - auto& s = - std::static_pointer_cast(it->second)->mut_obj(); - if (as.has_notify()) - s.set_notify(as.notify()); - if (as.has_active_checks()) - s.set_active_checks(as.active_checks()); - if (as.has_should_be_scheduled()) - s.set_should_be_scheduled(as.should_be_scheduled()); - if (as.has_passive_checks()) - s.set_passive_checks(as.passive_checks()); - if (as.has_event_handler_enabled()) - s.set_event_handler_enabled(as.event_handler_enabled()); - if (as.has_flap_detection_enabled()) - s.set_flap_detection(as.flap_detection_enabled()); - if (as.has_obsess_over_service()) - s.set_obsess_over_service(as.obsess_over_service()); - if (as.has_event_handler()) - s.set_event_handler(as.event_handler()); - if (as.has_check_command()) - s.set_check_command(as.check_command()); - if (as.has_check_interval()) - s.set_check_interval(as.check_interval()); - if (as.has_retry_interval()) - s.set_retry_interval(as.retry_interval()); - if (as.has_max_check_attempts()) - s.set_max_check_attempts(as.max_check_attempts()); - if (as.has_check_freshness()) - s.set_check_freshness(as.check_freshness()); - if (as.has_check_period()) - s.set_check_period(as.check_period()); - if (as.has_notification_period()) - s.set_notification_period(as.notification_period()); - } + auto& s = it->second->mut_obj(); + if (as.has_notify()) + s.set_notify(as.notify()); + if (as.has_active_checks()) + s.set_active_checks(as.active_checks()); + if (as.has_should_be_scheduled()) + s.set_should_be_scheduled(as.should_be_scheduled()); + if (as.has_passive_checks()) + s.set_passive_checks(as.passive_checks()); + if (as.has_event_handler_enabled()) + s.set_event_handler_enabled(as.event_handler_enabled()); + if (as.has_flap_detection_enabled()) + s.set_flap_detection(as.flap_detection_enabled()); + if (as.has_obsess_over_service()) + s.set_obsess_over_service(as.obsess_over_service()); + if (as.has_event_handler()) + s.set_event_handler(as.event_handler()); + if (as.has_check_command()) + s.set_check_command(as.check_command()); + if (as.has_check_interval()) + s.set_check_interval(as.check_interval()); + if (as.has_retry_interval()) + s.set_retry_interval(as.retry_interval()); + if (as.has_max_check_attempts()) + s.set_max_check_attempts(as.max_check_attempts()); + if (as.has_check_freshness()) + s.set_check_freshness(as.check_freshness()); + if (as.has_check_period()) + s.set_check_period(as.check_period()); + if (as.has_notification_period()) + s.set_notification_period(as.notification_period()); } else { SPDLOG_LOGGER_WARN( _cache->logger(), diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc index 8268c693b3e..eef66569bdf 100644 --- a/broker/lua/test/lua.cc +++ b/broker/lua/test/lua.cc @@ -17,7 +17,6 @@ */ #include -#include #include #include @@ -30,10 +29,7 @@ #include "com/centreon/broker/config/applier/init.hh" 
#include "com/centreon/broker/config/applier/modules.hh" #include "com/centreon/broker/lua/luabinding.hh" -#include "com/centreon/broker/lua/macro_cache.hh" -#include "com/centreon/broker/misc/variant.hh" #include "com/centreon/broker/neb/events.hh" -#include "com/centreon/broker/neb/instance.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -4836,3 +4832,227 @@ TEST_F(LuaTest, WithBadFilter2) { ASSERT_FALSE(bb->has_filter()); RemoveFile(filename); } + +// When a host is stored in the cache and an AdaptiveHostStatus is written +// Then the host in cache is updated. +TEST_F(LuaTest, AdaptiveHostCacheTest) { + config::applier::modules modules(log_v2::instance().get(log_v2::LUA)); + modules.load_file("./broker/neb/10-neb.so"); + std::map conf; + std::string filename("/tmp/cache_test.lua"); + auto hst{std::make_shared()}; + hst->host_id = 1; + hst->host_name = "centreon"; + hst->check_command = "echo 'John Doe'"; + hst->alias = "alias-centreon"; + hst->address = "4.3.2.1"; + _cache->write(hst); + + auto ahoststatus = std::make_shared(); + auto& obj = ahoststatus->mut_obj(); + obj.set_host_id(1); + obj.set_scheduled_downtime_depth(2); + _cache->write(ahoststatus); + + CreateScript(filename, + "function init(conf)\n" + " broker_log:set_parameters(3, '/tmp/log')\n" + " local hst = broker_cache:get_host(1)\n" + " broker_log:info(1, 'alias ' .. hst.alias .. ' address ' .. " + "hst.address .. ' name ' .. hst.name .. ' " + "scheduled_downtime_depth ' .. hst.scheduled_downtime_depth)\n" + "end\n\n" + "function write(d)\n" + " return true\n" + "end\n"); + auto binding{std::make_unique(filename, conf, *_cache)}; + std::string lst(ReadFile("/tmp/log")); + + ASSERT_NE(lst.find("alias alias-centreon address 4.3.2.1 name centreon " + "scheduled_downtime_depth 2"), + std::string::npos); + RemoveFile(filename); + RemoveFile("/tmp/log"); +} + +// When an AdaptiveHostStatus is written +// Then only the written fields are available. 
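+// (The script below runs with broker_api_version = 2, so write() receives the
+// event itself; JSON-encoding it shows that only the fields set on each
+// AdaptiveHostStatus appear, besides host_id and the event type metadata.)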
+TEST_F(LuaTest, AdaptiveHostCacheFieldTest) { + config::applier::modules modules(log_v2::instance().get(log_v2::LUA)); + modules.load_file("./broker/neb/10-neb.so"); + std::map conf; + std::string filename("/tmp/cache_test.lua"); + auto hst{std::make_shared()}; + hst->host_id = 1; + hst->host_name = "centreon"; + hst->check_command = "echo 'John Doe'"; + hst->alias = "alias-centreon"; + hst->address = "4.3.2.1"; + _cache->write(hst); + + CreateScript(filename, + "broker_api_version = 2\n" + "function init(conf)\n" + " broker_log:set_parameters(3, '/tmp/log')\n" + "end\n\n" + "function write(d)\n" + " broker_log:info(1, broker.json_encode(d))\n" + " return true\n" + "end\n"); + + auto binding{std::make_unique(filename, conf, *_cache)}; + + auto ahoststatus1 = std::make_shared(); + { + auto& obj = ahoststatus1->mut_obj(); + obj.set_host_id(1); + obj.set_notification_number(9); + binding->write(ahoststatus1); + } + + auto ahoststatus2 = std::make_shared(); + { + auto& obj = ahoststatus2->mut_obj(); + obj.set_host_id(2); + obj.set_acknowledgement_type(STICKY); + binding->write(ahoststatus2); + } + + auto ahoststatus3 = std::make_shared(); + { + auto& obj = ahoststatus3->mut_obj(); + obj.set_host_id(3); + obj.set_scheduled_downtime_depth(5); + binding->write(ahoststatus3); + } + std::string lst(ReadFile("/tmp/log")); + ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, " + "\"host_id\":1, \"notification_number\":9}"), + std::string::npos); + ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, " + "\"host_id\":2, \"acknowledgement_type\":2}"), + std::string::npos); + ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, " + "\"host_id\":3, \"scheduled_downtime_depth\":5}"), + std::string::npos); + RemoveFile(filename); + RemoveFile("/tmp/log"); +} + +// When a service is stored in the cache and an AdaptiveServiceStatus is written +// Then the service in cache is updated. +TEST_F(LuaTest, AdaptiveServiceCacheTest) { + config::applier::modules modules(log_v2::instance().get(log_v2::LUA)); + modules.load_file("./broker/neb/10-neb.so"); + std::map conf; + std::string filename("/tmp/cache_test.lua"); + auto svc{std::make_shared()}; + svc->host_id = 1; + svc->service_id = 2; + svc->host_name = "centreon-host"; + svc->service_description = "centreon-description"; + svc->check_command = "echo 'John Doe'"; + svc->display_name = "alias-centreon"; + _cache->write(svc); + + auto aservicestatus = std::make_shared(); + auto& obj = aservicestatus->mut_obj(); + obj.set_host_id(1); + obj.set_service_id(2); + obj.set_scheduled_downtime_depth(3); + _cache->write(aservicestatus); + + CreateScript(filename, + "function init(conf)\n" + " broker_log:set_parameters(3, '/tmp/log')\n" + " local svc = broker_cache:get_service(1, 2)\n" + " broker_log:info(1, 'display_name ' .. svc.display_name .. ' " + "description ' .. " + "svc.description .. ' check command ' .. svc.check_command .. ' " + "scheduled_downtime_depth ' .. svc.scheduled_downtime_depth)\n" + "end\n\n" + "function write(d)\n" + " return true\n" + "end\n"); + auto binding{std::make_unique(filename, conf, *_cache)}; + std::string lst(ReadFile("/tmp/log")); + + ASSERT_NE( + lst.find("display_name alias-centreon description centreon-description " + "check command echo 'John Doe' scheduled_downtime_depth 3"), + std::string::npos); + RemoveFile(filename); + // RemoveFile("/tmp/log"); +} + +// When an AdaptiveHostStatus is written +// Then only the written fields are available. 
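+// (The events written here are AdaptiveServiceStatus, for services (1, 2)
+// and (1, 3); the check mirrors the host field test above.)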
+TEST_F(LuaTest, AdaptiveServiceCacheFieldTest) { + config::applier::modules modules(log_v2::instance().get(log_v2::LUA)); + modules.load_file("./broker/neb/10-neb.so"); + std::map conf; + std::string filename("/tmp/cache_test.lua"); + auto svc{std::make_shared()}; + svc->host_id = 1; + svc->service_id = 2; + svc->host_name = "centreon-host"; + svc->service_description = "centreon-description"; + svc->check_command = "echo 'John Doe'"; + svc->display_name = "alias-centreon"; + _cache->write(svc); + + CreateScript(filename, + "broker_api_version = 2\n" + "function init(conf)\n" + " broker_log:set_parameters(3, '/tmp/log')\n" + "end\n\n" + "function write(d)\n" + " broker_log:info(1, broker.json_encode(d))\n" + " return true\n" + "end\n"); + + auto binding{std::make_unique(filename, conf, *_cache)}; + + auto aservicestatus1 = std::make_shared(); + { + auto& obj = aservicestatus1->mut_obj(); + obj.set_host_id(1); + obj.set_service_id(2); + obj.set_notification_number(9); + binding->write(aservicestatus1); + } + + auto aservicestatus2 = std::make_shared(); + { + auto& obj = aservicestatus2->mut_obj(); + obj.set_host_id(1); + obj.set_service_id(2); + obj.set_acknowledgement_type(STICKY); + binding->write(aservicestatus2); + } + + auto aservicestatus3 = std::make_shared(); + { + auto& obj = aservicestatus3->mut_obj(); + obj.set_host_id(1); + obj.set_service_id(3); + obj.set_scheduled_downtime_depth(5); + binding->write(aservicestatus3); + } + std::string lst(ReadFile("/tmp/log")); + std::cout << lst << std::endl; + ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, " + "\"host_id\":1, \"service_id\":2, \"type\":0, " + "\"internal_id\":0, \"notification_number\":9}"), + std::string::npos); + ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, " + "\"host_id\":1, \"service_id\":2, \"type\":0, " + "\"internal_id\":0, \"acknowledgement_type\":2}"), + std::string::npos); + ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, " + "\"host_id\":1, \"service_id\":3, \"type\":0, " + "\"internal_id\":0, \"scheduled_downtime_depth\":5}"), + std::string::npos); + RemoveFile(filename); + // RemoveFile("/tmp/log"); +} diff --git a/broker/neb/inc/com/centreon/broker/neb/comment.hh b/broker/neb/inc/com/centreon/broker/neb/comment.hh index e50b7219146..4633d6786e2 100644 --- a/broker/neb/inc/com/centreon/broker/neb/comment.hh +++ b/broker/neb/inc/com/centreon/broker/neb/comment.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_COMMENT_HH #define CCB_NEB_COMMENT_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -68,6 +65,6 @@ class comment : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_COMMENT_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh b/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh index c4dd57843c6..75c7428aab3 100644 --- a/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_CUSTOM_VARIABLE_STATUS_HH #define CCB_NEB_CUSTOM_VARIABLE_STATUS_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include 
"com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -62,6 +59,6 @@ class custom_variable_status : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_CUSTOM_VARIABLE_STATUS_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/downtime.hh b/broker/neb/inc/com/centreon/broker/neb/downtime.hh index 64c50828565..1deed00962f 100644 --- a/broker/neb/inc/com/centreon/broker/neb/downtime.hh +++ b/broker/neb/inc/com/centreon/broker/neb/downtime.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_DOWNTIME_HH #define CCB_NEB_DOWNTIME_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -85,6 +82,6 @@ class downtime : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_DOWNTIME_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/host.hh b/broker/neb/inc/com/centreon/broker/neb/host.hh index 6b328e42a86..c8c2180a4e9 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2009-2013,2015 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2009-2013,2015 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_NEB_HOST_HH #define CCB_NEB_HOST_HH @@ -72,6 +72,6 @@ class host : public host_service, public host_status { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_HOST_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/host_check.hh b/broker/neb/inc/com/centreon/broker/neb/host_check.hh index 29cc98f2225..3f75d131d65 100644 --- a/broker/neb/inc/com/centreon/broker/neb/host_check.hh +++ b/broker/neb/inc/com/centreon/broker/neb/host_check.hh @@ -19,11 +19,8 @@ #ifndef CCB_NEB_HOST_CHECK_HH #define CCB_NEB_HOST_CHECK_HH -#include "com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/check.hh" -#include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker { diff --git a/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh b/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh index 8e311835ea2..b3ee87151a1 100644 --- a/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh +++ b/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh @@ -19,12 +19,9 @@ #ifndef CCB_NEB_INSTANCE_CONFIGURATION_HH #define CCB_NEB_INSTANCE_CONFIGURATION_HH -#include "com/centreon/broker/io/data.hh" -#include "com/centreon/broker/io/event_info.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -59,6 +56,6 @@ class instance_configuration : public io::data { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_INSTANCE_CONFIGURATION_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh index d5fdb19ed19..badf7b9ffaa 100644 --- a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh @@ -24,7 +24,6 @@ #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/status.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -70,6 +69,6 @@ class instance_status : public status { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_EVENTS_INSTANCE_STATUS_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/internal.hh b/broker/neb/inc/com/centreon/broker/neb/internal.hh index 083f48ccf13..8ae6c9c3820 100644 --- a/broker/neb/inc/com/centreon/broker/neb/internal.hh +++ b/broker/neb/inc/com/centreon/broker/neb/internal.hh @@ -49,6 +49,9 @@ using pb_downtime = using pb_host_status = io::protobuf; +using pb_adaptive_host_status = + io::protobuf; using pb_host = io::protobuf; using pb_adaptive_host = io::protobuf; @@ -62,6 +65,10 @@ using pb_adaptive_service = using pb_service_status = io::protobuf; +using pb_adaptive_service_status = + io::protobuf; + using pb_severity = io::protobuf; diff --git a/broker/neb/inc/com/centreon/broker/neb/service_check.hh b/broker/neb/inc/com/centreon/broker/neb/service_check.hh index 97474467007..2278c93c1dd 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service_check.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service_check.hh @@ -19,11 +19,8 @@ #ifndef CCB_NEB_SERVICE_CHECK_HH #define CCB_NEB_SERVICE_CHECK_HH -#include 
"com/centreon/broker/io/event_info.hh" -#include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/check.hh" -#include "com/centreon/broker/neb/internal.hh" namespace com::centreon::broker { diff --git a/broker/neb/inc/com/centreon/broker/neb/service_status.hh b/broker/neb/inc/com/centreon/broker/neb/service_status.hh index c486e1b66a3..a2d635f5a34 100644 --- a/broker/neb/inc/com/centreon/broker/neb/service_status.hh +++ b/broker/neb/inc/com/centreon/broker/neb/service_status.hh @@ -24,7 +24,6 @@ #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/neb/host_service_status.hh" #include "com/centreon/broker/neb/internal.hh" -#include "com/centreon/broker/timestamp.hh" namespace com::centreon::broker { @@ -62,6 +61,6 @@ class service_status : public host_service_status { }; } // namespace neb -} +} // namespace com::centreon::broker #endif // !CCB_NEB_SERVICE_STATUS_HH diff --git a/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh b/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh index 9e1dc851477..038a202e279 100644 --- a/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh +++ b/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh @@ -21,7 +21,6 @@ #include "com/centreon/broker/neb/log_entry.hh" #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" namespace com { namespace centreon { diff --git a/broker/neb/precomp_inc/precomp.hpp b/broker/neb/precomp_inc/precomp.hpp index 0c373302776..83f32a05ff3 100644 --- a/broker/neb/precomp_inc/precomp.hpp +++ b/broker/neb/precomp_inc/precomp.hpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include diff --git a/broker/neb/src/broker.cc b/broker/neb/src/broker.cc index d8645e48670..c0f80c4c6fe 100644 --- a/broker/neb/src/broker.cc +++ b/broker/neb/src/broker.cc @@ -160,6 +160,10 @@ void broker_module_init(void const* arg) { e.register_event(make_type(io::neb, neb::de_pb_service_status), "ServiceStatus", &neb::pb_service_status::operations, "services"); + e.register_event(make_type(io::neb, neb::de_pb_adaptive_service_status), + "AdaptiveServiceStatus", + &neb::pb_adaptive_service_status::operations, + "services"); e.register_event(make_type(io::neb, neb::de_pb_host), "Host", &neb::pb_host::operations, "hosts"); @@ -169,6 +173,10 @@ void broker_module_init(void const* arg) { e.register_event(make_type(io::neb, neb::de_pb_host_status), "HostStatus", &neb::pb_host_status::operations, "hosts"); + e.register_event(make_type(io::neb, neb::de_pb_adaptive_host_status), + "AdaptiveHostStatus", + &neb::pb_adaptive_host_status::operations, + "hosts"); e.register_event(make_type(io::neb, neb::de_pb_severity), "Severity", &neb::pb_severity::operations, "severities"); diff --git a/broker/neb/src/callbacks.cc b/broker/neb/src/callbacks.cc index f6a8ec4b3ab..ee07d114fa4 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -18,38 +18,26 @@ #include "com/centreon/broker/neb/callbacks.hh" +#include #include #include -#include "bbdo/neb.pb.h" -#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" - #include "com/centreon/broker/bbdo/internal.hh" #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/config/parser.hh" -#include "com/centreon/broker/config/state.hh" -#include "com/centreon/broker/neb/callback.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/neb/initial.hh" -#include 
"com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/set_log_data.hh" #include "com/centreon/common/time.hh" #include "com/centreon/common/utf8.hh" #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/broker.hh" -#include "com/centreon/engine/comment.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" #include "com/centreon/engine/hostdependency.hh" -#include "com/centreon/engine/hostgroup.hh" -#include "com/centreon/engine/nebcallbacks.hh" #include "com/centreon/engine/nebstructs.hh" #include "com/centreon/engine/servicedependency.hh" -#include "com/centreon/engine/servicegroup.hh" #include "com/centreon/engine/severity.hh" -#include "com/centreon/engine/tag.hh" -#include "com/centreon/exceptions/msg_fmt.hh" -#include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; using namespace com::centreon::exceptions; @@ -2298,10 +2286,9 @@ int neb::callback_pb_host_check(int callback_type, void* data) { * * @return 0 on success. */ -int neb::callback_host_status(int callback_type, void* data) { +int neb::callback_host_status(int callback_type [[maybe_unused]], void* data) { // Log message. SPDLOG_LOGGER_DEBUG(neb_logger, "callbacks: generating host status event"); - (void)callback_type; try { // In/Out variables. @@ -2427,90 +2414,115 @@ int neb::callback_host_status(int callback_type, void* data) { * * @return 0 on success. */ -int neb::callback_pb_host_status(int callback_type, void* data) noexcept { +int neb::callback_pb_host_status(int callback_type [[maybe_unused]], + void* data) noexcept { // Log message. SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating pb host status check result event protobuf"); - (void)callback_type; - const engine::host* eh{static_cast( - static_cast(data)->object_ptr)}; + nebstruct_host_status_data* hsd = + static_cast(data); + const engine::host* eh = static_cast(hsd->object_ptr); + + auto handle_acknowledgement = [](uint16_t state, auto& hscr) { + auto it = gl_acknowledgements.find(std::make_pair(hscr.host_id(), 0u)); + if (it != gl_acknowledgements.end() && + hscr.acknowledgement_type() == AckType::NONE) { + if (it->second->type() == make_type(io::neb, de_pb_acknowledgement)) { + neb_logger->debug("acknowledgement found on host {}", hscr.host_id()); + neb::pb_acknowledgement* a = + static_cast(it->second.get()); + if (!(!state // !(OK or (normal ack and NOK)) + || (!a->obj().sticky() && state != a->obj().state()))) { + a->mut_obj().set_deletion_time(time(nullptr)); + gl_publisher.write(std::move(it->second)); + } + } else { + neb::acknowledgement* a = + static_cast(it->second.get()); + if (!(!state // !(OK or (normal ack and NOK)) + || (!a->is_sticky && state != a->state))) { + a->deletion_time = time(nullptr); + gl_publisher.write(std::move(it->second)); + } + } + gl_acknowledgements.erase(it); + } + }; + + uint16_t state = + eh->has_been_checked() ? eh->get_current_state() : 4; // Pending state. 
+ + if (hsd->attributes != engine::host::STATUS_ALL) { + auto h{std::make_shared()}; + AdaptiveHostStatus& hst = h.get()->mut_obj(); + if (hsd->attributes & engine::host::STATUS_DOWNTIME_DEPTH) { + hst.set_host_id(eh->host_id()); + hst.set_scheduled_downtime_depth(eh->get_scheduled_downtime_depth()); + } + if (hsd->attributes & engine::host::STATUS_NOTIFICATION_NUMBER) { + hst.set_host_id(eh->host_id()); + hst.set_notification_number(eh->get_notification_number()); + } + if (hsd->attributes & engine::host::STATUS_ACKNOWLEDGEMENT) { + hst.set_host_id(eh->host_id()); + hst.set_acknowledgement_type(eh->get_acknowledgement()); + } + gl_publisher.write(h); - auto h{std::make_shared()}; - HostStatus& hscr = h.get()->mut_obj(); + // Acknowledgement event. + handle_acknowledgement(state, hst); + } else { + auto h{std::make_shared()}; + HostStatus& hscr = h.get()->mut_obj(); - hscr.set_host_id(eh->host_id()); - if (hscr.host_id() == 0) - SPDLOG_LOGGER_ERROR(neb_logger, "could not find ID of host '{}'", - eh->name()); + hscr.set_host_id(eh->host_id()); + if (hscr.host_id() == 0) + SPDLOG_LOGGER_ERROR(neb_logger, "could not find ID of host '{}'", + eh->name()); - if (eh->problem_has_been_acknowledged()) hscr.set_acknowledgement_type(eh->get_acknowledgement()); - else - hscr.set_acknowledgement_type(AckType::NONE); - - hscr.set_check_type(static_cast(eh->get_check_type())); - hscr.set_check_attempt(eh->get_current_attempt()); - hscr.set_state(static_cast( - eh->has_been_checked() ? eh->get_current_state() : 4)); // Pending state. - hscr.set_execution_time(eh->get_execution_time()); - hscr.set_checked(eh->has_been_checked()); - hscr.set_flapping(eh->get_is_flapping()); - hscr.set_last_check(eh->get_last_check()); - hscr.set_last_hard_state( - static_cast(eh->get_last_hard_state())); - hscr.set_last_hard_state_change(eh->get_last_hard_state_change()); - hscr.set_last_notification(eh->get_last_notification()); - hscr.set_notification_number(eh->get_notification_number()); - hscr.set_last_state_change(eh->get_last_state_change()); - hscr.set_last_time_down(eh->get_last_time_down()); - hscr.set_last_time_unreachable(eh->get_last_time_unreachable()); - hscr.set_last_time_up(eh->get_last_time_up()); - hscr.set_latency(eh->get_latency()); - hscr.set_next_check(eh->get_next_check()); - hscr.set_next_host_notification(eh->get_next_notification()); - hscr.set_no_more_notifications(eh->get_no_more_notifications()); - if (!eh->get_plugin_output().empty()) - hscr.set_output(common::check_string_utf8(eh->get_plugin_output())); - if (!eh->get_long_plugin_output().empty()) - hscr.set_output(common::check_string_utf8(eh->get_long_plugin_output())); - - hscr.set_percent_state_change(eh->get_percent_state_change()); - if (!eh->get_perf_data().empty()) - hscr.set_perfdata(common::check_string_utf8(eh->get_perf_data())); - hscr.set_should_be_scheduled(eh->get_should_be_scheduled()); - hscr.set_state_type(static_cast( - eh->has_been_checked() ? 
eh->get_state_type() : engine::notifier::hard)); - hscr.set_scheduled_downtime_depth(eh->get_scheduled_downtime_depth()); + hscr.set_check_type( + static_cast(eh->get_check_type())); + hscr.set_check_attempt(eh->get_current_attempt()); + hscr.set_state(static_cast(state)); + hscr.set_execution_time(eh->get_execution_time()); + hscr.set_checked(eh->has_been_checked()); + hscr.set_flapping(eh->get_is_flapping()); + hscr.set_last_check(eh->get_last_check()); + hscr.set_last_hard_state( + static_cast(eh->get_last_hard_state())); + hscr.set_last_hard_state_change(eh->get_last_hard_state_change()); + hscr.set_last_notification(eh->get_last_notification()); + hscr.set_notification_number(eh->get_notification_number()); + hscr.set_last_state_change(eh->get_last_state_change()); + hscr.set_last_time_down(eh->get_last_time_down()); + hscr.set_last_time_unreachable(eh->get_last_time_unreachable()); + hscr.set_last_time_up(eh->get_last_time_up()); + hscr.set_latency(eh->get_latency()); + hscr.set_next_check(eh->get_next_check()); + hscr.set_next_host_notification(eh->get_next_notification()); + hscr.set_no_more_notifications(eh->get_no_more_notifications()); + if (!eh->get_plugin_output().empty()) + hscr.set_output(common::check_string_utf8(eh->get_plugin_output())); + if (!eh->get_long_plugin_output().empty()) + hscr.set_output(common::check_string_utf8(eh->get_long_plugin_output())); - // Send event(s). - gl_publisher.write(h); + hscr.set_percent_state_change(eh->get_percent_state_change()); + if (!eh->get_perf_data().empty()) + hscr.set_perfdata(common::check_string_utf8(eh->get_perf_data())); + hscr.set_should_be_scheduled(eh->get_should_be_scheduled()); + hscr.set_state_type(static_cast( + eh->has_been_checked() ? eh->get_state_type() + : engine::notifier::hard)); + hscr.set_scheduled_downtime_depth(eh->get_scheduled_downtime_depth()); - // Acknowledgement event. - auto it = gl_acknowledgements.find(std::make_pair(hscr.host_id(), 0u)); - if (it != gl_acknowledgements.end() && - hscr.acknowledgement_type() == AckType::NONE) { - if (it->second->type() == make_type(io::neb, de_pb_acknowledgement)) { - neb_logger->debug("acknowledgement found on host {}", hscr.host_id()); - neb::pb_acknowledgement* a = - static_cast(it->second.get()); - if (!(!hscr.state() // !(OK or (normal ack and NOK)) - || (!a->obj().sticky() && - hscr.state() != static_cast(a->obj().state())))) { - a->mut_obj().set_deletion_time(time(nullptr)); - gl_publisher.write(std::move(it->second)); - } - } else { - neb::acknowledgement* a = - static_cast(it->second.get()); - if (!(!hscr.state() // !(OK or (normal ack and NOK)) - || (!a->is_sticky && hscr.state() != a->state))) { - a->deletion_time = time(nullptr); - gl_publisher.write(std::move(it->second)); - } - } - gl_acknowledgements.erase(it); + // Send event(s). + gl_publisher.write(h); + + // Acknowledgement event. 
+ handle_acknowledgement(state, hscr); } neb_logger->debug("Still {} running acknowledgements", gl_acknowledgements.size()); @@ -3157,6 +3169,50 @@ int neb::callback_service(int callback_type, void* data) { return 0; } +template +static void fill_service_type(SrvStatus& ss, + const com::centreon::engine::service* es) { + switch (es->get_service_type()) { + case com::centreon::engine::service_type::METASERVICE: { + ss.set_type(METASERVICE); + uint64_t iid; + if (absl::SimpleAtoi(es->description().c_str() + 5, &iid)) + ss.set_internal_id(iid); + else { + SPDLOG_LOGGER_ERROR( + neb_logger, + "callbacks: service ('{}', '{}') looks like a meta-service but " + "its name is malformed", + es->get_hostname(), es->description()); + } + } break; + case com::centreon::engine::service_type::BA: { + ss.set_type(BA); + uint64_t iid; + if (absl::SimpleAtoi(es->description().c_str() + 3, &iid)) + ss.set_internal_id(iid); + else { + SPDLOG_LOGGER_ERROR( + neb_logger, + "callbacks: service ('{}', '{}') looks like a business-activity " + "but its name is malformed", + es->get_hostname(), es->description()); + } + } break; + case com::centreon::engine::service_type::ANOMALY_DETECTION: + ss.set_type(ANOMALY_DETECTION); + { + auto ad = + static_cast(es); + ss.set_internal_id(ad->get_internal_id()); + } + break; + default: + ss.set_type(SERVICE); + break; + } +} + /** * @brief Function that process protobuf service data. * @@ -3184,40 +3240,69 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { if (ds->type == NEBTYPE_ADAPTIVESERVICE_UPDATE && ds->modified_attribute != MODATTR_ALL) { auto s{std::make_shared()}; + bool done = false; AdaptiveService& srv = s.get()->mut_obj(); - if (ds->modified_attribute & MODATTR_NOTIFICATIONS_ENABLED) + if (ds->modified_attribute & MODATTR_NOTIFICATIONS_ENABLED) { srv.set_notify(es->get_notifications_enabled()); - else if (ds->modified_attribute & MODATTR_ACTIVE_CHECKS_ENABLED) { + done = true; + } + if (ds->modified_attribute & MODATTR_ACTIVE_CHECKS_ENABLED) { srv.set_active_checks(es->active_checks_enabled()); srv.set_should_be_scheduled(es->get_should_be_scheduled()); - } else if (ds->modified_attribute & MODATTR_PASSIVE_CHECKS_ENABLED) + done = true; + } + if (ds->modified_attribute & MODATTR_PASSIVE_CHECKS_ENABLED) { srv.set_passive_checks(es->passive_checks_enabled()); - else if (ds->modified_attribute & MODATTR_EVENT_HANDLER_ENABLED) + done = true; + } + if (ds->modified_attribute & MODATTR_EVENT_HANDLER_ENABLED) { srv.set_event_handler_enabled(es->event_handler_enabled()); - else if (ds->modified_attribute & MODATTR_FLAP_DETECTION_ENABLED) + done = true; + } + if (ds->modified_attribute & MODATTR_FLAP_DETECTION_ENABLED) { srv.set_flap_detection_enabled(es->flap_detection_enabled()); - else if (ds->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) + done = true; + } + if (ds->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) { srv.set_obsess_over_service(es->obsess_over()); - else if (ds->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) + done = true; + } + if (ds->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) { srv.set_event_handler(common::check_string_utf8(es->event_handler())); - else if (ds->modified_attribute & MODATTR_CHECK_COMMAND) + done = true; + } + if (ds->modified_attribute & MODATTR_CHECK_COMMAND) { srv.set_check_command(common::check_string_utf8(es->check_command())); - else if (ds->modified_attribute & MODATTR_NORMAL_CHECK_INTERVAL) + done = true; + } + if (ds->modified_attribute & 
MODATTR_NORMAL_CHECK_INTERVAL) { srv.set_check_interval(es->check_interval()); - else if (ds->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) + done = true; + } + if (ds->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) { srv.set_retry_interval(es->retry_interval()); - else if (ds->modified_attribute & MODATTR_MAX_CHECK_ATTEMPTS) + done = true; + } + if (ds->modified_attribute & MODATTR_MAX_CHECK_ATTEMPTS) { srv.set_max_check_attempts(es->max_check_attempts()); - else if (ds->modified_attribute & MODATTR_FRESHNESS_CHECKS_ENABLED) + done = true; + } + if (ds->modified_attribute & MODATTR_FRESHNESS_CHECKS_ENABLED) { srv.set_check_freshness(es->check_freshness_enabled()); - else if (ds->modified_attribute & MODATTR_CHECK_TIMEPERIOD) + done = true; + } + if (ds->modified_attribute & MODATTR_CHECK_TIMEPERIOD) { srv.set_check_period(es->check_period()); - else if (ds->modified_attribute & MODATTR_NOTIFICATION_TIMEPERIOD) + done = true; + } + if (ds->modified_attribute & MODATTR_NOTIFICATION_TIMEPERIOD) { srv.set_notification_period(es->notification_period()); - else { - SPDLOG_LOGGER_ERROR(neb_logger, - "callbacks: adaptive service not implemented."); - assert(1 == 0); + done = true; + } + if (!done) { + SPDLOG_LOGGER_ERROR( + neb_logger, "callbacks: adaptive service field {} not implemented.", + ds->modified_attribute); } std::pair p{ engine::get_host_and_service_id(es->get_hostname(), es->description())}; @@ -3290,58 +3375,12 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_high_flap_threshold(es->get_high_flap_threshold()); if (!es->description().empty()) srv.set_description(common::check_string_utf8(es->description())); - if (!es->get_hostname().empty()) { std::string name{common::check_string_utf8(es->get_hostname())}; - switch (es->get_service_type()) { - case com::centreon::engine::service_type::METASERVICE: { - srv.set_type(METASERVICE); - uint64_t iid = 0; - for (auto c = srv.description().begin() + 5; - c != srv.description().end(); ++c) { - if (!isdigit(*c)) { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: service ('{}', '{}') looks like a meta-service " - "but its name is malformed", - name, srv.description()); - break; - } - iid = 10 * iid + (*c - '0'); - } - srv.set_internal_id(iid); - } break; - case com::centreon::engine::service_type::BA: { - srv.set_type(BA); - uint64_t iid = 0; - for (auto c = srv.description().begin() + 3; - c != srv.description().end(); ++c) { - if (!isdigit(*c)) { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: service ('{}', '{}') looks like a " - "business-activity but its name is malformed", - name, srv.description()); - break; - } - iid = 10 * iid + (*c - '0'); - } - srv.set_internal_id(iid); - } break; - case com::centreon::engine::service_type::ANOMALY_DETECTION: - srv.set_type(ANOMALY_DETECTION); - { - auto ad = - static_cast(es); - srv.set_internal_id(ad->get_internal_id()); - } - break; - default: - srv.set_type(SERVICE); - break; - } *srv.mutable_host_name() = std::move(name); } + fill_service_type(srv, es); + if (!es->get_icon_image().empty()) *srv.mutable_icon_image() = common::check_string_utf8(es->get_icon_image()); @@ -3686,140 +3725,132 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], SPDLOG_LOGGER_DEBUG( neb_logger, "callbacks: generating pb service status check result event"); - const engine::service* es{static_cast( - static_cast(data)->object_ptr)}; - neb_logger->debug("callbacks: pb_service_status ({},{}) status {}, type {}", - es->host_id(), 
es->service_id(), - static_cast(es->get_current_state()), - static_cast(es->get_check_type())); - - auto s{std::make_shared()}; - ServiceStatus& sscr = s.get()->mut_obj(); - - sscr.set_host_id(es->host_id()); - sscr.set_service_id(es->service_id()); - if (es->host_id() == 0 || es->service_id() == 0) - SPDLOG_LOGGER_ERROR(neb_logger, "could not find ID of service ('{}', '{}')", - es->get_hostname(), es->description()); - - if (es->problem_has_been_acknowledged()) - sscr.set_acknowledgement_type(es->get_acknowledgement()); - else - sscr.set_acknowledgement_type(AckType::NONE); - - sscr.set_check_type( - static_cast(es->get_check_type())); - sscr.set_check_attempt(es->get_current_attempt()); - sscr.set_state(static_cast( - es->has_been_checked() ? es->get_current_state() : 4)); // Pending state. - sscr.set_execution_time(es->get_execution_time()); - sscr.set_checked(es->has_been_checked()); - sscr.set_flapping(es->get_is_flapping()); - sscr.set_last_check(es->get_last_check()); - sscr.set_last_hard_state( - static_cast(es->get_last_hard_state())); - sscr.set_last_hard_state_change(es->get_last_hard_state_change()); - sscr.set_last_notification(es->get_last_notification()); - sscr.set_notification_number(es->get_notification_number()); - sscr.set_last_state_change(es->get_last_state_change()); - sscr.set_last_time_critical(es->get_last_time_critical()); - sscr.set_last_time_ok(es->get_last_time_ok()); - sscr.set_last_time_unknown(es->get_last_time_unknown()); - sscr.set_last_time_warning(es->get_last_time_warning()); - sscr.set_latency(es->get_latency()); - sscr.set_next_check(es->get_next_check()); - sscr.set_next_notification(es->get_next_notification()); - sscr.set_no_more_notifications(es->get_no_more_notifications()); - if (!es->get_plugin_output().empty()) - sscr.set_output(common::check_string_utf8(es->get_plugin_output())); - if (!es->get_long_plugin_output().empty()) - sscr.set_long_output( - common::check_string_utf8(es->get_long_plugin_output())); - sscr.set_percent_state_change(es->get_percent_state_change()); - if (!es->get_perf_data().empty()) { - sscr.set_perfdata(common::check_string_utf8(es->get_perf_data())); - SPDLOG_LOGGER_TRACE(neb_logger, - "callbacks: service ({}, {}) has perfdata <<{}>>", - es->host_id(), es->service_id(), es->get_perf_data()); - } else { - SPDLOG_LOGGER_TRACE(neb_logger, - "callbacks: service ({}, {}) has no perfdata", - es->host_id(), es->service_id()); - } - sscr.set_should_be_scheduled(es->get_should_be_scheduled()); - sscr.set_state_type(static_cast( - es->has_been_checked() ? 
es->get_state_type() : engine::notifier::hard)); - sscr.set_scheduled_downtime_depth(es->get_scheduled_downtime_depth()); - - if (!es->get_hostname().empty()) { - if (strncmp(es->get_hostname().c_str(), "_Module_Meta", 13) == 0) { - if (strncmp(es->description().c_str(), "meta_", 5) == 0) { - sscr.set_type(METASERVICE); - uint64_t iid = 0; - for (auto c = es->description().begin() + 5; - c != es->description().end(); ++c) { - if (!isdigit(*c)) { - SPDLOG_LOGGER_ERROR( - neb_logger, - "callbacks: service ('{}', '{}') looks like a meta-service " - "but its name is malformed", - es->get_hostname(), es->description()); - break; - } - iid = 10 * iid + (*c - '0'); + nebstruct_service_status_data* ds = + static_cast(data); + const engine::service* es = static_cast(ds->object_ptr); + neb_logger->debug( + "callbacks: pb_service_status ({},{}) status {}, attributes {}, type {}", + es->host_id(), es->service_id(), + static_cast(es->get_current_state()), ds->attributes, + static_cast(es->get_check_type())); + + auto handle_acknowledgement = [](uint16_t state, auto& r) { + neb_logger->debug("Looking for acknowledgement on service ({}:{})", + r.host_id(), r.service_id()); + auto it = + gl_acknowledgements.find(std::make_pair(r.host_id(), r.service_id())); + if (it != gl_acknowledgements.end() && + r.acknowledgement_type() == AckType::NONE) { + neb_logger->debug("acknowledgement found on service ({}:{})", r.host_id(), + r.service_id()); + if (it->second->type() == make_type(io::neb, de_pb_acknowledgement)) { + neb::pb_acknowledgement* a = + static_cast(it->second.get()); + if (!(!state // !(OK or (normal ack and NOK)) + || (!a->obj().sticky() && state != a->obj().state()))) { + a->mut_obj().set_deletion_time(time(nullptr)); + gl_publisher.write(std::move(it->second)); } - sscr.set_internal_id(iid); - } - } else if (strncmp(es->get_hostname().c_str(), "_Module_BAM", 11) == 0) { - if (strncmp(es->description().c_str(), "ba_", 3) == 0) { - sscr.set_type(BA); - uint64_t iid = 0; - for (auto c = es->description().begin() + 3; - c != es->description().end(); ++c) { - if (!isdigit(*c)) { - SPDLOG_LOGGER_ERROR(neb_logger, - "callbacks: service ('{}', '{}') looks like a " - "business-activity but its name is malformed", - es->get_hostname(), es->description()); - break; - } - iid = 10 * iid + (*c - '0'); + } else { + neb::acknowledgement* a = + static_cast(it->second.get()); + if (!(!state // !(OK or (normal ack and NOK)) + || (!a->is_sticky && state != a->state))) { + a->deletion_time = time(nullptr); + gl_publisher.write(std::move(it->second)); } - sscr.set_internal_id(iid); } + gl_acknowledgements.erase(it); } - } - // Send event(s). - gl_publisher.write(s); + }; + uint16_t state = + es->has_been_checked() ? es->get_current_state() : 4; // Pending state. 
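+  // When only some status attributes changed, publish a lightweight
+  // AdaptiveServiceStatus limited to those attributes; the else branch
+  // below builds and publishes a full ServiceStatus.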
+ if (ds->attributes != engine::service::STATUS_ALL) { + auto as = std::make_shared(); + AdaptiveServiceStatus& asscr = as.get()->mut_obj(); + fill_service_type(asscr, es); + if (ds->attributes & engine::service::STATUS_DOWNTIME_DEPTH) { + asscr.set_host_id(es->host_id()); + asscr.set_service_id(es->service_id()); + asscr.set_scheduled_downtime_depth(es->get_scheduled_downtime_depth()); + } + if (ds->attributes & engine::service::STATUS_NOTIFICATION_NUMBER) { + asscr.set_host_id(es->host_id()); + asscr.set_service_id(es->service_id()); + asscr.set_notification_number(es->get_notification_number()); + } + if (ds->attributes & engine::service::STATUS_ACKNOWLEDGEMENT) { + asscr.set_host_id(es->host_id()); + asscr.set_service_id(es->service_id()); + asscr.set_acknowledgement_type(es->get_acknowledgement()); + } + gl_publisher.write(as); - neb_logger->debug("Looking for acknowledgement on service ({}:{})", - sscr.host_id(), sscr.service_id()); - // Acknowledgement event. - auto it = gl_acknowledgements.find( - std::make_pair(sscr.host_id(), sscr.service_id())); - if (it != gl_acknowledgements.end() && - sscr.acknowledgement_type() == AckType::NONE) { - neb_logger->debug("acknowledgement found on service ({}:{})", - sscr.host_id(), sscr.service_id()); - if (it->second->type() == make_type(io::neb, de_pb_acknowledgement)) { - neb::pb_acknowledgement* a = - static_cast(it->second.get()); - if (!(!sscr.state() // !(OK or (normal ack and NOK)) - || (!a->obj().sticky() && - static_cast(sscr.state()) != a->obj().state()))) { - a->mut_obj().set_deletion_time(time(nullptr)); - gl_publisher.write(std::move(it->second)); - } + // Acknowledgement event. + handle_acknowledgement(state, asscr); + } else { + auto s{std::make_shared()}; + ServiceStatus& sscr = s.get()->mut_obj(); + + fill_service_type(sscr, es); + sscr.set_host_id(es->host_id()); + sscr.set_service_id(es->service_id()); + if (es->host_id() == 0 || es->service_id() == 0) + SPDLOG_LOGGER_ERROR(neb_logger, + "could not find ID of service ('{}', '{}')", + es->get_hostname(), es->description()); + + sscr.set_acknowledgement_type(es->get_acknowledgement()); + + sscr.set_check_type( + static_cast(es->get_check_type())); + sscr.set_check_attempt(es->get_current_attempt()); + sscr.set_state(static_cast(state)); + sscr.set_execution_time(es->get_execution_time()); + sscr.set_checked(es->has_been_checked()); + sscr.set_flapping(es->get_is_flapping()); + sscr.set_last_check(es->get_last_check()); + sscr.set_last_hard_state( + static_cast(es->get_last_hard_state())); + sscr.set_last_hard_state_change(es->get_last_hard_state_change()); + sscr.set_last_notification(es->get_last_notification()); + sscr.set_notification_number(es->get_notification_number()); + sscr.set_last_state_change(es->get_last_state_change()); + sscr.set_last_time_critical(es->get_last_time_critical()); + sscr.set_last_time_ok(es->get_last_time_ok()); + sscr.set_last_time_unknown(es->get_last_time_unknown()); + sscr.set_last_time_warning(es->get_last_time_warning()); + sscr.set_latency(es->get_latency()); + sscr.set_next_check(es->get_next_check()); + sscr.set_next_notification(es->get_next_notification()); + sscr.set_no_more_notifications(es->get_no_more_notifications()); + if (!es->get_plugin_output().empty()) + sscr.set_output(common::check_string_utf8(es->get_plugin_output())); + if (!es->get_long_plugin_output().empty()) + sscr.set_long_output( + common::check_string_utf8(es->get_long_plugin_output())); + sscr.set_percent_state_change(es->get_percent_state_change()); + if 
(!es->get_perf_data().empty()) { + sscr.set_perfdata(common::check_string_utf8(es->get_perf_data())); + SPDLOG_LOGGER_TRACE(neb_logger, + "callbacks: service ({}, {}) has perfdata <<{}>>", + es->host_id(), es->service_id(), es->get_perf_data()); } else { - neb::acknowledgement* a = - static_cast(it->second.get()); - if (!(!sscr.state() // !(OK or (normal ack and NOK)) - || (!a->is_sticky && sscr.state() != a->state))) { - a->deletion_time = time(nullptr); - gl_publisher.write(std::move(it->second)); - } + SPDLOG_LOGGER_TRACE(neb_logger, + "callbacks: service ({}, {}) has no perfdata", + es->host_id(), es->service_id()); } - gl_acknowledgements.erase(it); + sscr.set_should_be_scheduled(es->get_should_be_scheduled()); + sscr.set_state_type(static_cast( + es->has_been_checked() ? es->get_state_type() + : engine::notifier::hard)); + sscr.set_scheduled_downtime_depth(es->get_scheduled_downtime_depth()); + + // Send event(s). + gl_publisher.write(s); + + // Acknowledgement event. + handle_acknowledgement(state, sscr); } neb_logger->debug("Still {} running acknowledgements", gl_acknowledgements.size()); diff --git a/broker/tls/inc/com/centreon/broker/tls/stream.hh b/broker/tls/inc/com/centreon/broker/tls/stream.hh index 57274cb75be..300fb722e85 100644 --- a/broker/tls/inc/com/centreon/broker/tls/stream.hh +++ b/broker/tls/inc/com/centreon/broker/tls/stream.hh @@ -19,7 +19,6 @@ #ifndef CCB_TLS_STREAM_HH #define CCB_TLS_STREAM_HH -#include #include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/tls/params.hh" diff --git a/broker/tls/test/acceptor.cc b/broker/tls/test/acceptor.cc index f2a051b4fc0..d6a32c82016 100644 --- a/broker/tls/test/acceptor.cc +++ b/broker/tls/test/acceptor.cc @@ -19,10 +19,8 @@ #include "com/centreon/broker/tcp/acceptor.hh" -#include #include -#include #include "com/centreon/broker/io/raw.hh" #include "com/centreon/broker/misc/buffer.hh" @@ -34,7 +32,6 @@ #include "com/centreon/broker/tls/connector.hh" #include "com/centreon/broker/tls/internal.hh" #include "com/centreon/broker/tls/stream.hh" -#include "com/centreon/common/pool.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::broker; @@ -178,7 +175,7 @@ TEST_F(TlsTest, AnonTlsStreamContinuous) { tls::stream* tls_centengine = static_cast(io_tls_centengine.get()); - char str[20]; + char str[24]; int i = 0; /* This is not representative of a real stream. 
Here we have to call write diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index f0944953f6f..4d424e9a582 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -18,8 +18,6 @@ #ifndef CCB_UNIFIED_SQL_STREAM_HH #define CCB_UNIFIED_SQL_STREAM_HH -#include -#include #include #include #include @@ -449,11 +447,13 @@ class stream : public io::stream { void _process_pb_host(const std::shared_ptr& d); uint64_t _process_pb_host_in_resources(const Host& h, int32_t conn); void _process_pb_host_status(const std::shared_ptr& d); + void _process_pb_adaptive_host_status(const std::shared_ptr& d); void _process_pb_adaptive_host(const std::shared_ptr& d); void _process_pb_service(const std::shared_ptr& d); uint64_t _process_pb_service_in_resources(const Service& s, int32_t conn); void _process_pb_adaptive_service(const std::shared_ptr& d); void _process_pb_service_status(const std::shared_ptr& d); + void _process_pb_adaptive_service_status(const std::shared_ptr& d); void _process_severity(const std::shared_ptr& d); void _process_tag(const std::shared_ptr& d); void _process_pb_log(const std::shared_ptr& d); diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index eaaa899f879..af7d96bff9b 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -118,7 +118,9 @@ constexpr void (stream::*const stream::neb_processing_table[])( &stream::_process_pb_service_group, &stream::_process_pb_service_group_member, &stream::_process_pb_host_parent, - nullptr // pb_instance_configuration + nullptr, // pb_instance_configuration + &stream::_process_pb_adaptive_service_status, + &stream::_process_pb_adaptive_host_status, }; constexpr size_t neb_processing_table_size = diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index adf4cbe0269..ab802a7989c 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -15,16 +15,12 @@ * * For more information : contact@centreon.com */ -#include -#include #include "bbdo/neb.pb.h" #include "bbdo/storage/index_mapping.hh" #include "com/centreon/broker/cache/global_cache.hh" -#include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/events.hh" -#include "com/centreon/broker/sql/mysql_result.hh" #include "com/centreon/broker/sql/query_preparator.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" @@ -882,10 +878,8 @@ void stream::_process_downtime(const std::shared_ptr& d) { SPDLOG_LOGGER_INFO(_logger_sql, "unified_sql: processing downtime event (poller: {}" ", host: {}, service: {}, start time: {}, end_time: {}" - ", actual start time: {}" - ", actual end time: {}" - ", duration: {}, entry time: {}" - ", deletion time: {})", + ", actual start time: {}, actual end time: {}, duration: " + "{}, entry time: {}, deletion time: {})", dd.poller_id, dd.host_id, dd.service_id, dd.start_time, dd.end_time, dd.actual_start_time, dd.actual_end_time, dd.duration, dd.entry_time, dd.deletion_time); @@ -980,11 +974,9 @@ void stream::_process_pb_downtime(const std::shared_ptr& d) { // Log message. 
SPDLOG_LOGGER_INFO(_logger_sql, "unified_sql: processing pb downtime event (poller: {}" - ", host: {}, service: {}, start time: {}, end_time: {}" - ", actual start time: {}" - ", actual end time: {}" - ", duration: {}, entry time: {}" - ", deletion time: {})", + ", host: {}, service: {}, start time: {}, end_time: {}, " + "actual start time: {}, actual end time: {}, duration: " + "{}, entry time: {}, deletion time: {})", dt_obj.instance_id(), dt_obj.host_id(), dt_obj.service_id(), dt_obj.start_time(), dt_obj.end_time(), dt_obj.actual_start_time(), @@ -2734,6 +2726,80 @@ void stream::_process_pb_host_status(const std::shared_ptr& d) { now, hscr.state(), hscr.state_type()); } +void stream::_process_pb_adaptive_host_status( + const std::shared_ptr& d) { + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); + // Processed object. + auto h{static_cast(d.get())}; + auto& hscr = h->obj(); + + SPDLOG_LOGGER_DEBUG( + _logger_sql, "unified_sql: pb adaptive host {} status -{}{}{}", + hscr.host_id(), + hscr.has_acknowledgement_type() ? " with acknowledgement type" : "", + hscr.has_notification_number() ? " with notification number" : "", + hscr.has_scheduled_downtime_depth() ? " with scheduled downtime depth" + : ""); + + if (!_host_instance_known(hscr.host_id())) { + SPDLOG_LOGGER_WARN(_logger_sql, + "unified_sql: pb adaptive host status {} thrown away " + "because host {} is not known by " + "any poller", + hscr.host_id(), hscr.host_id()); + return; + } + + int32_t conn = _mysql.choose_connection_by_instance( + _cache_host_instance[static_cast(hscr.host_id())]); + + if (_store_in_hosts_services) { + constexpr std::string_view buf("UPDATE hosts SET "); + std::string query{buf}; + if (hscr.has_acknowledgement_type()) + query += fmt::format("acknowledged='{}',acknowledgement_type='{}',", + hscr.acknowledgement_type() != AckType::NONE ? 1 : 0, + hscr.acknowledgement_type()); + if (hscr.has_notification_number()) + query += + fmt::format("notification_number={},", hscr.notification_number()); + if (hscr.has_scheduled_downtime_depth()) + query += fmt::format("scheduled_downtime_depth={},", + hscr.scheduled_downtime_depth()); + if (query.size() > buf.size()) { + query.resize(query.size() - 1); + query += fmt::format(" WHERE host_id={}", hscr.host_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", query); + _mysql.run_query(query, database::mysql_error::store_host_status, conn); + _add_action(conn, actions::hosts); + } + } + + if (_store_in_resources) { + constexpr std::string_view res_buf("UPDATE resources SET "); + std::string res_query{res_buf}; + if (hscr.has_acknowledgement_type()) + res_query += + fmt::format("acknowledged='{}',", + hscr.acknowledgement_type() != AckType::NONE ? 1 : 0); + if (hscr.has_notification_number()) + res_query += + fmt::format("notification_number={},", hscr.notification_number()); + if (hscr.has_scheduled_downtime_depth()) + res_query += + fmt::format("in_downtime={},", hscr.scheduled_downtime_depth() > 0); + if (res_query.size() > res_buf.size()) { + res_query.resize(res_query.size() - 1); + res_query += fmt::format(" WHERE parent_id=0 AND id={}", hscr.host_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", res_query); + _mysql.run_query(res_query, database::mysql_error::update_resources, + conn); + _add_action(conn, actions::resources); + } + } +} + /** * Process an instance event. 
The thread executing the command is * controlled so that queries depending on this one will be made by the same @@ -2804,8 +2870,8 @@ void stream::_process_pb_instance(const std::shared_ptr& d) { actions::hostgroups | actions::service_dependencies | actions::host_dependencies); - /* Now, the local::pb_stop is handled by unified_sql. So the pb_instance with - * running = false, seems no more useful. */ + /* Now, the local::pb_stop is handled by unified_sql. So the pb_instance + * with running = false, seems no more useful. */ // Log message. SPDLOG_LOGGER_INFO( _logger_sql, @@ -2914,10 +2980,10 @@ void stream::_process_pb_instance_status(const std::shared_ptr& d) { actions::comments); // Log message. - SPDLOG_LOGGER_DEBUG( - _logger_sql, - "unified_sql: processing poller status event (id: {}, last alive: {} {})", - is.instance_id(), is.last_alive(), is.ShortDebugString()); + SPDLOG_LOGGER_DEBUG(_logger_sql, + "unified_sql: processing poller status event (id: {}, " + "last alive: {} {})", + is.instance_id(), is.last_alive(), is.ShortDebugString()); // Processing. if (_is_valid_poller(is.instance_id())) { @@ -3053,10 +3119,10 @@ void stream::_process_pb_log(const std::shared_ptr& d) { const auto& le_obj = le.obj(); // Log message. - SPDLOG_LOGGER_INFO( - _logger_sql, - "unified_sql: processing pb log of poller '{}' generated at {} (type {})", - le_obj.instance_name(), le_obj.ctime(), le_obj.msg_type()); + SPDLOG_LOGGER_INFO(_logger_sql, + "unified_sql: processing pb log of poller '{}' " + "generated at {} (type {})", + le_obj.instance_name(), le_obj.ctime(), le_obj.msg_type()); if (_logs->is_bulk()) { auto binder = [&](database::mysql_bulk_bind& b) { @@ -3752,10 +3818,10 @@ void stream::_process_service(const std::shared_ptr& d) { // Processed object. 
const neb::service& s(*static_cast(d.get())); if (!_host_instance_known(s.host_id)) { - SPDLOG_LOGGER_WARN( - _logger_sql, - "unified_sql: service ({0}, {1}) thrown away because host {0} unknown", - s.host_id, s.service_id); + SPDLOG_LOGGER_WARN(_logger_sql, + "unified_sql: service ({0}, {1}) thrown away because " + "host {0} unknown", + s.host_id, s.service_id); return; } auto cache_ptr = cache::global_cache::instance_ptr(); @@ -4062,6 +4128,9 @@ uint64_t stream::_process_pb_service_in_resources(const Service& s, 5, svc_ordered_status[s.state()]); _resources_service_insert.bind_value_as_u64_ext( 6, s.last_state_change(), mapping::entry::invalid_on_zero); + _logger_sql->debug("service1 ({}, {}) scheduled_downtime_depth: {}", + s.host_id(), s.service_id(), + s.scheduled_downtime_depth()); _resources_service_insert.bind_value_as_bool( 7, s.scheduled_downtime_depth() > 0); _resources_service_insert.bind_value_as_bool( @@ -4155,6 +4224,9 @@ uint64_t stream::_process_pb_service_in_resources(const Service& s, 3, svc_ordered_status[s.state()]); _resources_service_update.bind_value_as_u64_ext( 4, s.last_state_change(), mapping::entry::invalid_on_zero); + _logger_sql->debug("service2 ({}, {}) scheduled_downtime_depth: {}", + s.host_id(), s.service_id(), + s.scheduled_downtime_depth()); _resources_service_update.bind_value_as_bool( 5, s.scheduled_downtime_depth() > 0); _resources_service_update.bind_value_as_bool( @@ -4296,96 +4368,98 @@ void stream::_process_pb_adaptive_service(const std::shared_ptr& d) { int32_t conn = _mysql.choose_connection_by_instance( _cache_host_instance[static_cast(as.host_id())]); - constexpr std::string_view buf("UPDATE services SET"); - std::string query{buf.data(), buf.size()}; - if (as.has_notify()) - query += fmt::format(" notify='{}',", as.notify() ? 1 : 0); - if (as.has_active_checks()) - query += fmt::format(" active_checks='{}',", as.active_checks() ? 1 : 0); - if (as.has_should_be_scheduled()) - query += fmt::format(" should_be_scheduled='{}',", - as.should_be_scheduled() ? 1 : 0); - if (as.has_passive_checks()) - query += fmt::format(" passive_checks='{}',", as.passive_checks() ? 1 : 0); - if (as.has_event_handler_enabled()) - query += fmt::format(" event_handler_enabled='{}',", - as.event_handler_enabled() ? 1 : 0); - if (as.has_flap_detection_enabled()) - query += fmt::format(" flap_detection='{}',", - as.flap_detection_enabled() ? 1 : 0); - if (as.has_obsess_over_service()) - query += fmt::format(" obsess_over_service='{}',", - as.obsess_over_service() ? 1 : 0); - if (as.has_event_handler()) - query += fmt::format( - " event_handler='{}',", - misc::string::escape(as.event_handler(), - get_centreon_storage_services_col_size( - centreon_storage_services_event_handler))); - if (as.has_check_command()) - query += fmt::format( - " check_command='{}',", - misc::string::escape(as.check_command(), - get_centreon_storage_services_col_size( - centreon_storage_services_check_command))); - if (as.has_check_interval()) - query += fmt::format(" check_interval={},", as.check_interval()); - if (as.has_retry_interval()) - query += fmt::format(" retry_interval={},", as.retry_interval()); - if (as.has_max_check_attempts()) - query += fmt::format(" max_check_attempts={},", as.max_check_attempts()); - if (as.has_check_freshness()) - query += - fmt::format(" check_freshness='{}',", as.check_freshness() ? 
1 : 0); - if (as.has_check_period()) - query += fmt::format( - " check_period='{}',", - misc::string::escape(as.check_period(), - get_centreon_storage_services_col_size( - centreon_storage_services_check_period))); - if (as.has_notification_period()) - query += - fmt::format(" notification_period='{}',", - misc::string::escape( - as.notification_period(), - get_centreon_storage_services_col_size( - centreon_storage_services_notification_period))); - - // If nothing was added to query, we can exit immediately. - if (query.size() > buf.size()) { - query.resize(query.size() - 1); - query += fmt::format(" WHERE host_id={} AND service_id={}", as.host_id(), - as.service_id()); - SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", query); - _mysql.run_query(query, database::mysql_error::store_service, conn); - _add_action(conn, actions::services); - - if (_store_in_resources) { - constexpr std::string_view res_buf("UPDATE resources SET"); - std::string res_query{res_buf.data(), res_buf.size()}; - if (as.has_notify()) - res_query += - fmt::format(" notifications_enabled='{}',", as.notify() ? 1 : 0); - if (as.has_active_checks()) - res_query += fmt::format(" active_checks_enabled='{}',", - as.active_checks() ? 1 : 0); - if (as.has_passive_checks()) - res_query += fmt::format(" passive_checks_enabled='{}',", - as.passive_checks() ? 1 : 0); - if (as.has_max_check_attempts()) - res_query += - fmt::format(" max_check_attempts={},", as.max_check_attempts()); + if (_store_in_hosts_services) { + constexpr std::string_view buf("UPDATE services SET"); + std::string query{buf.data(), buf.size()}; + if (as.has_notify()) + query += fmt::format(" notify='{}',", as.notify() ? 1 : 0); + if (as.has_active_checks()) + query += fmt::format(" active_checks='{}',", as.active_checks() ? 1 : 0); + if (as.has_should_be_scheduled()) + query += fmt::format(" should_be_scheduled='{}',", + as.should_be_scheduled() ? 1 : 0); + if (as.has_passive_checks()) + query += + fmt::format(" passive_checks='{}',", as.passive_checks() ? 1 : 0); + if (as.has_event_handler_enabled()) + query += fmt::format(" event_handler_enabled='{}',", + as.event_handler_enabled() ? 1 : 0); + if (as.has_flap_detection_enabled()) + query += fmt::format(" flap_detection='{}',", + as.flap_detection_enabled() ? 1 : 0); + if (as.has_obsess_over_service()) + query += fmt::format(" obsess_over_service='{}',", + as.obsess_over_service() ? 1 : 0); + if (as.has_event_handler()) + query += fmt::format( + " event_handler='{}',", + misc::string::escape(as.event_handler(), + get_centreon_storage_services_col_size( + centreon_storage_services_event_handler))); + if (as.has_check_command()) + query += fmt::format( + " check_command='{}',", + misc::string::escape(as.check_command(), + get_centreon_storage_services_col_size( + centreon_storage_services_check_command))); + if (as.has_check_interval()) + query += fmt::format(" check_interval={},", as.check_interval()); + if (as.has_retry_interval()) + query += fmt::format(" retry_interval={},", as.retry_interval()); + if (as.has_max_check_attempts()) + query += fmt::format(" max_check_attempts={},", as.max_check_attempts()); + if (as.has_check_freshness()) + query += + fmt::format(" check_freshness='{}',", as.check_freshness() ? 
1 : 0); + if (as.has_check_period()) + query += fmt::format( + " check_period='{}',", + misc::string::escape(as.check_period(), + get_centreon_storage_services_col_size( + centreon_storage_services_check_period))); + if (as.has_notification_period()) + query += + fmt::format(" notification_period='{}',", + misc::string::escape( + as.notification_period(), + get_centreon_storage_services_col_size( + centreon_storage_services_notification_period))); + + // If nothing was added to query, we can exit immediately. + if (query.size() > buf.size()) { + query.resize(query.size() - 1); + query += fmt::format(" WHERE host_id={} AND service_id={}", as.host_id(), + as.service_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", query); + _mysql.run_query(query, database::mysql_error::store_service, conn); + _add_action(conn, actions::services); + } + } - if (res_query.size() > res_buf.size()) { - res_query.resize(res_query.size() - 1); - res_query += fmt::format(" WHERE parent_id={} AND id={}", as.host_id(), - as.service_id()); - SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", - res_query); - _mysql.run_query(res_query, database::mysql_error::update_resources, - conn); - _add_action(conn, actions::resources); - } + if (_store_in_resources) { + constexpr std::string_view res_buf("UPDATE resources SET"); + std::string res_query{res_buf.data(), res_buf.size()}; + if (as.has_notify()) + res_query += + fmt::format(" notifications_enabled='{}',", as.notify() ? 1 : 0); + if (as.has_active_checks()) + res_query += fmt::format(" active_checks_enabled='{}',", + as.active_checks() ? 1 : 0); + if (as.has_passive_checks()) + res_query += fmt::format(" passive_checks_enabled='{}',", + as.passive_checks() ? 1 : 0); + if (as.has_max_check_attempts()) + res_query += + fmt::format(" max_check_attempts={},", as.max_check_attempts()); + + if (res_query.size() > res_buf.size()) { + res_query.resize(res_query.size() - 1); + res_query += fmt::format(" WHERE parent_id={} AND id={}", as.host_id(), + as.service_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", res_query); + _mysql.run_query(res_query, database::mysql_error::update_resources, + conn); + _add_action(conn, actions::resources); } } } @@ -4570,7 +4644,8 @@ void stream::_process_service_status(const std::shared_ptr& d) { // Do nothing. 
SPDLOG_LOGGER_INFO( _logger_sql, - "unified_sql: not processing service status event (host: {}, service: " + "unified_sql: not processing service status event (host: {}, " + "service: " "{}, " "check type: {}, last check: {}, next check: {}, now: {}, state ({}, " "{}))", @@ -4677,6 +4752,9 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); b->set_value_as_bool(25, sscr.acknowledgement_type() != AckType::NONE); b->set_value_as_i32(26, sscr.acknowledgement_type()); + _logger_sql->debug("service3 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); b->set_value_as_i32(27, sscr.scheduled_downtime_depth()); b->set_value_as_i32(28, sscr.host_id()); b->set_value_as_i32(29, sscr.service_id()); @@ -4732,6 +4810,9 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { _sscr_update->bind_value_as_bool( 25, sscr.acknowledgement_type() != AckType::NONE); _sscr_update->bind_value_as_i32(26, sscr.acknowledgement_type()); + _logger_sql->debug("service4 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); _sscr_update->bind_value_as_i32(27, sscr.scheduled_downtime_depth()); _sscr_update->bind_value_as_i32(28, sscr.host_id()); _sscr_update->bind_value_as_i32(29, sscr.service_id()); @@ -4764,6 +4845,9 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { b->set_value_as_i32(1, svc_ordered_status[sscr.state()]); b->set_value_as_u64(2, sscr.last_state_change(), mapping::entry::invalid_on_zero); + _logger_sql->debug("service5 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); b->set_value_as_bool(3, sscr.scheduled_downtime_depth() > 0); b->set_value_as_bool(4, sscr.acknowledgement_type() != AckType::NONE); b->set_value_as_bool(5, @@ -4785,13 +4869,17 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { b->current_row()); } else { _logger_sql->debug( - "unified_sql: NOT BULK pb service status ({}, {}) {} in resources", + "unified_sql: NOT BULK pb service status ({}, {}) {} in " + "resources", sscr.host_id(), sscr.service_id(), sscr.state()); _sscr_resources_update->bind_value_as_i32(0, sscr.state()); _sscr_resources_update->bind_value_as_i32( 1, svc_ordered_status[sscr.state()]); _sscr_resources_update->bind_value_as_u64_ext( 2, sscr.last_state_change(), mapping::entry::invalid_on_zero); + _logger_sql->debug("service6 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); _sscr_resources_update->bind_value_as_bool( 3, sscr.scheduled_downtime_depth() > 0); _sscr_resources_update->bind_value_as_bool( @@ -4817,7 +4905,8 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { // Do nothing. SPDLOG_LOGGER_INFO( _logger_sql, - "unified_sql: not processing service status check result event (host: " + "unified_sql: not processing service status check result event " + "(host: " "{}, " "service: {}, " "check type: {}, last check: {}, next check: {}, now: {}, " @@ -4830,6 +4919,98 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { _unified_sql_process_pb_service_status(d); } +/** + * @brief Process an adaptive service status event. + * + * @param d The event to process. 
+ */ +void stream::_process_pb_adaptive_service_status( + const std::shared_ptr& d) { + _finish_action( + -1, actions::host_parents | actions::comments | actions::downtimes); + // Processed object. + auto s{static_cast(d.get())}; + auto& sscr = s->obj(); + + SPDLOG_LOGGER_DEBUG( + _logger_sql, + "unified_sql: processing pb adaptive service status of ({}, {}) " + "-{}{}{}", + sscr.host_id(), sscr.service_id(), + sscr.has_acknowledgement_type() ? "with acknowledge type" : "", + sscr.has_notification_number() ? "with notification number" : "", + sscr.has_scheduled_downtime_depth() ? "with scheduled downtime depth" + : ""); + + if (!_host_instance_known(sscr.host_id())) { + SPDLOG_LOGGER_WARN( + _logger_sql, + "unified_sql: pb adaptive service status ({}, {}) thrown " + "away because host {} is not known by any poller", + sscr.host_id(), sscr.service_id(), sscr.host_id()); + return; + } + + int32_t conn = _mysql.choose_connection_by_instance( + _cache_host_instance[sscr.host_id()]); + + if (_store_in_hosts_services) { + constexpr std::string_view query("UPDATE services SET "); + std::string buf_query(query); + if (sscr.has_acknowledgement_type()) + buf_query += + fmt::format("acknowledged='{}',acknowledgement_type={},", + sscr.acknowledgement_type() != AckType::NONE ? 1 : 0, + sscr.acknowledgement_type()); + if (sscr.has_notification_number()) + buf_query += + fmt::format("notification_number={},", sscr.notification_number()); + _logger_sql->debug("service7 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); + if (sscr.has_scheduled_downtime_depth()) + buf_query += fmt::format("scheduled_downtime_depth={},", + sscr.scheduled_downtime_depth()); + if (buf_query.size() > query.size()) { + buf_query.resize(buf_query.size() - 1); + buf_query += fmt::format(" WHERE host_id={} AND service_id={}", + sscr.host_id(), sscr.service_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", buf_query); + _mysql.run_query(buf_query, database::mysql_error::store_service_status, + conn); + _add_action(conn, actions::services); + } + } + + if (_store_in_resources) { + constexpr std::string_view res_query("UPDATE resources SET "); + std::string buf_res_query(res_query); + if (sscr.has_acknowledgement_type()) + buf_res_query += + fmt::format("acknowledged='{}',", + sscr.acknowledgement_type() != AckType::NONE ? 
1 : 0); + if (sscr.has_notification_number()) + buf_res_query += + fmt::format("notification_number={},", sscr.notification_number()); + _logger_sql->debug("service8 ({}, {}) scheduled_downtime_depth: {}", + sscr.host_id(), sscr.service_id(), + sscr.scheduled_downtime_depth()); + if (sscr.has_scheduled_downtime_depth()) + buf_res_query += + fmt::format("in_downtime={},", sscr.scheduled_downtime_depth() > 0); + if (buf_res_query.size() > res_query.size()) { + buf_res_query.resize(buf_res_query.size() - 1); + buf_res_query += fmt::format(" WHERE parent_id={} AND id={}", + sscr.host_id(), sscr.service_id()); + SPDLOG_LOGGER_TRACE(_logger_sql, "unified_sql: query <<{}>>", + buf_res_query); + _mysql.run_query(buf_res_query, database::mysql_error::update_resources, + conn); + _add_action(conn, actions::resources); + } + } +} + void stream::_process_severity(const std::shared_ptr& d) { if (!_store_in_resources) return; diff --git a/clib/inc/com/centreon/logging/temp_logger.hh b/clib/inc/com/centreon/logging/temp_logger.hh index d8f09e3fc13..8caff4b2017 100644 --- a/clib/inc/com/centreon/logging/temp_logger.hh +++ b/clib/inc/com/centreon/logging/temp_logger.hh @@ -19,7 +19,6 @@ #ifndef CC_LOGGING_TEMP_LOGGER_HH #define CC_LOGGING_TEMP_LOGGER_HH -#include #include "com/centreon/logging/engine.hh" #include "com/centreon/misc/stringifier.hh" diff --git a/cmake-vcpkg.sh b/cmake-vcpkg.sh index e8eb514dcdb..deb59447bc1 100755 --- a/cmake-vcpkg.sh +++ b/cmake-vcpkg.sh @@ -104,6 +104,9 @@ if [ -r /etc/centos-release -o -r /etc/almalinux-release ] ; then elif [[ "$maj" == "centos7" ]] ; then yum -y install epel-release cmake3 cmake='cmake3' + elif [[ "$maj" == "centos9" ]] ; then + dnf config-manager --set-enabled crb + dnf -y install epel-release else dnf -y install cmake cmake='cmake' @@ -145,6 +148,7 @@ if [ -r /etc/centos-release -o -r /etc/almalinux-release ] ; then openssl-devel libssh2-devel libcurl-devel + tar zlib-devel ) if [[ "$maj" == 'centos8' ]] ; then diff --git a/engine/enginerpc/engine_impl.cc b/engine/enginerpc/engine_impl.cc index f32a0aefc5e..bac64ab2a0a 100644 --- a/engine/enginerpc/engine_impl.cc +++ b/engine/enginerpc/engine_impl.cc @@ -22,49 +22,30 @@ #include -namespace asio = boost::asio; - #include #include #include #include -#include -#include #include "com/centreon/common/process_stat.hh" #include "com/centreon/common/time.hh" -#include "com/centreon/engine/host.hh" -#include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/command_manager.hh" #include "com/centreon/engine/commands/commands.hh" #include "com/centreon/engine/commands/processing.hh" -#include "com/centreon/engine/comment.hh" -#include "com/centreon/engine/common.hh" -#include "com/centreon/engine/contact.hh" -#include "com/centreon/engine/contactgroup.hh" -#include "com/centreon/engine/downtimes/downtime.hh" #include "com/centreon/engine/downtimes/downtime_finder.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/downtimes/service_downtime.hh" -#include "com/centreon/engine/engine_impl.hh" #include "com/centreon/engine/events/loop.hh" #include "com/centreon/engine/globals.hh" -#include "com/centreon/engine/hostdependency.hh" -#include "com/centreon/engine/hostgroup.hh" -#include "com/centreon/engine/service.hh" -#include "com/centreon/engine/servicedependency.hh" -#include "com/centreon/engine/servicegroup.hh" -#include "com/centreon/engine/statistics.hh" +#include "com/centreon/engine/severity.hh" #include 
"com/centreon/engine/statusdata.hh" #include "com/centreon/engine/version.hh" -#include "common/log_v2/log_v2.hh" using namespace com::centreon::engine; -using namespace com::centreon::engine::logging; using namespace com::centreon::engine::downtimes; using com::centreon::common::log_v2::log_v2; @@ -784,7 +765,7 @@ grpc::Status engine_impl::RemoveHostAcknowledgement( /* set the acknowledgement flag */ temp_host->set_acknowledgement(AckType::NONE); /* update the status log with the host info */ - temp_host->update_status(); + temp_host->update_status(host::STATUS_ACKNOWLEDGEMENT); /* remove any non-persistant comments associated with the ack */ comment::delete_host_acknowledgement_comments(temp_host.get()); return 0; @@ -824,7 +805,7 @@ grpc::Status engine_impl::RemoveServiceAcknowledgement( /* set the acknowledgement flag */ temp_service->set_acknowledgement(AckType::NONE); /* update the status log with the service info */ - temp_service->update_status(); + temp_service->update_status(service::STATUS_ACKNOWLEDGEMENT); /* remove any non-persistant comments associated with the ack */ comment::delete_service_acknowledgement_comments(temp_service.get()); return 0; @@ -880,7 +861,7 @@ grpc::Status engine_impl::AcknowledgementHostProblem( request->ack_data(), notifier::notification_option_none); /* update the status log with the host info */ - temp_host->update_status(); + temp_host->update_status(host::STATUS_ACKNOWLEDGEMENT); /* add a comment for the acknowledgement */ auto com = std::make_shared( comment::host, comment::acknowledgment, temp_host->host_id(), 0, @@ -943,7 +924,7 @@ grpc::Status engine_impl::AcknowledgementServiceProblem( request->ack_author(), request->ack_data(), notifier::notification_option_none); /* update the status log with the service info */ - temp_service->update_status(); + temp_service->update_status(service::STATUS_ACKNOWLEDGEMENT); /* add a comment for the acknowledgement */ auto com = std::make_shared( @@ -1757,7 +1738,7 @@ grpc::Status engine_impl::DeleteServiceDowntimeFull( const DowntimeCriterias* request, CommandSuccess* response __attribute__((unused))) { std::string err; - auto fn = std::packaged_task([&err, request]() -> int32_t { + auto fn = std::packaged_task([request]() -> int32_t { std::list dtlist; /* iterate through all current downtime(s) */ for (auto it = downtimes::downtime_manager::instance() @@ -2341,8 +2322,7 @@ grpc::Status engine_impl::ChangeHostObjectIntVar(grpc::ServerContext* context /* modify the check interval */ temp_host->set_check_interval(request->dval()); attr = MODATTR_NORMAL_CHECK_INTERVAL; - temp_host->set_modified_attributes( - temp_host->get_modified_attributes() | attr); + temp_host->add_modified_attributes(attr); /* schedule a host check if previous interval was 0 (checks were not * regularly scheduled) */ diff --git a/engine/inc/com/centreon/engine/broker.hh b/engine/inc/com/centreon/engine/broker.hh index 70c1cc7237d..2d4a2c50e7a 100644 --- a/engine/inc/com/centreon/engine/broker.hh +++ b/engine/inc/com/centreon/engine/broker.hh @@ -25,7 +25,6 @@ #include "com/centreon/engine/commands/command.hh" #include "com/centreon/engine/comment.hh" #include "com/centreon/engine/events/timed_event.hh" -#include "com/centreon/engine/timeperiod.hh" /* Event broker options. 
*/ #define BROKER_NOTHING 0 @@ -454,7 +453,9 @@ int broker_host_check(int type, int check_type, const char* cmdline, char* output); -void broker_host_status(int type, com::centreon::engine::host* hst); +void broker_host_status(int type, + com::centreon::engine::host* hst, + uint32_t attributes); void broker_log_data(char* data, time_t entry_time); int broker_notification_data(int type, int flags, @@ -484,7 +485,9 @@ int broker_service_check(int type, com::centreon::engine::service* svc, int check_type, const char* cmdline); -void broker_service_status(int type, com::centreon::engine::service* svc); +void broker_service_status(int type, + com::centreon::engine::service* svc, + uint32_t attributes); void broker_statechange_data(int type, int flags, int attr, diff --git a/engine/inc/com/centreon/engine/commands/command.hh b/engine/inc/com/centreon/engine/commands/command.hh index 60f39ce058c..58125cbf632 100644 --- a/engine/inc/com/centreon/engine/commands/command.hh +++ b/engine/inc/com/centreon/engine/commands/command.hh @@ -20,7 +20,6 @@ #define CCE_COMMANDS_COMMAND_HH #include "com/centreon/engine/commands/command_listener.hh" -#include "com/centreon/engine/commands/result.hh" #include "com/centreon/engine/macros/defines.hh" namespace com::centreon::engine { diff --git a/engine/inc/com/centreon/engine/commands/processing.hh b/engine/inc/com/centreon/engine/commands/processing.hh index 34152d2ead9..f5cf0036382 100644 --- a/engine/inc/com/centreon/engine/commands/processing.hh +++ b/engine/inc/com/centreon/engine/commands/processing.hh @@ -23,10 +23,7 @@ #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/contact.hh" -#include "com/centreon/engine/contactgroup.hh" -#include "com/centreon/engine/host.hh" #include "com/centreon/engine/hostgroup.hh" -#include "com/centreon/engine/service.hh" #include "com/centreon/engine/servicegroup.hh" namespace com::centreon::engine { diff --git a/engine/inc/com/centreon/engine/common.hh b/engine/inc/com/centreon/engine/common.hh index 9f28ee1ac9a..6ad8003d64f 100644 --- a/engine/inc/com/centreon/engine/common.hh +++ b/engine/inc/com/centreon/engine/common.hh @@ -1,24 +1,23 @@ -/* -** Copyright 1999-2009 Ethan Galstad -** Copyright 2009-2011 Nagios Core Development Team and Community -*Contributors -** Copyright 2011-2013,2016 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 1999-2009 Ethan Galstad + * Copyright 2009-2011 Nagios Core Development Team and Community + * Contributors Copyright 2011-2013,2016, 2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #ifndef CCE_COMMON_HH #define CCE_COMMON_HH @@ -291,26 +290,31 @@ enum ret_val { 8192 /* Max length of an external command. */ #define MAX_DATETIME_LENGTH 48 -/* Modified attributes. */ -#define MODATTR_NONE 0 -#define MODATTR_NOTIFICATIONS_ENABLED (1 << 0) -#define MODATTR_ACTIVE_CHECKS_ENABLED (1 << 1) -#define MODATTR_PASSIVE_CHECKS_ENABLED (1 << 2) -#define MODATTR_EVENT_HANDLER_ENABLED (1 << 3) -#define MODATTR_FLAP_DETECTION_ENABLED (1 << 4) -#define MODATTR_FAILURE_PREDICTION_ENABLED (1 << 5) -#define MODATTR_PERFORMANCE_DATA_ENABLED (1 << 6) -#define MODATTR_OBSESSIVE_HANDLER_ENABLED (1 << 7) -#define MODATTR_EVENT_HANDLER_COMMAND (1 << 8) -#define MODATTR_CHECK_COMMAND (1 << 9) -#define MODATTR_NORMAL_CHECK_INTERVAL (1 << 10) -#define MODATTR_RETRY_CHECK_INTERVAL (1 << 11) -#define MODATTR_MAX_CHECK_ATTEMPTS (1 << 12) -#define MODATTR_FRESHNESS_CHECKS_ENABLED (1 << 13) -#define MODATTR_CHECK_TIMEPERIOD (1 << 14) -#define MODATTR_CUSTOM_VARIABLE (1 << 15) -#define MODATTR_NOTIFICATION_TIMEPERIOD (1 << 16) -#define MODATTR_ALL (~0u) +/* Modified attributes. These attributes are configuration attributes not real + * time ones. For example, we have a "modified attributes" field in services + * that stores them to know what has been changed in the current configuration, + * usually changed through an external command. */ +enum modattr { + MODATTR_NONE = 0, + MODATTR_NOTIFICATIONS_ENABLED = 1 << 0, + MODATTR_ACTIVE_CHECKS_ENABLED = 1 << 1, + MODATTR_PASSIVE_CHECKS_ENABLED = 1 << 2, + MODATTR_EVENT_HANDLER_ENABLED = 1 << 3, + MODATTR_FLAP_DETECTION_ENABLED = 1 << 4, + MODATTR_FAILURE_PREDICTION_ENABLED = 1 << 5, + MODATTR_PERFORMANCE_DATA_ENABLED = 1 << 6, + MODATTR_OBSESSIVE_HANDLER_ENABLED = 1 << 7, + MODATTR_EVENT_HANDLER_COMMAND = 1 << 8, + MODATTR_CHECK_COMMAND = 1 << 9, + MODATTR_NORMAL_CHECK_INTERVAL = 1 << 10, + MODATTR_RETRY_CHECK_INTERVAL = 1 << 11, + MODATTR_MAX_CHECK_ATTEMPTS = 1 << 12, + MODATTR_FRESHNESS_CHECKS_ENABLED = 1 << 13, + MODATTR_CHECK_TIMEPERIOD = 1 << 14, + MODATTR_CUSTOM_VARIABLE = 1 << 15, + MODATTR_NOTIFICATION_TIMEPERIOD = 1 << 16, + MODATTR_ALL = ~0u, +}; /* Default values. 
*/ #define DEFAULT_ORPHAN_CHECK_INTERVAL \ diff --git a/engine/inc/com/centreon/engine/downtimes/downtime.hh b/engine/inc/com/centreon/engine/downtimes/downtime.hh index 421433e96c2..4f321f4e6e2 100644 --- a/engine/inc/com/centreon/engine/downtimes/downtime.hh +++ b/engine/inc/com/centreon/engine/downtimes/downtime.hh @@ -22,7 +22,6 @@ #define CCE_DOWNTIMES_DOWTIME_HH #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" namespace com::centreon::engine { diff --git a/engine/inc/com/centreon/engine/escalation.hh b/engine/inc/com/centreon/engine/escalation.hh index 4db5c189d05..ba92c3327bf 100644 --- a/engine/inc/com/centreon/engine/escalation.hh +++ b/engine/inc/com/centreon/engine/escalation.hh @@ -20,7 +20,6 @@ #ifndef CCE_ESCALATION_HH #define CCE_ESCALATION_HH -#include "com/centreon/engine/contactgroup.hh" #include "com/centreon/engine/notifier.hh" #include "com/centreon/engine/shared.hh" diff --git a/engine/inc/com/centreon/engine/flapping.hh b/engine/inc/com/centreon/engine/flapping.hh index 601e06c700b..66ae8c29ca8 100644 --- a/engine/inc/com/centreon/engine/flapping.hh +++ b/engine/inc/com/centreon/engine/flapping.hh @@ -22,7 +22,6 @@ #define CCE_FLAPPING_HH #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" // Flapping Types #define HOST_FLAPPING 0 diff --git a/engine/inc/com/centreon/engine/globals.hh b/engine/inc/com/centreon/engine/globals.hh index 1dc0bfb56ab..8a08bdb1fd9 100644 --- a/engine/inc/com/centreon/engine/globals.hh +++ b/engine/inc/com/centreon/engine/globals.hh @@ -1,24 +1,23 @@ -/* -** Copyright 1999-2009 Ethan Galstad -** Copyright 2009-2010 Nagios Core Development Team and Community Contributors -** Copyright 2011-2019 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ - +/** + * Copyright 1999-2009 Ethan Galstad + * Copyright 2009-2010 Nagios Core Development Team and Community Contributors + * Copyright 2011-2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ #ifndef CCE_GLOBALS_HH #define CCE_GLOBALS_HH diff --git a/engine/inc/com/centreon/engine/host.hh b/engine/inc/com/centreon/engine/host.hh index 0c97d18c5eb..600e5048b4e 100644 --- a/engine/inc/com/centreon/engine/host.hh +++ b/engine/inc/com/centreon/engine/host.hh @@ -129,7 +129,7 @@ class host : public notifier { void clear_flap(double percent_change, double high_threshold, double low_threshold); - void update_status() override; + void update_status(uint32_t attributes = STATUS_ALL) override; void update_adaptive_data(); void check_for_expired_acknowledgement(); // bool check_notification_viability(reason_type type, diff --git a/engine/inc/com/centreon/engine/macros.hh b/engine/inc/com/centreon/engine/macros.hh index f3c8d74dab9..cb16341eccf 100644 --- a/engine/inc/com/centreon/engine/macros.hh +++ b/engine/inc/com/centreon/engine/macros.hh @@ -24,7 +24,6 @@ #include "com/centreon/engine/macros/clear_hostgroup.hh" #include "com/centreon/engine/macros/clear_service.hh" #include "com/centreon/engine/macros/clear_servicegroup.hh" -#include "com/centreon/engine/macros/defines.hh" #include "com/centreon/engine/macros/grab_host.hh" #include "com/centreon/engine/macros/grab_service.hh" #include "com/centreon/engine/macros/grab_value.hh" diff --git a/engine/inc/com/centreon/engine/macros/grab_host.hh b/engine/inc/com/centreon/engine/macros/grab_host.hh index 3b81c8e7179..e926b68ccc9 100644 --- a/engine/inc/com/centreon/engine/macros/grab_host.hh +++ b/engine/inc/com/centreon/engine/macros/grab_host.hh @@ -21,7 +21,6 @@ #ifndef CCE_MACROS_GRAB_HOST_HH #define CCE_MACROS_GRAB_HOST_HH -#include "com/centreon/engine/host.hh" #include "com/centreon/engine/macros/defines.hh" #ifdef __cplusplus diff --git a/engine/inc/com/centreon/engine/macros/grab_service.hh b/engine/inc/com/centreon/engine/macros/grab_service.hh index 4435965f0cd..9ca42e20b8d 100644 --- a/engine/inc/com/centreon/engine/macros/grab_service.hh +++ b/engine/inc/com/centreon/engine/macros/grab_service.hh @@ -22,7 +22,6 @@ #define CCE_MACROS_GRAB_SERVICE_HH #include "com/centreon/engine/macros/defines.hh" -#include "com/centreon/engine/service.hh" #ifdef __cplusplus extern "C" { diff --git a/engine/inc/com/centreon/engine/nebstructs.hh b/engine/inc/com/centreon/engine/nebstructs.hh index 02073edc4eb..ea602d3ed94 100644 --- a/engine/inc/com/centreon/engine/nebstructs.hh +++ b/engine/inc/com/centreon/engine/nebstructs.hh @@ -22,8 +22,6 @@ #define CCE_NEBSTRUCTS_HH #include "com/centreon/engine/comment.hh" -#include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" /* Acknowledgement structure. */ typedef struct nebstruct_acknowledgement_struct { @@ -187,6 +185,7 @@ typedef struct nebstruct_host_check_struct { typedef struct nebstruct_host_status_struct { int type; void* object_ptr; + uint32_t attributes; } nebstruct_host_status_data; /* Log data structure. 
*/ @@ -248,6 +247,7 @@ typedef struct nebstruct_service_check_struct { typedef struct nebstruct_service_status_struct { int type; void* object_ptr; + uint32_t attributes; } nebstruct_service_status_data; typedef struct nebstruct_bench_struct { diff --git a/engine/inc/com/centreon/engine/notifier.hh b/engine/inc/com/centreon/engine/notifier.hh index 329eb98b137..f0e7e82c0f4 100644 --- a/engine/inc/com/centreon/engine/notifier.hh +++ b/engine/inc/com/centreon/engine/notifier.hh @@ -1,5 +1,5 @@ /* - * Copyright 2019 Centreon (https://www.centreon.com/) + * Copyright 2019-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -39,6 +39,15 @@ using AckType = com::centreon::broker::AckType; class notifier : public checkable { public: + /* Status attributes. Used as argument in the notifier::update_status(). */ + enum status_attribute { + STATUS_NONE = 0, + STATUS_DOWNTIME_DEPTH = 1 << 0, + STATUS_NOTIFICATION_NUMBER = 1 << 1, + STATUS_ACKNOWLEDGEMENT = 1 << 2, + STATUS_ALL = ~0u, + }; + enum notification_category { cat_normal, cat_recovery, @@ -173,7 +182,14 @@ class notifier : public checkable { virtual bool schedule_check(time_t check_time, uint32_t options, bool no_update_status_now) = 0; - virtual void update_status() = 0; + + /** + * @brief Update the status of the notifier partially. attributes is a bits + * field based on enum status_attribute specifying what has to be updated. + * + * @param attributes A bits field based on enum status_attribute. + */ + virtual void update_status(uint32_t attributes) = 0; int notify(reason_type type, std::string const& not_author, std::string const& not_data, diff --git a/engine/inc/com/centreon/engine/objects.hh b/engine/inc/com/centreon/engine/objects.hh index a206ba72d5c..79ad67e538c 100644 --- a/engine/inc/com/centreon/engine/objects.hh +++ b/engine/inc/com/centreon/engine/objects.hh @@ -22,13 +22,11 @@ #define CCE_OBJECTS_HH #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/comment.hh" -#include "com/centreon/engine/daterange.hh" #include "com/centreon/engine/downtimes/downtime.hh" #include "com/centreon/engine/hostdependency.hh" #include "com/centreon/engine/hostescalation.hh" #include "com/centreon/engine/hostgroup.hh" #include "com/centreon/engine/servicedependency.hh" #include "com/centreon/engine/serviceescalation.hh" -#include "com/centreon/engine/timerange.hh" #endif /* !CCE_OBJECTS_HH */ diff --git a/engine/inc/com/centreon/engine/sehandlers.hh b/engine/inc/com/centreon/engine/sehandlers.hh index e1086bcb160..d3e28d58505 100644 --- a/engine/inc/com/centreon/engine/sehandlers.hh +++ b/engine/inc/com/centreon/engine/sehandlers.hh @@ -21,9 +21,7 @@ #ifndef CCE_SEHANDLERS_HH #define CCE_SEHANDLERS_HH -#include "com/centreon/engine/host.hh" #include "com/centreon/engine/macros/defines.hh" -#include "com/centreon/engine/service.hh" // Event Handler Types #define HOST_EVENTHANDLER 0 diff --git a/engine/inc/com/centreon/engine/service.hh b/engine/inc/com/centreon/engine/service.hh index 8c3e8cfab42..5885d0a8f6b 100644 --- a/engine/inc/com/centreon/engine/service.hh +++ b/engine/inc/com/centreon/engine/service.hh @@ -20,7 +20,6 @@ #define CCE_SERVICE_HH #include "com/centreon/engine/check_result.hh" -#include "com/centreon/engine/common.hh" #include "com/centreon/engine/hash.hh" #include "com/centreon/engine/logging.hh" #include "com/centreon/engine/notifier.hh" @@ -218,7 +217,7 @@ class service : 
public notifier { double low_threshold); void enable_flap_detection(); void disable_flap_detection(); - void update_status() override; + void update_status(uint32_t status_attributes = STATUS_ALL) override; void update_adaptive_data(); bool verify_check_viability(int check_options, bool* time_is_valid, diff --git a/engine/inc/com/centreon/engine/statusdata.hh b/engine/inc/com/centreon/engine/statusdata.hh index a94aec5c457..e3d32f2e876 100644 --- a/engine/inc/com/centreon/engine/statusdata.hh +++ b/engine/inc/com/centreon/engine/statusdata.hh @@ -22,7 +22,6 @@ #define CCE_STATUSDATA_HH #include "com/centreon/engine/host.hh" -#include "com/centreon/engine/service.hh" #ifdef __cplusplus extern "C" { diff --git a/engine/inc/com/centreon/engine/utils.hh b/engine/inc/com/centreon/engine/utils.hh index 7560738d8b0..98145eae250 100644 --- a/engine/inc/com/centreon/engine/utils.hh +++ b/engine/inc/com/centreon/engine/utils.hh @@ -21,8 +21,6 @@ #ifndef CCE_UTILS_HH #define CCE_UTILS_HH -#include "com/centreon/engine/check_result.hh" -#include "com/centreon/engine/daterange.hh" #include "com/centreon/engine/macros/defines.hh" #ifdef __cplusplus diff --git a/engine/src/broker.cc b/engine/src/broker.cc index b6c25032068..d6b7d7edb5a 100644 --- a/engine/src/broker.cc +++ b/engine/src/broker.cc @@ -671,8 +671,9 @@ int broker_host_check(int type, * * @param[in] type Type. * @param[in] hst Host. + * @param[in] attributes Attributes from status_attribute enumeration. */ -void broker_host_status(int type, host* hst) { +void broker_host_status(int type, host* hst, uint32_t attributes) { // Config check. if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; @@ -681,6 +682,7 @@ void broker_host_status(int type, host* hst) { nebstruct_host_status_data ds; ds.type = type; ds.object_ptr = hst; + ds.attributes = attributes; // Make callbacks. neb_make_callbacks(NEBCALLBACK_HOST_STATUS_DATA, &ds); @@ -877,8 +879,11 @@ int broker_service_check(int type, * * @param[in] type Type. * @param[in] svc Target service. + * @param[in] attributes Attributes from status_attribute enumeration. */ -void broker_service_status(int type, com::centreon::engine::service* svc) { +void broker_service_status(int type, + com::centreon::engine::service* svc, + uint32_t attributes) { // Config check. if (!(config->event_broker_options() & BROKER_STATUS_DATA)) return; @@ -887,6 +892,7 @@ void broker_service_status(int type, com::centreon::engine::service* svc) { nebstruct_service_status_data ds; ds.type = type; ds.object_ptr = svc; + ds.attributes = attributes; // Make callbacks. neb_make_callbacks(NEBCALLBACK_SERVICE_STATUS_DATA, &ds); diff --git a/engine/src/commands/commands.cc b/engine/src/commands/commands.cc index 234d437038a..69d6ccdddb1 100644 --- a/engine/src/commands/commands.cc +++ b/engine/src/commands/commands.cc @@ -1,6 +1,6 @@ /** * Copyright 1999-2008 Ethan Galstad - * Copyright 2011-2013,2015-2022 Centreon + * Copyright 2011-2013,2015-2024 Centreon * * This file is part of Centreon Engine. 
* @@ -26,9 +26,7 @@ #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/commands/processing.hh" -#include "com/centreon/engine/comment.hh" -#include "com/centreon/engine/configuration/applier/state.hh" -#include "com/centreon/engine/downtimes/downtime.hh" +#include "com/centreon/engine/common.hh" #include "com/centreon/engine/downtimes/downtime_finder.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/events/loop.hh" @@ -540,8 +538,7 @@ int cmd_process_service_check_result(int cmd [[maybe_unused]], runtime_logger->warn( "Warning: Passive check result was received for service '{}' on host " "'{}', but the host could not be found!", - fmt::string_view(svc_description.data(), svc_description.size()), - fmt::string_view(host_name.data(), host_name.size())); + svc_description, host_name); return ERROR; } @@ -2622,7 +2619,7 @@ void acknowledge_host_problem(host* hst, notifier::notification_option_none); /* update the status log with the host info */ - hst->update_status(); + hst->update_status(host::STATUS_ACKNOWLEDGEMENT); /* add a comment for the acknowledgement */ auto com{std::make_shared( @@ -2663,7 +2660,7 @@ void acknowledge_service_problem(service* svc, notifier::notification_option_none); /* update the status log with the service info */ - svc->update_status(); + svc->update_status(service::STATUS_ACKNOWLEDGEMENT); /* add a comment for the acknowledgement */ auto com{std::make_shared( @@ -2679,7 +2676,7 @@ void remove_host_acknowledgement(host* hst) { hst->set_acknowledgement(AckType::NONE); /* update the status log with the host info */ - hst->update_status(); + hst->update_status(host::STATUS_ACKNOWLEDGEMENT); /* remove any non-persistant comments associated with the ack */ comment::delete_host_acknowledgement_comments(hst); @@ -2691,7 +2688,7 @@ void remove_service_acknowledgement(service* svc) { svc->set_acknowledgement(AckType::NONE); /* update the status log with the service info */ - svc->update_status(); + svc->update_status(host::STATUS_ACKNOWLEDGEMENT); /* remove any non-persistant comments associated with the ack */ comment::delete_service_acknowledgement_comments(svc); diff --git a/engine/src/downtimes/host_downtime.cc b/engine/src/downtimes/host_downtime.cc index f594e558b7d..65b499fb32a 100644 --- a/engine/src/downtimes/host_downtime.cc +++ b/engine/src/downtimes/host_downtime.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Centreon (https://www.centreon.com/) + * Copyright 2019-2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ #include "com/centreon/engine/downtimes/host_downtime.hh" #include "com/centreon/engine/broker.hh" -#include "com/centreon/engine/comment.hh" +#include "com/centreon/engine/common.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/events/loop.hh" @@ -468,7 +468,7 @@ int host_downtime::handle() { /* update the status data */ /* Because of the notification the status is sent with CHECK_RESULT level */ - it_hst->second->update_status(); + it_hst->second->update_status(host::STATUS_DOWNTIME_DEPTH); /* schedule an event */ if (!is_fixed()) diff --git a/engine/src/downtimes/service_downtime.cc b/engine/src/downtimes/service_downtime.cc index d5e1671e9dd..93c4e2ac093 100644 --- a/engine/src/downtimes/service_downtime.cc +++ b/engine/src/downtimes/service_downtime.cc @@ -20,7 +20,7 @@ #include "com/centreon/engine/downtimes/service_downtime.hh" #include "com/centreon/engine/broker.hh" -#include "com/centreon/engine/comment.hh" +#include "com/centreon/engine/common.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/events/loop.hh" @@ -186,7 +186,7 @@ int service_downtime::unschedule() { get_triggered_by(), get_duration(), get_downtime_id(), nullptr); found->second->dec_scheduled_downtime_depth(); - found->second->update_status(); + found->second->update_status(service::STATUS_DOWNTIME_DEPTH); /* log a notice - this is parsed by the history CGI */ if (found->second->get_scheduled_downtime_depth() == 0) { @@ -485,8 +485,7 @@ int service_downtime::handle() { SPDLOG_LOGGER_INFO( events_logger, "SERVICE DOWNTIME ALERT: {};{};STARTED; Service has entered a period " - "of scheduled " - "downtime", + "of scheduled downtime", found->second->get_hostname(), found->second->description()); /* send a notification */ @@ -502,7 +501,7 @@ int service_downtime::handle() { /* update the status data */ /* Because of the notification the status is sent with CHECK_RESULT level */ - found->second->update_status(); + found->second->update_status(service::STATUS_DOWNTIME_DEPTH); /* schedule an event */ if (!is_fixed()) diff --git a/engine/src/events/loop.cc b/engine/src/events/loop.cc index 59cd5fe765a..8179e8298d0 100644 --- a/engine/src/events/loop.cc +++ b/engine/src/events/loop.cc @@ -2,7 +2,7 @@ * Copyright 1999-2009 Ethan Galstad * Copyright 2009-2010 Nagios Core Development Team and Community Contributors * Copyright 2011-2013 Merethis - * Copyright 2013-2022 Centreon + * Copyright 2013-2024 Centreon * * This file is part of Centreon Engine. 
* @@ -21,9 +21,7 @@ */ #include "com/centreon/engine/events/loop.hh" -#include #include -#include #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/command_manager.hh" #include "com/centreon/engine/configuration/applier/state.hh" @@ -329,8 +327,7 @@ void loop::_dispatching() { else temp_service->set_next_check( (time_t)(temp_service->get_next_check() + - (temp_service->check_interval() * - config->interval_length()))); + (temp_service->check_interval() * interval_length))); } temp_event->run_time = temp_service->get_next_check(); reschedule_event(std::move(temp_event), events::loop::low); diff --git a/engine/src/host.cc b/engine/src/host.cc index f6f2b63e693..1586b3f1cfd 100644 --- a/engine/src/host.cc +++ b/engine/src/host.cc @@ -16,13 +16,13 @@ * For more information : contact@centreon.com */ -#include "com/centreon/engine/host.hh" #include #include #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/checks/checker.hh" +#include "com/centreon/engine/common.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/configuration/whitelist.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" @@ -30,15 +30,12 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/flapping.hh" #include "com/centreon/engine/globals.hh" -#include "com/centreon/engine/logging.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/macros.hh" -#include "com/centreon/engine/macros/grab_host.hh" #include "com/centreon/engine/neberrors.hh" #include "com/centreon/engine/notification.hh" #include "com/centreon/engine/objects.hh" #include "com/centreon/engine/sehandlers.hh" -#include "com/centreon/engine/shared.hh" #include "com/centreon/engine/statusdata.hh" #include "com/centreon/engine/string.hh" #include "com/centreon/engine/timezone_locker.hh" @@ -2298,9 +2295,12 @@ void host::clear_flap(double percent_change, /** * @brief Updates host status info. Data are sent to event broker. + * + * @param attributes A bits field based on status_attribute enum (default value: + * STATUS_ALL). */ -void host::update_status() { - broker_host_status(NEBTYPE_HOSTSTATUS_UPDATE, this); +void host::update_status(uint32_t attributes) { + broker_host_status(NEBTYPE_HOSTSTATUS_UPDATE, this, attributes); } /** @@ -2317,7 +2317,7 @@ void host::check_for_expired_acknowledgement() { SPDLOG_LOGGER_INFO(events_logger, "Acknowledgement of host '{}' just expired", name()); set_acknowledgement(AckType::NONE); - update_status(); + update_status(STATUS_ACKNOWLEDGEMENT); } } } @@ -2796,7 +2796,8 @@ void host::enable_flap_detection() { check_for_flapping(false, false, true); /* update host status */ - update_status(); + /* FIXME DBO: seems not necessary */ + // update_status(); } /* diff --git a/engine/src/notifier.cc b/engine/src/notifier.cc index 9f99a71a7fa..99eccc92402 100644 --- a/engine/src/notifier.cc +++ b/engine/src/notifier.cc @@ -17,8 +17,6 @@ * . 
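About the rescheduling change in `events/loop.cc` above: the next check is simply pushed forward by `check_interval * interval_length` seconds, where `interval_length` is assumed to be a local copy of `config->interval_length()` taken earlier in `_dispatching()` (outside this hunk). A minimal sketch of the arithmetic, with names local to the example:

```cpp
#include <cstdint>
#include <ctime>
#include <iostream>

// Sketch of the rescheduling arithmetic: check_interval is expressed in
// "interval units", interval_length is the number of seconds per unit
// (60 by default in Nagios-derived engines).
time_t reschedule(time_t next_check, double check_interval,
                  uint32_t interval_length) {
  return static_cast<time_t>(next_check + check_interval * interval_length);
}

int main() {
  time_t now = time(nullptr);
  // A service checked every 5 units with a 60s interval_length is pushed
  // 300 seconds forward.
  std::cout << reschedule(now, 5.0, 60) - now << " seconds\n";  // prints 300
}
```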
*/ -#include "com/centreon/engine/notifier.hh" - #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/common.hh" @@ -31,7 +29,6 @@ #include "com/centreon/engine/neberrors.hh" #include "com/centreon/engine/notification.hh" #include "com/centreon/engine/timezone_locker.hh" -#include "com/centreon/engine/utils.hh" using namespace com::centreon::engine; using namespace com::centreon::engine::logging; @@ -207,7 +204,7 @@ void notifier::set_last_problem_id(unsigned long last_problem_id) noexcept { } /** - * @brief Set the current notification number and update the notifier status. + * @brief Set the current notification number and send this update to Broker. * * @param num The notification number. */ @@ -219,7 +216,7 @@ void notifier::set_notification_number(int num) { _notification_number = num; /* update the status log with the notifier info */ - update_status(); + update_status(STATUS_NOTIFICATION_NUMBER); } bool notifier::_is_notification_viable_normal(reason_type type diff --git a/engine/src/service.cc b/engine/src/service.cc index bbf1ea09fb0..1b506aa9c3b 100644 --- a/engine/src/service.cc +++ b/engine/src/service.cc @@ -17,12 +17,10 @@ * */ -#include "com/centreon/engine/service.hh" #include #include "com/centreon/engine/broker.hh" -#include "com/centreon/engine/checkable.hh" #include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/configuration/whitelist.hh" #include "com/centreon/engine/deleter/listmember.hh" @@ -31,11 +29,8 @@ #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/flapping.hh" #include "com/centreon/engine/globals.hh" -#include "com/centreon/engine/hostdependency.hh" -#include "com/centreon/engine/logging.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/macros.hh" -#include "com/centreon/engine/macros/grab_host.hh" #include "com/centreon/engine/neberrors.hh" #include "com/centreon/engine/notification.hh" #include "com/centreon/engine/objects.hh" @@ -850,7 +845,7 @@ void service::check_for_expired_acknowledgement() { // FIXME DBO: could be improved with something smaller. // We will see later, I don't know if there are many events concerning // acks. - update_status(); + update_status(STATUS_ACKNOWLEDGEMENT); } } } @@ -3074,10 +3069,13 @@ void service::disable_flap_detection() { } /** - * @brief Updates service status info. Send data to event broker. + * @brief Updates the status of the service partially. + * + * @param status_attributes A bits field based on status_attribute enum (default + * value: STATUS_ALL). 
 */
-void service::update_status() {
-  broker_service_status(NEBTYPE_SERVICESTATUS_UPDATE, this);
+void service::update_status(uint32_t status_attributes) {
+  broker_service_status(NEBTYPE_SERVICESTATUS_UPDATE, this, status_attributes);
 }
 /**
diff --git a/tests/README.md b/tests/README.md
index 90d6b2edba2..9928c3a662b 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -13,26 +13,37 @@ These tests are executed from the `centreon-tests/robot` folder and uses the [Ro
 From a Centreon host, you need to install Robot Framework
-On CentOS 7, the following commands should work to initialize your robot tests:
+On AlmaLinux, the following commands should work to initialize your robot tests:
-```
-pip3 install -U robotframework robotframework-databaselibrary robotframework-examples pymysql robotframework-requests
+```bash
+dnf install -y "@Development Tools" python3-devel
+
+pip3 install -U robotframework robotframework-databaselibrary robotframework-examples pymysql robotframework-requests psutil robotframework-httpctrl boto3 GitPython unqlite py-cpuinfo
-yum install "Development Tools" python3-devel -y
-pip3 install grpcio==1.33.2 grpcio_tools==1.33.2
+pip3 install grpcio grpcio_tools
 #you need also to provide opentelemetry proto files at the project root with this command
 git clone https://github.com/open-telemetry/opentelemetry-proto.git opentelemetry-proto
-
+
 #Then you must have something like that:
 #root directory/bbdo
 #              /broker
 #              /engine
 #              /opentelemetry-proto
 #              /tests
+```
+
+Some Perl modules are also required to run the tests; install them with the following commands:
+
+```bash
+dnf install perl-HTTP-Daemon-SSL
+dnf install perl-JSON
+```
+Then you can initialize the tests with the following commands:
+```bash
 ./init-proto.sh
 ./init-sql.sh
 ```
@@ -145,54 +156,55 @@ Here is the list of the currently implemented tests:
 34. [x] **BGRPCSSU3**: Start-Stop with unified_sql one instance of broker configured with grpc and no coredump
 35. [x] **BGRPCSSU4**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 1sec interval and no coredump
 36. [x] **BGRPCSSU5**: Start-Stop with unified_sql with reversed connection on grpc acceptor with only one instance and no deadlock
-37. [x] **BLDIS1**: Start broker with core logs 'disabled'
-38. [x] **BLEC1**: Change live the core level log from trace to debug
-39. [x] **BLEC2**: Change live the core level log from trace to foo raises an error
-40. [x] **BLEC3**: Change live the foo level log to trace raises an error
-41. [x] **BSCSS1**: Start-Stop two instances of broker and no coredump
-42. [x] **BSCSS2**: Start/Stop 10 times broker with 300ms interval and no coredump
-43. [x] **BSCSS3**: Start-Stop one instance of broker and no coredump
-44. [x] **BSCSS4**: Start/Stop 10 times broker with 1sec interval and no coredump
-45. [x] **BSCSSC1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side.
-46. [x] **BSCSSC2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side.
-47. [x] **BSCSSCG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on client side.
-48. [x] **BSCSSCGRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side.
Reversed connection with retention and grpc transport protocol. -49. [x] **BSCSSCGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. -50. [x] **BSCSSCRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. Connection reversed with retention. -51. [x] **BSCSSCRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side. Connection reversed with retention. -52. [x] **BSCSSG1**: Start-Stop two instances of broker and no coredump -53. [x] **BSCSSG2**: Start/Stop 10 times broker with 300ms interval and no coredump -54. [x] **BSCSSG3**: Start-Stop one instance of broker and no coredump -55. [x] **BSCSSG4**: Start/Stop 10 times broker with 1sec interval and no coredump -56. [x] **BSCSSGA1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server. Error messages are raised. -57. [x] **BSCSSGA2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server and also on the client. All looks ok. -58. [x] **BSCSSGRR1**: Start-Stop two instances of broker and no coredump, reversed and retention, with transport protocol grpc, start-stop 5 times. -59. [x] **BSCSSK1**: Start-Stop two instances of broker, server configured with grpc and client with tcp. No connectrion established and error raised on client side. -60. [x] **BSCSSK2**: Start-Stop two instances of broker, server configured with tcp and client with grpc. No connection established and error raised on client side. -61. [x] **BSCSSP1**: Start-Stop two instances of broker and no coredump. The server contains a listen address -62. [x] **BSCSSPRR1**: Start-Stop two instances of broker and no coredump. The server contains a listen address, reversed and retention. centreon-broker-master-rrd is then a failover. -63. [x] **BSCSSR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client and reversed. -64. [x] **BSCSSRR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client, reversed and retention. centreon-broker-master-rrd is then a failover. -65. [x] **BSCSSRR2**: Start/Stop 10 times broker with 300ms interval and no coredump, reversed and retention. centreon-broker-master-rrd is then a failover. -66. [x] **BSCSST1**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. -67. [x] **BSCSST2**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. -68. [x] **BSCSSTG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. This is not sufficient, then an error is raised. -69. [x] **BSCSSTG2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. -70. [x] **BSCSSTG3**: Start-Stop two instances of broker. The connection cannot be established if the server private key is missing and an error message explains this issue. -71. 
[x] **BSCSSTGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. Reversed grpc connection with retention. -72. [x] **BSCSSTRR1**: Start-Stop two instances of broker and no coredump. Encryption is enabled. transport protocol is tcp, reversed and retention. -73. [x] **BSCSSTRR2**: Start-Stop two instances of broker and no coredump. Encryption is enabled. -74. [x] **BSS1**: Start-Stop two instances of broker and no coredump -75. [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump -76. [x] **BSS3**: Start-Stop one instance of broker and no coredump -77. [x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump -78. [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock -79. [x] **BSSU1**: Start-Stop with unified_sql two instances of broker and no coredump -80. [x] **BSSU2**: Start/Stop with unified_sql 10 times broker with 300ms interval and no coredump -81. [x] **BSSU3**: Start-Stop with unified_sql one instance of broker and no coredump -82. [x] **BSSU4**: Start/Stop with unified_sql 10 times broker with 1sec interval and no coredump -83. [x] **BSSU5**: Start-Stop with unified_sql with reversed connection on TCP acceptor with only one instance and no deadlock -84. [x] **START_STOP_CBD**: restart cbd with unified_sql services state must not be null after restart +37. [x] **BLBD**: Start Broker with loggers levels by default +38. [x] **BLDIS1**: Start broker with core logs 'disabled' +39. [x] **BLEC1**: Change live the core level log from trace to debug +40. [x] **BLEC2**: Change live the core level log from trace to foo raises an error +41. [x] **BLEC3**: Change live the foo level log to trace raises an error +42. [x] **BSCSS1**: Start-Stop two instances of broker and no coredump +43. [x] **BSCSS2**: Start/Stop 10 times broker with 300ms interval and no coredump +44. [x] **BSCSS3**: Start-Stop one instance of broker and no coredump +45. [x] **BSCSS4**: Start/Stop 10 times broker with 1sec interval and no coredump +46. [x] **BSCSSC1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. +47. [x] **BSCSSC2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is disabled on client side. +48. [x] **BSCSSCG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on client side. +49. [x] **BSCSSCGRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. +50. [x] **BSCSSCGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. Compression is enabled on output side. Reversed connection with retention and grpc transport protocol. +51. [x] **BSCSSCRR1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. Compression is enabled on client side. Connection reversed with retention. +52. [x] **BSCSSCRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with tcp transport protocol. 
Compression is disabled on client side. Connection reversed with retention. +53. [x] **BSCSSG1**: Start-Stop two instances of broker and no coredump +54. [x] **BSCSSG2**: Start/Stop 10 times broker with 300ms interval and no coredump +55. [x] **BSCSSG3**: Start-Stop one instance of broker and no coredump +56. [x] **BSCSSG4**: Start/Stop 10 times broker with 1sec interval and no coredump +57. [x] **BSCSSGA1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server. Error messages are raised. +58. [x] **BSCSSGA2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with grpc transport protocol. An authorization token is added on the server and also on the client. All looks ok. +59. [x] **BSCSSGRR1**: Start-Stop two instances of broker and no coredump, reversed and retention, with transport protocol grpc, start-stop 5 times. +60. [x] **BSCSSK1**: Start-Stop two instances of broker, server configured with grpc and client with tcp. No connectrion established and error raised on client side. +61. [x] **BSCSSK2**: Start-Stop two instances of broker, server configured with tcp and client with grpc. No connection established and error raised on client side. +62. [x] **BSCSSP1**: Start-Stop two instances of broker and no coredump. The server contains a listen address +63. [x] **BSCSSPRR1**: Start-Stop two instances of broker and no coredump. The server contains a listen address, reversed and retention. centreon-broker-master-rrd is then a failover. +64. [x] **BSCSSR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client and reversed. +65. [x] **BSCSSRR1**: Start-Stop two instances of broker and no coredump. Connection with bbdo_server/bbdo_client, reversed and retention. centreon-broker-master-rrd is then a failover. +66. [x] **BSCSSRR2**: Start/Stop 10 times broker with 300ms interval and no coredump, reversed and retention. centreon-broker-master-rrd is then a failover. +67. [x] **BSCSST1**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. +68. [x] **BSCSST2**: Start-Stop two instances of broker and no coredump. Encryption is enabled on client side. +69. [x] **BSCSSTG1**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. This is not sufficient, then an error is raised. +70. [x] **BSCSSTG2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. +71. [x] **BSCSSTG3**: Start-Stop two instances of broker. The connection cannot be established if the server private key is missing and an error message explains this issue. +72. [x] **BSCSSTGRR2**: Start-Stop two instances of broker. The connection is made by bbdo_client/bbdo_server with encryption enabled. It works with good certificates and keys. Reversed grpc connection with retention. +73. [x] **BSCSSTRR1**: Start-Stop two instances of broker and no coredump. Encryption is enabled. transport protocol is tcp, reversed and retention. +74. [x] **BSCSSTRR2**: Start-Stop two instances of broker and no coredump. Encryption is enabled. +75. [x] **BSS1**: Start-Stop two instances of broker and no coredump +76. [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump +77. [x] **BSS3**: Start-Stop one instance of broker and no coredump +78. 
[x] **BSS4**: Start/Stop 10 times broker with 1sec interval and no coredump +79. [x] **BSS5**: Start-Stop with reversed connection on TCP acceptor with only one instance and no deadlock +80. [x] **BSSU1**: Start-Stop with unified_sql two instances of broker and no coredump +81. [x] **BSSU2**: Start/Stop with unified_sql 10 times broker with 300ms interval and no coredump +82. [x] **BSSU3**: Start-Stop with unified_sql one instance of broker and no coredump +83. [x] **BSSU4**: Start/Stop with unified_sql 10 times broker with 1sec interval and no coredump +84. [x] **BSSU5**: Start-Stop with unified_sql with reversed connection on TCP acceptor with only one instance and no deadlock +85. [x] **START_STOP_CBD**: restart cbd with unified_sql services state must not be null after restart ### Broker/database 1. [x] **DEDICATED_DB_CONNECTION_${nb_conn}_${store_in_data_bin}**: count database connection @@ -249,250 +261,273 @@ Here is the list of the currently implemented tests: 39. [x] **BEDTHOSTFIXED**: A downtime is set on a host, the total number of downtimes is really 21 (1 for the host and 20 for its 20 services) then we delete this downtime and the number is 0. 40. [x] **BEDTMASS1**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 1050 downtimes if we also count impacted services. Then all these downtimes are removed. This test is done with BBDO 3.0.0 41. [x] **BEDTMASS2**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 1050 downtimes if we also count impacted services. Then all these downtimes are removed. This test is done with BBDO 2.0 -42. [x] **BEDTSVCFIXED**: A downtime is set on a service, the total number of downtimes is really 1 then we delete this downtime and the number of downtime is 0. -43. [x] **BEDTSVCREN1**: A downtime is set on a service then the service is renamed. The downtime is still active on the renamed service. The downtime is removed from the renamed service and it is well removed. -44. [x] **BEEXTCMD1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 -45. [x] **BEEXTCMD10**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo2.0 -46. [x] **BEEXTCMD11**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo3.0 -47. [x] **BEEXTCMD12**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo2.0 -48. [x] **BEEXTCMD13**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo3.0 -49. [x] **BEEXTCMD14**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo2.0 -50. [x] **BEEXTCMD15**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo3.0 -51. [x] **BEEXTCMD16**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo2.0 -52. [x] **BEEXTCMD17**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo3.0 -53. [x] **BEEXTCMD18**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo2.0 -54. [x] **BEEXTCMD19**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo3.0 -55. [x] **BEEXTCMD2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 -56. [x] **BEEXTCMD20**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo2.0 -57. [x] **BEEXTCMD21**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo3.0 -58. [x] **BEEXTCMD22**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo2.0 -59. [x] **BEEXTCMD23**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo3.0 -60. 
[x] **BEEXTCMD24**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo2.0 -61. [x] **BEEXTCMD25**: external command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo3.0 -62. [x] **BEEXTCMD26**: external command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo2.0 -63. [x] **BEEXTCMD27**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo3.0 -64. [x] **BEEXTCMD28**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo2.0 -65. [x] **BEEXTCMD29**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo3.0 -66. [x] **BEEXTCMD3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 -67. [x] **BEEXTCMD30**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo2.0 -68. [x] **BEEXTCMD31**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo3.0 -69. [x] **BEEXTCMD32**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo2.0 -70. [x] **BEEXTCMD33**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo3.0 -71. [x] **BEEXTCMD34**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo2.0 -72. [x] **BEEXTCMD35**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo3.0 -73. [x] **BEEXTCMD36**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo2.0 -74. [x] **BEEXTCMD37**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo3.0 -75. [x] **BEEXTCMD38**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo2.0 -76. [x] **BEEXTCMD39**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo3.0 -77. [x] **BEEXTCMD4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 -78. [x] **BEEXTCMD40**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo2.0 -79. [x] **BEEXTCMD41**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo3.0 -80. [x] **BEEXTCMD42**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo2.0 -81. [x] **BEEXTCMD5**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo3.0 -82. [x] **BEEXTCMD6**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo2.0 -83. [x] **BEEXTCMD7**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo3.0 -84. [x] **BEEXTCMD8**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo2.0 -85. [x] **BEEXTCMD9**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo3.0 -86. [x] **BEEXTCMD_COMPRESS_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and compressed grpc -87. [x] **BEEXTCMD_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and grpc -88. [x] **BEEXTCMD_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc -89. [x] **BEEXTCMD_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc -90. [x] **BEEXTCMD_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc -91. [x] **BEEXTCMD_REVERSE_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and reversed gRPC -92. [x] **BEEXTCMD_REVERSE_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc reversed -93. [x] **BEEXTCMD_REVERSE_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc reversed -94. 
[x] **BEEXTCMD_REVERSE_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc reversed -95. [x] **BEHOSTCHECK**: external command CHECK_HOST_RESULT -96. [x] **BEHS1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -97. [x] **BEINSTANCE**: Instance to bdd -98. [x] **BEINSTANCESTATUS**: Instance status to bdd -99. [x] **BENCH_${nb_check}STATUS**: external command CHECK_SERVICE_RESULT 1000 times -100. [x] **BENCH_${nb_check}STATUS_TRACES**: external command CHECK_SERVICE_RESULT ${nb_check} times -101. [x] **BENCH_1000STATUS_100${suffixe}**: external command CHECK_SERVICE_RESULT 100 times with 100 pollers with 20 services -102. [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. -103. [x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage -104. [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. -105. [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. -106. [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. -107. [x] **BEPBCVS**: bbdo_version 3 communication of custom variables. -108. [x] **BEPBHostParent**: bbdo_version 3 communication of host parent relations -109. [x] **BEPBINST_CONF**: bbdo_version 3 communication of instance configuration. -110. [x] **BEPBRI1**: bbdo_version 3 use pb_resource new bbdo protobuf ResponsiveInstance message. -111. [x] **BEPB_HOST_DEPENDENCY**: BBDO 3 communication of host dependencies. -112. [x] **BEPB_SERVICE_DEPENDENCY**: bbdo_version 3 communication of host dependencies. -113. [x] **BERD1**: Starting/stopping Broker does not create duplicated events. -114. [x] **BERD2**: Starting/stopping Engine does not create duplicated events. -115. [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases -116. [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases -117. [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -118. [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -119. [x] **BERDUCA300**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker. -120. [x] **BERDUCA301**: Starting/stopping Engine is stopped ; it should emit a stop event and receive an ack event with events to clean from broker with bbdo 3.0.1. -121. [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql -122. [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql -123. [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -124. [x] **BESERVCHECK**: external command CHECK_SERVICE_RESULT -125. [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first -126. [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first -127. [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped first -128. 
[x] **BESS4**: Start-Stop Broker/Engine - Engine started first - Broker stopped first -129. [x] **BESS5**: Start-Stop Broker/engine - Engine debug level is set to all, it should not hang -130. [x] **BESSBQ1**: A very bad queue file is written for broker. Broker and Engine are then started, Broker must read the file raising an error because of that file and then get data sent by Engine. -131. [x] **BESS_CRYPTED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured -132. [x] **BESS_CRYPTED_GRPC2**: Start-Stop grpc version Broker/Engine only server crypted -133. [x] **BESS_CRYPTED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted -134. [x] **BESS_CRYPTED_REVERSED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured -135. [x] **BESS_CRYPTED_REVERSED_GRPC2**: Start-Stop grpc version Broker/Engine only engine server crypted -136. [x] **BESS_CRYPTED_REVERSED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted -137. [x] **BESS_ENGINE_DELETE_HOST**: once engine and cbd started, stop and restart cbd, delete an host and reload engine, cbd mustn't core -138. [x] **BESS_GRPC1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first -139. [x] **BESS_GRPC2**: Start-Stop grpc version Broker/Engine - Broker started first - Engine stopped first -140. [x] **BESS_GRPC3**: Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first -141. [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first -142. [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang -143. [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped last compression activated -144. [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. -145. [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -146. [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. -147. [x] **BEUTAG10**: some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. -148. [x] **BEUTAG11**: some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. -149. [x] **BEUTAG12**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. -150. [x] **BEUTAG2**: Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. -151. [x] **BEUTAG3**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. -152. [x] **BEUTAG4**: Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. -153. 
[x] **BEUTAG5**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. -154. [x] **BEUTAG6**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. -155. [x] **BEUTAG7**: Some services are configured with tags on two pollers. Then tags configuration is modified. -156. [x] **BEUTAG8**: Services have tags provided by templates. -157. [x] **BEUTAG9**: hosts have tags provided by templates. -158. [x] **BEUTAG_REMOVE_HOST_FROM_HOSTGROUP**: remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error -159. [x] **BE_DEFAULT_NOTIFCATION_INTERVAL_IS_ZERO_SERVICE_RESOURCE**: default notification_interval must be set to NULL in services, hosts and resources tables. -160. [x] **BE_NOTIF_OVERFLOW**: bbdo 2.0 notification number =40000. make an overflow => notification_number null in db -161. [x] **BE_TIME_NULL_SERVICE_RESOURCE**: With BBDO 3, notification_interval time must be set to NULL on 0 in services, hosts and resources tables. -162. [x] **BRCS1**: Broker reverse connection stopped -163. [x] **BRCTS1**: Broker reverse connection too slow -164. [x] **BRCTSMN**: Broker connected to map with neb filter -165. [x] **BRCTSMNS**: Broker connected to map with neb and storage filters -166. [x] **BRGC1**: Broker good reverse connection -167. [x] **BRRDCDDID1**: RRD metrics deletion from index ids with rrdcached. -168. [x] **BRRDCDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage with rrdcached. -169. [x] **BRRDCDDIDU1**: RRD metrics deletion from index ids with unified sql output with rrdcached. -170. [x] **BRRDCDDM1**: RRD metrics deletion from metric ids with rrdcached. -171. [x] **BRRDCDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage and rrdcached. -172. [x] **BRRDCDDMID1**: RRD deletion of non existing metrics and indexes with rrdcached -173. [x] **BRRDCDDMIDU1**: RRD deletion of non existing metrics and indexes with rrdcached -174. [x] **BRRDCDDMU1**: RRD metric deletion on table metric with unified sql output with rrdcached -175. [x] **BRRDCDRB1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output and rrdcached. -176. [x] **BRRDCDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached -177. [x] **BRRDCDRBU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output and rrdcached. -178. [x] **BRRDCDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached -179. [x] **BRRDDID1**: RRD metrics deletion from index ids. -180. [x] **BRRDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage. -181. [x] **BRRDDIDU1**: RRD metrics deletion from index ids with unified sql output. -182. [x] **BRRDDM1**: RRD metrics deletion from metric ids. -183. [x] **BRRDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage. -184. [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes -185. [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes -186. [x] **BRRDDMU1**: RRD metric deletion on table metric with unified sql output -187. 
[x] **BRRDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql -188. [x] **BRRDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql -189. [x] **BRRDRM1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output. -190. [x] **BRRDRMU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output. -191. [x] **BRRDUPLICATE**: RRD metric rebuild with a query in centreon_storage and unified sql with duplicate rows in database -192. [x] **BRRDWM1**: We are working with BBDO3. This test checks protobuf metrics and status are sent to cbd RRD. -193. [x] **CBD_RELOAD_AND_FILTERS**: We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. -194. [x] **CBD_RELOAD_AND_FILTERS_WITH_OPR**: We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. -195. [x] **DTIM**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 5250 downtimes if we also count impacted services. Then all these downtimes are removed. This test is done with BBDO 3.0.1 -196. [x] **EBBPS1**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table resources. -197. [x] **EBBPS2**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. -198. [x] **EBDP1**: Four new pollers are started and then we remove Poller3. -199. [x] **EBDP2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -200. [x] **EBDP3**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. -201. [x] **EBDP4**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by broker. -202. [x] **EBDP5**: Four new pollers are started and then we remove Poller3. -203. [x] **EBDP6**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -204. [x] **EBDP7**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. -205. [x] **EBDP8**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by broker. -206. [x] **EBDP_GRPC2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. -207. [x] **EBMSSM**: 1000 services are configured with 100 metrics each. 
The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin. -208. [x] **EBNHG1**: New host group with several pollers and connections to DB -209. [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup -210. [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql -211. [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql -212. [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql -213. [x] **EBNHGU4_${test_label}**: New host group with several pollers and connections to DB with broker and rename this hostgroup -214. [x] **EBNSG1**: New service group with several pollers and connections to DB -215. [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql -216. [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql -217. [x] **EBNSGU3_${test_label}**: New service group with several pollers and connections to DB with broker and rename this servicegroup -218. [x] **EBNSVC1**: New services with several pollers -219. [x] **EBPS2**: 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. -220. [x] **EBSAU2**: New services with action_url with more than 2000 characters -221. [x] **EBSN3**: New services with notes with more than 500 characters -222. [x] **EBSNU1**: New services with notes_url with more than 2000 characters -223. [x] **ENRSCHE1**: Verify that next check of a rescheduled host is made at last_check + interval_check -224. [x] **FILTER_ON_LUA_EVENT**: stream connector with a bad configured filter generate a log error message -225. [x] **GRPC_CLOUD_FAILURE**: simulate a broker failure in cloud environment, we provide a muted grpc server and there must remain only one grpc connection. Then we start broker and connection must be ok -226. [x] **GRPC_RECONNECT**: We restart broker and engine must reconnect to it and send data -227. [x] **LCDNU**: the lua cache updates correctly service cache. -228. [x] **LCDNUH**: the lua cache updates correctly host cache -229. [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink -230. [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink -231. [x] **LOGV2DF1**: log-v2 disabled old log enabled check logfile sink -232. [x] **LOGV2DF2**: log-v2 disabled old log disabled check logfile sink -233. [x] **LOGV2EB1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled. -234. [x] **LOGV2EB2**: log-v2 enabled old log enabled check broker sink -235. [x] **LOGV2EBU1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled with bbdo3. -236. [x] **LOGV2EBU2**: Check Broker sink with log-v2 enabled and legacy log enabled with BBDO3. -237. [x] **LOGV2EF1**: log-v2 enabled old log disabled check logfile sink -238. [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink -239. [x] **LOGV2FE2**: log-v2 enabled old log enabled check logfile sink -240. [x] **RLCode**: Test if reloading LUA code in a stream connector applies the changes -241. [x] **RRD1**: RRD metric rebuild asked with gRPC API. 
Three non existing indexes IDs are selected then an error message is sent. This is done with unified_sql output. -242. [x] **SDER**: The check attempts and the max check attempts of (host_1,service_1) are changed to 280 thanks to the retention.dat file. Then engine and broker are started and broker should write these values in the services and resources tables. We only test the services table because we need a resources table that allows bigger numbers for these two attributes. But we see that broker doesn't crash anymore. -243. [x] **SEVERAL_FILTERS_ON_LUA_EVENT**: Two stream connectors with different filters are configured. -244. [x] **STORAGE_ON_LUA**: The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. -245. [x] **STUPID_FILTER**: Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. -246. [x] **Service_increased_huge_check_interval**: New services with high check interval at creation time. -247. [x] **Services_and_bulks_${id}**: One service is configured with one metric with a name of 150 to 1021 characters. -248. [x] **Start_Stop_Broker_Engine_${id}**: Start-Stop Broker/Engine - Broker started first - Engine stopped first -249. [x] **Start_Stop_Engine_Broker_${id}**: Start-Stop Broker/Engine - Broker started first - Broker stopped first -250. [x] **UNIFIED_SQL_FILTER**: With bbdo version 3.0.1, we watch events written or rejected in unified_sql -251. [x] **VICT_ONE_CHECK_METRIC**: victoria metrics metric output -252. [x] **VICT_ONE_CHECK_METRIC_AFTER_FAILURE**: victoria metrics metric output after victoria shutdown -253. [x] **VICT_ONE_CHECK_STATUS**: victoria metrics status output -254. [x] **Whitelist_Directory_Rights**: log if /etc/centreon-engine-whitelist has not mandatory rights or owner -255. [x] **Whitelist_Empty_Directory**: log if /etc/centreon-engine-whitelist is empty -256. [x] **Whitelist_Host**: test allowed and forbidden commands for hosts -257. [x] **Whitelist_No_Whitelist_Directory**: log if /etc/centreon-engine-whitelist doesn't exist -258. [x] **Whitelist_Perl_Connector**: test allowed and forbidden commands for services -259. [x] **Whitelist_Service**: test allowed and forbidden commands for services -260. [x] **Whitelist_Service_EH**: test allowed and forbidden event handler for services -261. [x] **metric_mapping**: Check if metric name exists using a stream connector -262. [x] **not1**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK HARD state. -263. [x] **not10**: This test case involves scheduling downtime on a down host that already had a critical notification. When The Host return to UP state we should receive a recovery notification. -264. [x] **not11**: This test case involves configuring one service and checking that three alerts are sent for it. -265. [x] **not12**: Escalations -266. [x] **not13**: notification for a dependensies host -267. [x] **not14**: notification for a Service dependency -268. [x] **not15**: several notification commands for the same user. -269. [x] **not16**: notification for a dependensies services group -270. [x] **not17**: notification for a dependensies host group -271. [x] **not18**: notification delay where first notification delay equal retry check -272. [x] **not19**: notification delay where first notification delay greater than retry check -273. [x] **not1_WL_KO**: This test case configures a single service. 
When it is in non-OK HARD state a notification should be sent but it is not allowed by the whitelist -274. [x] **not1_WL_OK**: This test case configures a single service. When it is in non-OK HARD state a notification is sent because it is allowed by the whitelist -275. [x] **not2**: This test case configures a single service and verifies that a recovery notification is sent -276. [x] **not20**: notification delay where first notification delay samller than retry check -277. [x] **not3**: This test case configures a single service and verifies the notification system's behavior during and after downtime -278. [x] **not4**: This test case configures a single service and verifies the notification system's behavior during and after acknowledgement -279. [x] **not5**: This test case configures two services with two different users being notified when the services transition to a critical state. -280. [x] **not6**: This test case validate the behavior when the notification time period is set to null. -281. [x] **not7**: This test case simulates a host alert scenario. -282. [x] **not8**: This test validates the critical host notification. -283. [x] **not9**: This test case configures a single host and verifies that a recovery notification is sent after the host recovers from a non-OK state. -284. [x] **not_in_timeperiod_with_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is sent outside timeperiod when _send_recovery_notifications_anyways is set -285. [x] **not_in_timeperiod_without_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and OK is not sent outside timeperiod when _send_recovery_notifications_anyways is not set +42. [x] **BEDTRRD1**: A service is forced checked then a downtime is set on this service. The service is forced checked again and the downtime is removed. This test is done with BBDO 3.0.0. Then we should not get any error in cbd RRD of kind 'ignored update error in file...'. +43. [x] **BEDTSVCFIXED**: A downtime is set on a service, the total number of downtimes is really 1 then we delete this downtime and the number of downtime is 0. +44. [x] **BEDTSVCREN1**: A downtime is set on a service then the service is renamed. The downtime is still active on the renamed service. The downtime is removed from the renamed service and it is well removed. +45. [x] **BEEXTCMD1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 +46. [x] **BEEXTCMD10**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo2.0 +47. [x] **BEEXTCMD11**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo3.0 +48. [x] **BEEXTCMD12**: external command CHANGE_MAX_HOST_CHECK_ATTEMPTS on bbdo2.0 +49. [x] **BEEXTCMD13**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo3.0 +50. [x] **BEEXTCMD14**: external command CHANGE_HOST_CHECK_TIMEPERIOD on bbdo2.0 +51. [x] **BEEXTCMD15**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo3.0 +52. [x] **BEEXTCMD16**: external command CHANGE_HOST_NOTIFICATION_TIMEPERIOD on bbdo2.0 +53. [x] **BEEXTCMD17**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo3.0 +54. [x] **BEEXTCMD18**: external command CHANGE_SVC_CHECK_TIMEPERIOD on bbdo2.0 +55. [x] **BEEXTCMD19**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo3.0 +56. [x] **BEEXTCMD2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 +57. 
[x] **BEEXTCMD20**: external command CHANGE_SVC_NOTIFICATION_TIMEPERIOD on bbdo2.0 +58. [x] **BEEXTCMD21**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo3.0 +59. [x] **BEEXTCMD22**: external command DISABLE_HOST_AND_CHILD_NOTIFICATIONS and ENABLE_HOST_AND_CHILD_NOTIFICATIONS on bbdo2.0 +60. [x] **BEEXTCMD23**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo3.0 +61. [x] **BEEXTCMD24**: external command DISABLE_HOST_CHECK and ENABLE_HOST_CHECK on bbdo2.0 +62. [x] **BEEXTCMD25**: external command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo3.0 +63. [x] **BEEXTCMD26**: external command DISABLE_HOST_EVENT_HANDLER and ENABLE_HOST_EVENT_HANDLER on bbdo2.0 +64. [x] **BEEXTCMD27**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo3.0 +65. [x] **BEEXTCMD28**: external command DISABLE_HOST_FLAP_DETECTION and ENABLE_HOST_FLAP_DETECTION on bbdo2.0 +66. [x] **BEEXTCMD29**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo3.0 +67. [x] **BEEXTCMD3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 +68. [x] **BEEXTCMD30**: external command DISABLE_HOST_NOTIFICATIONS and ENABLE_HOST_NOTIFICATIONS on bbdo2.0 +69. [x] **BEEXTCMD31**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo3.0 +70. [x] **BEEXTCMD32**: external command DISABLE_HOST_SVC_CHECKS and ENABLE_HOST_SVC_CHECKS on bbdo2.0 +71. [x] **BEEXTCMD33**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo3.0 +72. [x] **BEEXTCMD34**: external command DISABLE_HOST_SVC_NOTIFICATIONS and ENABLE_HOST_SVC_NOTIFICATIONS on bbdo2.0 +73. [x] **BEEXTCMD35**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo3.0 +74. [x] **BEEXTCMD36**: external command DISABLE_PASSIVE_HOST_CHECKS and ENABLE_PASSIVE_HOST_CHECKS on bbdo2.0 +75. [x] **BEEXTCMD37**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo3.0 +76. [x] **BEEXTCMD38**: external command DISABLE_PASSIVE_SVC_CHECKS and ENABLE_PASSIVE_SVC_CHECKS on bbdo2.0 +77. [x] **BEEXTCMD39**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo3.0 +78. [x] **BEEXTCMD4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 +79. [x] **BEEXTCMD40**: external command START_OBSESSING_OVER_HOST and STOP_OBSESSING_OVER_HOST on bbdo2.0 +80. [x] **BEEXTCMD41**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo3.0 +81. [x] **BEEXTCMD42**: external command START_OBSESSING_OVER_SVC and STOP_OBSESSING_OVER_SVC on bbdo2.0 +82. [x] **BEEXTCMD5**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo3.0 +83. [x] **BEEXTCMD6**: external command CHANGE_RETRY_SVC_CHECK_INTERVAL on bbdo2.0 +84. [x] **BEEXTCMD7**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo3.0 +85. [x] **BEEXTCMD8**: external command CHANGE_RETRY_HOST_CHECK_INTERVAL on bbdo2.0 +86. [x] **BEEXTCMD9**: external command CHANGE_MAX_SVC_CHECK_ATTEMPTS on bbdo3.0 +87. [x] **BEEXTCMD_COMPRESS_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and compressed grpc +88. [x] **BEEXTCMD_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and grpc +89. [x] **BEEXTCMD_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc +90. [x] **BEEXTCMD_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc +91. 
[x] **BEEXTCMD_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc +92. [x] **BEEXTCMD_REVERSE_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and reversed gRPC +93. [x] **BEEXTCMD_REVERSE_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc reversed +94. [x] **BEEXTCMD_REVERSE_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc reversed +95. [x] **BEEXTCMD_REVERSE_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc reversed +96. [x] **BEHOSTCHECK**: external command CHECK_HOST_RESULT +97. [x] **BEHS1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) +98. [x] **BEINSTANCE**: Instance to bdd +99. [x] **BEINSTANCESTATUS**: Instance status to bdd +100. [x] **BENCH_${nb_checks}STATUS**: external command CHECK_SERVICE_RESULT 1000 times +101. [x] **BENCH_${nb_checks}STATUS_TRACES**: external command CHECK_SERVICE_RESULT ${nb_checks} times +102. [x] **BENCH_${nb_checks}_REVERSE_SERVICE_STATUS_TRACES_WITHOUT_SQL**: Broker is configured without SQL output. The connection between Engine and Broker is reversed. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. Logs are in trace level. +103. [x] **BENCH_${nb_checks}_REVERSE_SERVICE_STATUS_WITHOUT_SQL**: Broker is configured without SQL output. The connection between Engine and Broker is reversed. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. +104. [x] **BENCH_${nb_checks}_SERVICE_STATUS_TRACES_WITHOUT_SQL**: Broker is configured without SQL output. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. Logs are in trace level. +105. [x] **BENCH_${nb_checks}_SERVICE_STATUS_WITHOUT_SQL**: Broker is configured without SQL output. External command CHECK_SERVICE_RESULT is sent ${nb_checks} times. +106. [x] **BENCH_1000STATUS_100${suffixe}**: external command CHECK_SERVICE_RESULT 100 times with 100 pollers with 20 services +107. [x] **BEOTEL_CENTREON_AGENT_CHECK_HOST**: agent check host and we expect to get it in check result +108. [x] **BEOTEL_CENTREON_AGENT_CHECK_HOST_CRYPTED**: agent check host with encrypted connection and we expect to get it in check result +109. [x] **BEOTEL_CENTREON_AGENT_CHECK_SERVICE**: agent check service and we expect to get it in check result +110. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST**: agent check host with reversed connection and we expect to get it in check result +111. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST_CRYPTED**: agent check host with encrypted reversed connection and we expect to get it in check result +112. [x] **BEOTEL_REVERSE_CENTREON_AGENT_CHECK_SERVICE**: agent check service with reversed connection and we expect to get it in check result +113. [x] **BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED**: we configure engine with a telegraf conf server and we check telegraf conf file +114. [x] **BEOTEL_SERVE_TELEGRAF_CONFIGURATION_NO_CRYPTED**: we configure engine with a telegraf conf server and we check telegraf conf file +115. [x] **BEOTEL_TELEGRAF_CHECK_HOST**: we send nagios telegraf formatted data and we expect to get it in the check result +116. [x] **BEOTEL_TELEGRAF_CHECK_SERVICE**: we send nagios telegraf formatted data and we expect to get it in the check result +117. [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. +118. 
[x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage +119. [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. +120. [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. +121. [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. +122. [x] **BEPBCVS**: bbdo_version 3 communication of custom variables. +123. [x] **BEPBHostParent**: bbdo_version 3 communication of host parent relations +124. [x] **BEPBINST_CONF**: bbdo_version 3 communication of instance configuration. +125. [x] **BEPBRI1**: bbdo_version 3 uses pb_resource, the new bbdo protobuf ResponsiveInstance message. +126. [x] **BEPB_HOST_DEPENDENCY**: BBDO 3 communication of host dependencies. +127. [x] **BEPB_SERVICE_DEPENDENCY**: bbdo_version 3 communication of service dependencies. +128. [x] **BERD1**: Starting/stopping Broker does not create duplicated events. +129. [x] **BERD2**: Starting/stopping Engine does not create duplicated events. +130. [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases +131. [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases +132. [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +133. [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +134. [x] **BERDUCA300**: Engine is stopped; it should emit a stop event and receive an ack event with events to clean from broker. +135. [x] **BERDUCA301**: Engine is stopped; it should emit a stop event and receive an ack event with events to clean from broker, with bbdo 3.0.1. +136. [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql +137. [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql +138. [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) +139. [x] **BESERVCHECK**: external command CHECK_SERVICE_RESULT +140. [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first +141. [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first +142. [x] **BESS2U**: Start-Stop Broker/Engine - Broker started first - Engine stopped first. Unified_sql is used. +143. [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped first +144. [x] **BESS4**: Start-Stop Broker/Engine - Engine started first - Broker stopped first +145. [x] **BESS5**: Start-Stop Broker/engine - Engine debug level is set to all, it should not hang +146. [x] **BESSBQ1**: A very bad queue file is written for Broker. Broker and Engine are then started; Broker must read the file, raise an error because of that file, and then get the data sent by Engine. +147. [x] **BESS_CRYPTED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +148. [x] **BESS_CRYPTED_GRPC2**: Start-Stop grpc version Broker/Engine only server crypted +149. [x] **BESS_CRYPTED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +150. [x] **BESS_CRYPTED_REVERSED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +151. 
[x] **BESS_CRYPTED_REVERSED_GRPC2**: Start-Stop grpc version Broker/Engine only engine server crypted +152. [x] **BESS_CRYPTED_REVERSED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +153. [x] **BESS_ENGINE_DELETE_HOST**: once engine and cbd started, stop and restart cbd, delete an host and reload engine, cbd mustn't core +154. [x] **BESS_GRPC1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first +155. [x] **BESS_GRPC2**: Start-Stop grpc version Broker/Engine - Broker started first - Engine stopped first +156. [x] **BESS_GRPC3**: Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first +157. [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first +158. [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang +159. [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped last compression activated +160. [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. +161. [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. +162. [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. +163. [x] **BEUTAG10**: some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. +164. [x] **BEUTAG11**: some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. +165. [x] **BEUTAG12**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. +166. [x] **BEUTAG2**: Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. +167. [x] **BEUTAG3**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. +168. [x] **BEUTAG4**: Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. +169. [x] **BEUTAG5**: Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. +170. [x] **BEUTAG6**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. +171. [x] **BEUTAG7**: Some services are configured with tags on two pollers. Then tags configuration is modified. +172. [x] **BEUTAG8**: Services have tags provided by templates. +173. [x] **BEUTAG9**: hosts have tags provided by templates. +174. [x] **BEUTAG_REMOVE_HOST_FROM_HOSTGROUP**: remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error +175. 
[x] **BE_BACKSLASH_CHECK_RESULT**: external command PROCESS_SERVICE_CHECK_RESULT with \: +176. [x] **BE_DEFAULT_NOTIFCATION_INTERVAL_IS_ZERO_SERVICE_RESOURCE**: default notification_interval must be set to NULL in services, hosts and resources tables. +177. [x] **BE_NOTIF_OVERFLOW**: bbdo 2.0 notification number =40000. make an overflow => notification_number null in db +178. [x] **BE_TIME_NULL_SERVICE_RESOURCE**: With BBDO 3, notification_interval time must be set to NULL on 0 in services, hosts and resources tables. +179. [x] **BRCS1**: Broker reverse connection stopped +180. [x] **BRCTS1**: Broker reverse connection too slow +181. [x] **BRCTSMN**: Broker connected to map with neb filter +182. [x] **BRCTSMNS**: Broker connected to map with neb and storage filters +183. [x] **BRGC1**: Broker good reverse connection +184. [x] **BRRDCDDID1**: RRD metrics deletion from index ids with rrdcached. +185. [x] **BRRDCDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage with rrdcached. +186. [x] **BRRDCDDIDU1**: RRD metrics deletion from index ids with unified sql output with rrdcached. +187. [x] **BRRDCDDM1**: RRD metrics deletion from metric ids with rrdcached. +188. [x] **BRRDCDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage and rrdcached. +189. [x] **BRRDCDDMID1**: RRD deletion of non existing metrics and indexes with rrdcached +190. [x] **BRRDCDDMIDU1**: RRD deletion of non existing metrics and indexes with rrdcached +191. [x] **BRRDCDDMU1**: RRD metric deletion on table metric with unified sql output with rrdcached +192. [x] **BRRDCDRB1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output and rrdcached. +193. [x] **BRRDCDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached +194. [x] **BRRDCDRBU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output and rrdcached. +195. [x] **BRRDCDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql with rrdcached +196. [x] **BRRDDID1**: RRD metrics deletion from index ids. +197. [x] **BRRDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage. +198. [x] **BRRDDIDU1**: RRD metrics deletion from index ids with unified sql output. +199. [x] **BRRDDM1**: RRD metrics deletion from metric ids. +200. [x] **BRRDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage. +201. [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes +202. [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes +203. [x] **BRRDDMU1**: RRD metric deletion on table metric with unified sql output +204. [x] **BRRDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql +205. [x] **BRRDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql +206. [x] **BRRDRM1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with storage/sql sql output. +207. [x] **BRRDRMU1**: RRD metric rebuild with gRPC API. 3 indexes are selected then a message to rebuild them is sent. This is done with unified_sql output. +208. [x] **BRRDSTATUS**: We are working with BBDO3. This test checks status are correctly handled independently from their value. +209. [x] **BRRDSTATUSRETENTION**: We are working with BBDO3. 
This test checks status are not sent twice after Engine reload. +210. [x] **BRRDUPLICATE**: RRD metric rebuild with a query in centreon_storage and unified sql with duplicate rows in database +211. [x] **BRRDWM1**: We are working with BBDO3. This test checks protobuf metrics and status are sent to cbd RRD. +212. [x] **CBD_RELOAD_AND_FILTERS**: We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. +213. [x] **CBD_RELOAD_AND_FILTERS_WITH_OPR**: We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. +214. [x] **DTIM**: New services with several pollers are created. Then downtimes are set on all configured hosts. This action results on 5250 downtimes if we also count impacted services. Then all these downtimes are removed. This test is done with BBDO 3.0.1 +215. [x] **EBBM1**: A service status contains metrics that do not fit in a float number. +216. [x] **EBBPS1**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table resources. +217. [x] **EBBPS2**: 1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services. +218. [x] **EBDP1**: Four new pollers are started and then we remove Poller3. +219. [x] **EBDP2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +220. [x] **EBDP3**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. +221. [x] **EBDP4**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by Broker. +222. [x] **EBDP5**: Four new pollers are started and then we remove Poller3. +223. [x] **EBDP6**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +224. [x] **EBDP7**: Three new pollers are started, then they are killed. It is still possible to remove Poller2 if removed from the configuration. +225. [x] **EBDP8**: Four new pollers are started and then we remove Poller3 with its hosts and services. All service status/host status are then refused by broker. +226. [x] **EBDP_GRPC2**: Three new pollers are started, then they are killed. After a simple restart of broker, it is still possible to remove Poller2 if removed from the configuration. +227. [x] **EBMSSM**: 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin. +228. [x] **EBMSSMDBD**: 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. While metrics are written in the database, we stop the database and then restart it. 
Broker must recover its connection to the database and continue to write metrics. +229. [x] **EBMSSMPART**: 1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. The data_bin table is configured with two partitions p1 and p2 such that p1 contains old data and p2 contains current data. While metrics are written in the database, we remove the p2 partition. Once the p2 partition is recreated, broker must recover its connection to the database and continue to write metrics. To check that last point, we force a last service check and we check that its metrics are written in the database. +230. [x] **EBNHG1**: New host group with several pollers and connections to DB +231. [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup +232. [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql +233. [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql +234. [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql +235. [x] **EBNHGU4_${test_label}**: New host group with several pollers and connections to DB with broker and rename this hostgroup +236. [x] **EBNSG1**: New service group with several pollers and connections to DB +237. [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql +238. [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql +239. [x] **EBNSGU3_${test_label}**: New service group with several pollers and connections to DB with broker and rename this servicegroup +240. [x] **EBNSVC1**: New services with several pollers +241. [x] **EBPS2**: 1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker. +242. [x] **EBSAU2**: New services with action_url with more than 2000 characters +243. [x] **EBSN3**: New services with notes with more than 500 characters +244. [x] **EBSNU1**: New services with notes_url with more than 2000 characters +245. [x] **ENRSCHE1**: Verify that next check of a rescheduled host is made at last_check + interval_check +246. [x] **FILTER_ON_LUA_EVENT**: stream connector with a bad configured filter generate a log error message +247. [x] **GRPC_CLOUD_FAILURE**: simulate a broker failure in cloud environment, we provide a muted grpc server and there must remain only one grpc connection. Then we start broker and connection must be ok +248. [x] **GRPC_RECONNECT**: We restart broker and engine must reconnect to it and send data +249. [x] **LCDNU**: the lua cache updates correctly service cache. +250. [x] **LCDNUH**: the lua cache updates correctly host cache +251. [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink +252. [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink +253. [x] **LOGV2DF1**: log-v2 disabled old log enabled check logfile sink +254. [x] **LOGV2DF2**: log-v2 disabled old log disabled check logfile sink +255. [x] **LOGV2EB1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled. +256. [x] **LOGV2EB2**: log-v2 enabled old log enabled check broker sink +257. 
[x] **LOGV2EBU1**: Checking broker sink when log-v2 is enabled and legacy logs are disabled with bbdo3. +258. [x] **LOGV2EBU2**: Check Broker sink with log-v2 enabled and legacy log enabled with BBDO3. +259. [x] **LOGV2EF1**: log-v2 enabled old log disabled check logfile sink +260. [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink +261. [x] **LOGV2FE2**: log-v2 enabled old log enabled check logfile sink +262. [x] **NO_FILTER_NO_ERROR**: no filter configured => no filter error. +263. [x] **RLCode**: Test if reloading LUA code in a stream connector applies the changes +264. [x] **RRD1**: RRD metric rebuild asked with gRPC API. Three non existing indexes IDs are selected then an error message is sent. This is done with unified_sql output. +265. [x] **SDER**: The check attempts and the max check attempts of (host_1,service_1) are changed to 280 thanks to the retention.dat file. Then Engine and Broker are started and Broker should write these values in the services and resources tables. We only test the services table because we need a resources table that allows bigger numbers for these two attributes. But we see that Broker doesn't crash anymore. +266. [x] **SEVERAL_FILTERS_ON_LUA_EVENT**: Two stream connectors with different filters are configured. +267. [x] **STORAGE_ON_LUA**: The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. +268. [x] **STUPID_FILTER**: Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. +269. [x] **Service_increased_huge_check_interval**: New services with high check interval at creation time. +270. [x] **Services_and_bulks_${id}**: One service is configured with one metric with a name of 150 to 1021 characters. +271. [x] **Start_Stop_Broker_Engine_${id}**: Start-Stop Broker/Engine - Broker started first - Engine stopped first +272. [x] **Start_Stop_Engine_Broker_${id}**: Start-Stop Broker/Engine - Broker started first - Broker stopped first +273. [x] **UNIFIED_SQL_FILTER**: With bbdo version 3.0.1, we watch events written or rejected in unified_sql +274. [x] **VICT_ONE_CHECK_METRIC**: victoria metrics metric output +275. [x] **VICT_ONE_CHECK_METRIC_AFTER_FAILURE**: victoria metrics metric output after victoria shutdown +276. [x] **VICT_ONE_CHECK_STATUS**: victoria metrics status output +277. [x] **Whitelist_Directory_Rights**: log if /etc/centreon-engine-whitelist has not mandatory rights or owner +278. [x] **Whitelist_Empty_Directory**: log if /etc/centreon-engine-whitelist is empty +279. [x] **Whitelist_Host**: test allowed and forbidden commands for hosts +280. [x] **Whitelist_No_Whitelist_Directory**: log if /etc/centreon-engine-whitelist doesn't exist +281. [x] **Whitelist_Perl_Connector**: test allowed and forbidden commands for services +282. [x] **Whitelist_Service**: test allowed and forbidden commands for services +283. [x] **Whitelist_Service_EH**: test allowed and forbidden event handler for services +284. [x] **metric_mapping**: Check if metric name exists using a stream connector +285. [x] **not1**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK HARD state. +286. [x] **not10**: This test case involves scheduling downtime on a down host that already had a critical notification. When The Host returns to UP state we should receive a recovery notification. +287. 
[x] **not11**: This test case involves configuring one service and checking that three alerts are sent for it. +288. [x] **not12**: Escalations +289. [x] **not13**: notification for a host dependency +290. [x] **not14**: notification for a service dependency +291. [x] **not15**: several notification commands for the same user. +292. [x] **not16**: notification for a service group dependency +293. [x] **not17**: notification for a host group dependency +294. [x] **not18**: notification delay where the first notification delay equals the retry check +295. [x] **not19**: notification delay where the first notification delay is greater than the retry check +296. [x] **not1_WL_KO**: This test case configures a single service. When it is in non-OK HARD state a notification should be sent but it is not allowed by the whitelist +297. [x] **not1_WL_OK**: This test case configures a single service. When it is in non-OK HARD state a notification is sent because it is allowed by the whitelist +298. [x] **not2**: This test case configures a single service and verifies that a recovery notification is sent +299. [x] **not20**: notification delay where the first notification delay is smaller than the retry check +300. [x] **not3**: This test case configures a single service and verifies the notification system's behavior during and after downtime +301. [x] **not4**: This test case configures a single service and verifies the notification system's behavior during and after acknowledgement +302. [x] **not5**: This test case configures two services with two different users being notified when the services transition to a critical state. +303. [x] **not6**: This test case validates the behavior when the notification time period is set to null. +304. [x] **not7**: This test case simulates a host alert scenario. +305. [x] **not8**: This test validates the critical host notification. +306. [x] **not9**: This test case configures a single host and verifies that a recovery notification is sent after the host recovers from a non-OK state. +307. [x] **not_in_timeperiod_with_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and that the recovery (OK) notification is sent outside the timeperiod when _send_recovery_notifications_anyways is set +308. [x] **not_in_timeperiod_without_send_recovery_notifications_anyways**: This test case configures a single service and verifies that a notification is sent when the service is in a non-OK state and that the recovery (OK) notification is not sent outside the timeperiod when _send_recovery_notifications_anyways is not set ### Ccc 1. [x] **BECCC1**: ccc without port fails with an error message @@ -524,12 +559,13 @@ Here is the list of the currently implemented tests: 6. [x] **EMACROS_NOTIF**: macros ADMINEMAIL and ADMINPAGER are replaced in notification commands 7. [x] **EMACROS_SEMICOLON**: Macros with a semicolon are used even if they contain a semicolon. 8. [x] **EPC1**: Check with perl connector -9. [x] **ESS1**: Start-Stop (0s between start/stop) 5 times one instance of engine and no coredump -10. [x] **ESS2**: Start-Stop (300ms between start/stop) 5 times one instance of engine and no coredump -11. [x] **ESS3**: Start-Stop (0s between start/stop) 5 times three instances of engine and no coredump -12. [x] **ESS4**: Start-Stop (300ms between start/stop) 5 times three instances of engine and no coredump -13. [x] **EXT_CONF1**: Engine configuration is overided by json conf -14. [x] **EXT_CONF2**: Engine configuration is overided by json conf after reload +9. 
[x] **ERL**: Engine is started and writes logs in centengine.log. Then we remove the log file. The file disappears but Engine is still writing into it. Engine is reloaded and the centengine.log should appear again. +10. [x] **ESS1**: Start-Stop (0s between start/stop) 5 times one instance of engine and no coredump +11. [x] **ESS2**: Start-Stop (300ms between start/stop) 5 times one instance of engine and no coredump +12. [x] **ESS3**: Start-Stop (0s between start/stop) 5 times three instances of engine and no coredump +13. [x] **ESS4**: Start-Stop (300ms between start/stop) 5 times three instances of engine and no coredump +14. [x] **EXT_CONF1**: Engine configuration is overided by json conf +15. [x] **EXT_CONF2**: Engine configuration is overided by json conf after reload ### Migration 1. [x] **MIGRATION**: Migration bbdo2 with sql/storage to bbdo2 with unified_sql and then to bbdo3 with unified_sql and then to bbdo2 with unified_sql and then to bbdo2 with sql/storage diff --git a/tests/bam/inherited_downtime.robot b/tests/bam/inherited_downtime.robot index 3791f248092..13b6713fd99 100644 --- a/tests/bam/inherited_downtime.robot +++ b/tests/bam/inherited_downtime.robot @@ -11,7 +11,10 @@ Test Teardown Ctn Save Logs If Failed *** Test Cases *** BEBAMIDT1 - [Documentation] A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. The downtime is removed from the service, the inherited downtime is then deleted. + [Documentation] A BA of type 'worst' with one service is configured. The BA is in critical + ... state, because of its service. Then we set a downtime on this last one. An inherited + ... downtime is set to the BA. The downtime is removed from the service, the inherited + ... downtime is then deleted. [Tags] broker downtime engine bam Ctn Clear Commands Status Ctn Config Broker module @@ -30,15 +33,10 @@ BEBAMIDT1 ${cmd_1} Ctn Get Service Command Id 314 Log To Console service_314 has command id ${cmd_1} Ctn Set Command Status ${cmd_1} 2 - Ctn Start Broker ${start} Get Current Date - Ctn Start engine - # Let's wait for Engine to be ready - ${content} Create List check_for_external_commands() - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True - ... ${result} - ... A message about checking for external commands should have raised. + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} # KPI set to critical Ctn Process Service Result Hard host_16 service_314 2 output critical for service_314 @@ -67,8 +65,8 @@ BEBAMIDT1 ${result} Ctn Check Service Downtime With Timeout _Module_BAM_1 ba_1 0 60 Should Be True ${result} The BA ba_1 is in downtime as it should not - Ctn Stop engine - Ctn Kindly Stop Broker + Ctn Stop Engine + Ctn Kindly Stop Broker only_central=False bbdo2=True BEBAMIDT2 [Documentation] A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. Engine is restarted. Broker is restarted. The two downtimes are still there with no duplicates. The downtime is removed from the service, the inherited downtime is then deleted. 
@@ -92,7 +90,7 @@ BEBAMIDT2 Ctn Set Command Status ${cmd_1} 2 Ctn Start Broker ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for Engine to be ready ${content} Create List check_for_external_commands() ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -122,9 +120,9 @@ BEBAMIDT2 FOR ${i} IN RANGE 2 # Engine is restarted - Ctn Stop engine + Ctn Stop Engine ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for Engine to be ready ${content} Create List check_for_external_commands() ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -155,7 +153,7 @@ BEBAMIDT2 Should Be True ${result} The BA ba_1 is in downtime as it should not Log To Console Broker is stopped (end of BEBAMIDT2) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker BEBAMIGNDT1 @@ -195,7 +193,7 @@ BEBAMIGNDT1 Ctn Start Broker ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for the initial service states. ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -265,7 +263,7 @@ BEBAMIGNDT1 Should Be True ${result} The critical service is no more in downtime, the BA should be critical. Log To Console The BA is now critical (no more downtime) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker BEBAMIGNDT2 @@ -294,7 +292,7 @@ BEBAMIGNDT2 Ctn Set Command Status ${cmd_2} 2 Ctn Start Broker ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for the initial service states. ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -356,7 +354,7 @@ BEBAMIGNDT2 Should Be True ${result} The critical service is no more in downtime, the BA should be critical. Log To Console The BA is now critical (no more downtime) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker diff --git a/tests/bam/pb_inherited_downtime.robot b/tests/bam/pb_inherited_downtime.robot index ab150fa9277..a96171ca3dc 100644 --- a/tests/bam/pb_inherited_downtime.robot +++ b/tests/bam/pb_inherited_downtime.robot @@ -18,6 +18,11 @@ BEBAMIDTU1 Ctn Config Broker central Ctn Config Broker rrd Ctn Broker Config Log central bam trace + Log To Console Configuring core logger to info + Ctn Broker Config Log central core info + Ctn Broker Config Log rrd core info + Ctn Broker Config Log module0 core info + Log To Console core logger configured Ctn Config BBDO3 ${1} Ctn Config Engine ${1} @@ -31,15 +36,10 @@ BEBAMIDTU1 ${cmd_1} Ctn Get Service Command Id 314 Log To Console service_314 has command id ${cmd_1} Ctn Set Command Status ${cmd_1} 2 - Ctn Start Broker ${start} Get Current Date - Ctn Start engine - # Let's wait for the initial service states. - ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True - ... ${result} - ... An Initial service state on service (50, 1000) should be raised before we can start external commands. 
+ Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} 1 # KPI set to critical Ctn Process Service Result Hard host_16 service_314 2 output critical for 314 @@ -67,7 +67,7 @@ BEBAMIDTU1 ${result} Ctn Check Service Downtime With Timeout _Module_BAM_1 ba_1 0 60 Should Be True ${result} The BA ba_1 is in downtime as it should not - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker BEBAMIDTU2 @@ -94,7 +94,7 @@ BEBAMIDTU2 Ctn Set Command Status ${cmd_1} 2 Ctn Start Broker ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for the initial service states. ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -120,9 +120,9 @@ BEBAMIDTU2 FOR ${i} IN RANGE 2 # Engine is restarted - Ctn Stop engine + Ctn Stop Engine ${start} Get Current Date - Ctn Start engine + Ctn Start Engine # Let's wait for the initial service states. ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 @@ -153,20 +153,28 @@ BEBAMIDTU2 Should Be True ${result} We should have no more downtime Log To Console Broker is stopped (end of BEBAMIDT2) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker BEBAMIGNDTU1 - [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with two services is configured. The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is ok because of the policy on indicators. A first downtime is cancelled, the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. + [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with two services is configured. + ... The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in + ... critical state, because of the second critical service. Then we apply two downtimes on this + ... last one. The BA state is ok because of the policy on indicators. A first downtime is cancelled, + ... the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. [Tags] broker downtime engine bam Ctn Clear Commands Status + Ctn Clear Retention Ctn Config Broker module Ctn Config Broker central + Ctn Config Broker rrd Ctn Broker Config Log central bam trace Ctn Broker Config Log central sql trace Ctn Broker Config Flush Log module0 0 Ctn Broker Config Log module0 neb trace - Ctn Config Broker rrd + Ctn Broker Config Log central core info + Ctn Broker Config Log rrd core info + Ctn Broker Config Log module0 core info Ctn Config Broker Sql Output central unified_sql Ctn Config BBDO3 1 Ctn Config Engine ${1} @@ -192,15 +200,10 @@ BEBAMIGNDTU1 Log To Console service_314 has command id ${cmd_2} Ctn Set Command Status ${cmd_2} 2 - Ctn Start Broker ${start} Get Current Date - Ctn Start engine - # Let's wait for the initial service states. - ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True - ... ${result} - ... An Initial service state on service (50, 1000) should be raised before we can start external commands. 
+ Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} 1 # KPI set to ok Ctn Process Service Result Hard host_16 service_313 0 output critical for 313 @@ -264,7 +267,7 @@ BEBAMIGNDTU1 Should Be True ${result} The critical service is no more in downtime, the BA should be critical. Log To Console The BA is now critical (no more downtime) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker BEBAMIGNDTU2 @@ -293,15 +296,10 @@ BEBAMIGNDTU2 ${cmd_2} Ctn Get Service Command Id 314 Log To Console service_314 has command id ${cmd_2} Ctn Set Command Status ${cmd_2} 2 - Ctn Start Broker ${start} Get Current Date - Ctn Start engine - # Let's wait for the initial service states. - ${content} Create List INITIAL SERVICE STATE: host_50;service_1000; - ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 60 - Should Be True - ... ${result} - ... An Initial service state on service (50, 1000) should be raised before we can start external commands. + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} 1 # KPI set to ok Ctn Process Service Result Hard host_16 service_313 0 output critical for 313 @@ -354,7 +352,7 @@ BEBAMIGNDTU2 Should Be True ${result} The critical service is no more in downtime, the BA should be critical. Log To Console The BA is now critical (no more downtime) - Ctn Stop engine + Ctn Stop Engine Ctn Kindly Stop Broker diff --git a/tests/broker-engine/acknowledgement.robot b/tests/broker-engine/acknowledgement.robot index f3fa3bd6905..20e9732a616 100644 --- a/tests/broker-engine/acknowledgement.robot +++ b/tests/broker-engine/acknowledgement.robot @@ -164,7 +164,7 @@ BEACK4 ... acknowledge it. The centreon_storage.acknowledgements table is then updated with this ... acknowledgement. The acknowledgement is removed and the comment in the comments table has its ... deletion_time column updated. - [Tags] broker engine services extcmd + [Tags] broker engine services extcmd MON-150015 Ctn Config Engine ${1} ${50} ${20} Ctn Config Broker rrd Ctn Config Broker central @@ -262,17 +262,17 @@ BEACK5 BEACK6 [Documentation] Configuration is made with BBDO3. Engine has a critical service. An external command is sent to - ... acknowledge it ; the acknowledgement is sticky. The centreon_storage.acknowledgements table is - ... then updated with this acknowledgement. The service is newly set to WARNING. - ... And the acknowledgement in database is still there. - [Tags] broker engine services extcmd + ... acknowledge it ; the acknowledgement is sticky. The centreon_storage.acknowledgements table is + ... then updated with this acknowledgement. The service is newly set to WARNING. + ... And the acknowledgement in database is still there. + [Tags] broker engine services extcmd MON-150015 Ctn Config Engine ${1} ${50} ${20} Ctn Config Broker rrd Ctn Config Broker central Ctn Config Broker module ${1} Ctn Config BBDO3 ${1} Ctn Broker Config Log module0 neb debug - Ctn Broker Config Log central sql debug + Ctn Broker Config Log central sql trace ${start} Get Current Date Ctn Start Broker @@ -303,7 +303,7 @@ BEACK6 # Acknowledgement is not deleted. ${result} Ctn Check Acknowledgement Is Deleted With Timeout ${ack_id} 10 - Should Be True not ${result} Acknowledgement ${ack_id} should not be deleted. + Should Not Be True ${result} Acknowledgement ${ack_id} should not be deleted. 
Ctn Remove Service Acknowledgement host_1 service_1 diff --git a/tests/broker-engine/downtimes.robot b/tests/broker-engine/downtimes.robot index abb7f0bc0b2..075d1a6cfd2 100644 --- a/tests/broker-engine/downtimes.robot +++ b/tests/broker-engine/downtimes.robot @@ -330,6 +330,71 @@ DTIM Ctn Kindly Stop Broker +BEDTRRD1 + [Documentation] A service is force-checked, then a downtime is set on this service. + ... The service is force-checked again and the downtime is removed. + ... This test is done with BBDO 3.0.0. + ... Then we should not get any error in cbd RRD of kind 'ignored update error in file...'. + [Tags] broker engine services protobuf MON-150015 + Ctn Config Engine ${1} + Ctn Engine Config Set Value ${0} log_level_functions trace + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module ${1} + Ctn Broker Config Log central sql debug + Ctn Broker Config Log module0 neb debug + + Ctn Config BBDO3 1 + Ctn Clear Retention + ${start} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + Ctn Process Service Check Result With Metrics host_1 service_1 2 host_1:service_1 is CRITICAL HARD 20 + Sleep 1s + Ctn Process Service Check Result With Metrics host_1 service_1 2 host_1:service_1 is CRITICAL HARD 20 + Sleep 1s + Ctn Process Service Check Result With Metrics host_1 service_1 2 host_1:service_1 is CRITICAL HARD 20 + Sleep 1s + ${result} Ctn Check Service Resource Status With Timeout host_1 service_1 2 ${60} HARD + Should Be True ${result} The service should be in CRITICAL state HARD. + + ${result} Grep File ${rrdLog} "ignored update error in file" + Should Be Empty + ... ${result} + ... There should not be any error in cbd RRD of kind 'ignored update error in file...' After step 1. + + Ctn Schedule Service Downtime host_1 service_1 ${3600} + ${result} Ctn Check Service Downtime With Timeout host_1 service_1 1 ${60} + Should Be True ${result} The service should be in downtime. + + ${result} Grep File ${rrdLog} "ignored update error in file" + Should Be Empty ${result} There should not be any error in cbd RRD of kind 'ignored update error in file...' After step 2. + + Ctn Process Service Check Result With Metrics host_1 service_1 1 host_1:service_1 is WARNING HARD 20 + Sleep 1s + Ctn Process Service Check Result With Metrics host_1 service_1 1 host_1:service_1 is WARNING HARD 20 + Sleep 1s + Ctn Process Service Check Result With Metrics host_1 service_1 1 host_1:service_1 is WARNING HARD 20 + Sleep 1s + ${result} Ctn Check Service Resource Status With Timeout host_1 service_1 1 ${60} HARD + Should Be True ${result} The service should be in WARNING state HARD. + + ${result} Grep File ${rrdLog} "ignored update error in file" + Should Be Empty ${result} There should not be any error in cbd RRD of kind 'ignored update error in file...' After step 3. + + Ctn Delete Service Downtime host_1 service_1 + ${result} Ctn Check Number Of Downtimes ${0} ${start} ${120} + Should Be True ${result} We should have no downtime enabled. + + ${result} Grep File ${rrdLog} "ignored update error in file" + Should Be Empty ${result} There should not be any error in cbd RRD of kind 'ignored update error in file...' After step 4. 
+ + Ctn Stop Engine + Ctn Kindly Stop Broker + + *** Keywords *** Ctn Clean Downtimes Before Suite Ctn Clean Before Suite diff --git a/tests/broker-engine/notifications.robot b/tests/broker-engine/notifications.robot index 83d17067aea..c9effd14553 100644 --- a/tests/broker-engine/notifications.robot +++ b/tests/broker-engine/notifications.robot @@ -1007,7 +1007,6 @@ not15 Ctn Engine Config Set Value In Contacts 0 John_Doe host_notification_commands command_notif,command_notif1 Ctn Engine Config Set Value In Contacts 0 John_Doe service_notification_commands command_notif,command_notif1 - ${start} Get Current Date Ctn Start Broker Ctn Start Engine diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index 68669c9faa7..3456dee2ad8 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -1326,7 +1326,7 @@ def ctn_broker_config_log(name, key, value): if name == 'central': filename = "central-broker.json" elif name.startswith('module'): - filename = "central-{}.json".format(name) + filename = f"central-{name}.json" else: filename = "central-rrd.json" with open(f"{ETC_ROOT}/centreon-broker/{filename}", "r") as f: diff --git a/tests/resources/Common.py b/tests/resources/Common.py index b636cb2881f..6d4567cd000 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -652,6 +652,14 @@ def ctn_check_acknowledgement_with_timeout(hostname: str, service_desc: str, ent def ctn_check_acknowledgement_is_deleted_with_timeout(ack_id: int, timeout: int, which='COMMENTS'): + """ + Check if an acknowledgement is deleted in comments, acknowledgements or both + + Args: + ack_id (int): The acknowledgement id + timeout (int): The timeout in seconds + which (str): The table to check. It can be 'comments', 'acknowledgements' or 'BOTH' + """ limit = time.time() + timeout while time.time() < limit: connection = pymysql.connect(host=DB_HOST, @@ -667,7 +675,6 @@ def ctn_check_acknowledgement_is_deleted_with_timeout(ack_id: int, timeout: int, cursor.execute( f"SELECT c.deletion_time, a.entry_time, a.deletion_time FROM comments c LEFT JOIN acknowledgements a ON c.host_id=a.host_id AND c.service_id=a.service_id AND c.entry_time=a.entry_time WHERE c.entry_type=4 AND a.acknowledgement_id={ack_id}") result = cursor.fetchall() - logger.console(f"### {result}") if len(result) > 0 and result[0]['deletion_time'] is not None and int(result[0]['deletion_time']) >= int(result[0]['entry_time']): if which == 'BOTH': if result[0]['a.deletion_time']: @@ -942,9 +949,11 @@ def ctn_check_service_downtime_with_timeout(hostname: str, service_desc: str, en with connection: with connection.cursor() as cursor: if enabled != '0': - cursor.execute("SELECT s.scheduled_downtime_depth FROM downtimes d INNER JOIN hosts h ON d.host_id=h.host_id INNER JOIN services s ON d.service_id=s.service_id WHERE d.deletion_time is null AND s.description='{}' AND h.name='{}'".format( - service_desc, hostname)) + logger.console(f"SELECT s.scheduled_downtime_depth FROM downtimes d INNER JOIN hosts h ON d.host_id=h.host_id INNER JOIN services s ON d.service_id=s.service_id WHERE d.deletion_time is null AND s.description='{service_desc}' AND h.name='{hostname}'") + cursor.execute(f"SELECT s.scheduled_downtime_depth FROM downtimes d INNER JOIN hosts h ON d.host_id=h.host_id INNER JOIN services s ON d.service_id=s.service_id WHERE d.deletion_time is null AND s.description='{service_desc}' AND h.name='{hostname}'") result = cursor.fetchall() + if len(result) > 0: + logger.console(f"scheduled_downtime_depth: 
{result[0]['scheduled_downtime_depth']}") if len(result) == int(enabled) and result[0]['scheduled_downtime_depth'] is not None and result[0]['scheduled_downtime_depth'] == int(enabled): return True if (len(result) > 0): @@ -1215,7 +1224,7 @@ def ctn_delete_service_downtime(hst: str, svc: str): logger.console(f"delete downtime internal_id={did}") cmd = f"[{now}] DEL_SVC_DOWNTIME;{did}\n" - f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(f"{VAR_ROOT}/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1422,7 +1431,11 @@ def ctn_check_number_of_resources_monitored_by_poller_is(poller: int, value: int def ctn_check_number_of_downtimes(expected: int, start, timeout: int): limit = time.time() + timeout - d = parser.parse(start).timestamp() + try: + d = parser.parse(start) + except: + d = datetime.fromtimestamp(start) + d = d.timestamp() while time.time() < limit: connection = pymysql.connect(host=DB_HOST, user=DB_USER, @@ -1432,6 +1445,8 @@ def ctn_check_number_of_downtimes(expected: int, start, timeout: int): cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: + logger.console( + f"SELECT count(*) FROM downtimes WHERE start_time >= {d} AND deletion_time IS NULL") cursor.execute( f"SELECT count(*) FROM downtimes WHERE start_time >= {d} AND deletion_time IS NULL") result = cursor.fetchall() diff --git a/tests/resources/resources.resource b/tests/resources/resources.resource index ac2e0ae7d50..9b6ed278df5 100644 --- a/tests/resources/resources.resource +++ b/tests/resources/resources.resource @@ -89,7 +89,12 @@ Ctn Reload Broker IF not ${only_central} Send Signal To Process SIGHUP b2 Ctn Kindly Stop Broker - [Arguments] ${only_central}=False + [Documentation] Stop broker and check if it's correctly stopped. + ... If not, generate a coredump and kill the process. + ... If only_central is set to True, only the central broker will be stopped. + ... If bbdo2 is set to True, the rrd broker will not be checked for RRD logs + ... since there are still bugs in this context. + [Arguments] ${only_central}=False ${bbdo2}=False Send Signal To Process SIGTERM b1 IF not ${only_central} Send Signal To Process SIGTERM b2 ${result} Wait For Process b1 timeout=60s @@ -106,6 +111,11 @@ Ctn Kindly Stop Broker # Copy Coredump In Failed Dir b1 /usr/sbin/cbd broker_central Ctn Coredump Info b1 /usr/sbin/cbd broker_central Should Be Equal As Integers ${result.rc} 0 Central Broker not correctly stopped + ELIF not ${bbdo2} + Log To Console "central broker stopped" + # We have to check if central RRD logs contain errors about data sent in the past. + ${result} Grep File ${rrdLog} RRD: ignored update error in file + Should Be Empty ${result} Central RRD logs contain errors about metrics sent in the past. 
END END diff --git a/tests/update-doc.py b/tests/update-doc.py index 20698ac075d..8f25ec10fb2 100755 --- a/tests/update-doc.py +++ b/tests/update-doc.py @@ -90,26 +90,42 @@ def parse_dir(d): From a Centreon host, you need to install Robot Framework -On CentOS 7, the following commands should work to initialize your robot tests: -``` -pip3 install -U robotframework robotframework-databaselibrary robotframework-examples pymysql robotframework-requests +On AlmaLinux, the following commands should work to initialize your robot tests: +```bash +dnf install -y "@Development Tools" python3-devel + +pip3 install -U robotframework \ robotframework-databaselibrary \ robotframework-examples pymysql \ robotframework-requests psutil \ robotframework-httpctrl boto3 \ GitPython unqlite py-cpuinfo -yum install "Development Tools" python3-devel -y -pip3 install grpcio==1.33.2 grpcio_tools==1.33.2 +pip3 install grpcio grpcio_tools #you need also to provide opentelemetry proto files at the project root with this command git clone https://github.com/open-telemetry/opentelemetry-proto.git opentelemetry-proto - + #Then you must have something like that: #root directory/bbdo # /broker # /engine # /opentelemetry-proto # /tests +``` + +We need some Perl modules to run the tests; you can install them with the following commands: + +```bash +dnf install perl-HTTP-Daemon-SSL +dnf install perl-JSON +``` +Then you can initialize the tests with the following commands: +```bash ./init-proto.sh ./init-sql.sh ```
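For readers following these setup notes, a minimal sketch of how the suites could then be launched with Robot Framework is shown below. It is not part of the patch itself; the suite path and test name are illustrative examples only, and they assume the commands are run from the tests/ directory of this repository after the init scripts above have completed.

```bash
# Minimal usage sketch (assumes the init scripts above completed and that we are
# in the tests/ directory of the repository).

# Run a whole suite:
robot broker-engine/downtimes.robot

# Run a single test case by name, e.g. the BEDTRRD1 test added by this patch:
robot --test BEDTRRD1 broker-engine/downtimes.robot
```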