diff --git a/CMakeLists.txt b/CMakeLists.txt index e85a1661333..7b73516a309 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -56,6 +56,8 @@ set(OSRM_VERSION_MINOR 5) set(OSRM_VERSION_PATCH 0) set(OSRM_VERSION "${OSRM_VERSION_MAJOR}.${OSRM_VERSION_MINOR}.${OSRM_VERSION_PATCH}") +add_definitions(-DOSRM_PROJECT_DIR="${CMAKE_CURRENT_SOURCE_DIR}") + # these two functions build up custom variables: # DEPENDENCIES_INCLUDE_DIRS and OSRM_DEFINES # These variables we want to pass to diff --git a/include/contractor/graph_contractor.hpp b/include/contractor/graph_contractor.hpp index 3b4cd90ae0b..0b831be9ff1 100644 --- a/include/contractor/graph_contractor.hpp +++ b/include/contractor/graph_contractor.hpp @@ -6,8 +6,8 @@ #include "util/deallocating_vector.hpp" #include "util/dynamic_graph.hpp" #include "util/integer_range.hpp" +#include "util/log.hpp" #include "util/percent.hpp" -#include "util/simple_logger.hpp" #include "util/timing_util.hpp" #include "util/typedefs.hpp" #include "util/xor_fast_hash.hpp" @@ -156,11 +156,10 @@ class GraphContractor #ifndef NDEBUG if (static_cast(std::max(diter->weight, 1)) > 24 * 60 * 60 * 10) { - util::SimpleLogger().Write(logWARNING) - << "Edge weight large -> " - << static_cast(std::max(diter->weight, 1)) << " : " - << static_cast(diter->source) << " -> " - << static_cast(diter->target); + util::Log(logWARNING) << "Edge weight large -> " + << static_cast(std::max(diter->weight, 1)) + << " : " << static_cast(diter->source) << " -> " + << static_cast(diter->target); } #endif edges.emplace_back(diter->source, @@ -245,15 +244,14 @@ class GraphContractor } } } - util::SimpleLogger().Write() << "merged " << edges.size() - edge << " edges out of " - << edges.size(); + util::Log() << "merged " << edges.size() - edge << " edges out of " << edges.size(); edges.resize(edge); contractor_graph = std::make_shared(nodes, edges); edges.clear(); edges.shrink_to_fit(); BOOST_ASSERT(0 == edges.capacity()); - util::SimpleLogger().Write() << "contractor 
finished initalization"; + util::Log() << "contractor finished initalization"; } void Run(double core_factor = 1.0) @@ -270,7 +268,6 @@ class GraphContractor const constexpr size_t DeleteGrainSize = 1; const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes(); - util::Percent p(number_of_nodes); ThreadDataContainer thread_data_list(number_of_nodes); @@ -292,9 +289,10 @@ class GraphContractor bool use_cached_node_priorities = !node_levels.empty(); if (use_cached_node_priorities) { - std::cout << "using cached node priorities ..." << std::flush; + util::UnbufferedLog log; + log << "using cached node priorities ..."; node_priorities.swap(node_levels); - std::cout << "ok" << std::endl; + log << "ok"; } else { @@ -302,7 +300,8 @@ class GraphContractor node_priorities.resize(number_of_nodes); node_levels.resize(number_of_nodes); - std::cout << "initializing elimination PQ ..." << std::flush; + util::UnbufferedLog log; + log << "initializing elimination PQ ..."; tbb::parallel_for(tbb::blocked_range(0, number_of_nodes, PQGrainSize), [this, &node_priorities, &node_depth, &thread_data_list]( const tbb::blocked_range &range) { @@ -313,11 +312,14 @@ class GraphContractor this->EvaluateNodePriority(data, node_depth[x], x); } }); - std::cout << "ok" << std::endl; + log << "ok"; } BOOST_ASSERT(node_priorities.size() == number_of_nodes); - std::cout << "preprocessing " << number_of_nodes << " nodes ..." 
<< std::flush; + util::Log() << "preprocessing " << number_of_nodes << " nodes ..."; + + util::UnbufferedLog log; + util::Percent p(log, number_of_nodes); unsigned current_level = 0; bool flushed_contractor = false; @@ -331,7 +333,7 @@ class GraphContractor new_edge_set; // this one is not explicitely // cleared since it goes out of // scope anywa - std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush; + log << " [flush " << number_of_contracted_nodes << " nodes] "; // Delete old heap data to free memory that we need for the coming operations thread_data_list.data.clear(); @@ -599,9 +601,8 @@ class GraphContractor is_core_node.clear(); } - util::SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes " - << contractor_graph->GetNumberOfEdges() << " edges." - << std::endl; + util::Log() << "[core] " << remaining_nodes.size() << " nodes " + << contractor_graph->GetNumberOfEdges() << " edges."; thread_data_list.data.clear(); } @@ -618,8 +619,9 @@ class GraphContractor template inline void GetEdges(util::DeallocatingVector &edges) { - util::Percent p(contractor_graph->GetNumberOfNodes()); - util::SimpleLogger().Write() << "Getting edges of minimized graph"; + util::UnbufferedLog log; + log << "Getting edges of minimized graph "; + util::Percent p(log, contractor_graph->GetNumberOfNodes()); const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes(); if (contractor_graph->GetNumberOfNodes()) { diff --git a/include/engine/datafacade/contiguous_internalmem_datafacade_base.hpp b/include/engine/datafacade/contiguous_internalmem_datafacade_base.hpp index 9ebe18e7545..07df63c2ccd 100644 --- a/include/engine/datafacade/contiguous_internalmem_datafacade_base.hpp +++ b/include/engine/datafacade/contiguous_internalmem_datafacade_base.hpp @@ -12,11 +12,13 @@ #include "util/guidance/turn_lanes.hpp" #include "engine/geospatial_query.hpp" +#include "util/exception.hpp" +#include "util/exception_utils.hpp" #include 
"util/guidance/turn_bearing.hpp" +#include "util/log.hpp" #include "util/packed_vector.hpp" #include "util/range_table.hpp" #include "util/rectangle.hpp" -#include "util/simple_logger.hpp" #include "util/static_graph.hpp" #include "util/static_rtree.hpp" #include "util/typedefs.hpp" @@ -116,7 +118,7 @@ class ContiguousInternalMemoryDataFacadeBase : public BaseDataFacade { m_check_sum = *data_layout.GetBlockPtr(memory_block, storage::DataLayout::HSGR_CHECKSUM); - util::SimpleLogger().Write() << "set checksum: " << m_check_sum; + util::Log() << "set checksum: " << m_check_sum; } void InitializeProfilePropertiesPointer(storage::DataLayout &data_layout, char *memory_block) @@ -144,9 +146,9 @@ class ContiguousInternalMemoryDataFacadeBase : public BaseDataFacade file_index_path = boost::filesystem::path(file_index_ptr); if (!boost::filesystem::exists(file_index_path)) { - util::SimpleLogger().Write(logDEBUG) << "Leaf file name " << file_index_path.string(); + util::Log(logDEBUG) << "Leaf file name " << file_index_path.string(); throw util::exception("Could not load " + file_index_path.string() + - "Is any data loaded into shared memory?"); + "Is any data loaded into shared memory?" 
+ SOURCE_REF); } auto tree_ptr = diff --git a/include/engine/datafacade/shared_memory_datafacade.hpp b/include/engine/datafacade/shared_memory_datafacade.hpp index f7fcbdbb1e0..caa0bf812b0 100644 --- a/include/engine/datafacade/shared_memory_datafacade.hpp +++ b/include/engine/datafacade/shared_memory_datafacade.hpp @@ -56,8 +56,7 @@ class SharedMemoryDataFacade : public ContiguousInternalMemoryDataFacadeBase if (current_timestamp->timestamp == shared_timestamp) { - util::SimpleLogger().Write(logDEBUG) << "Retaining data with shared timestamp " - << shared_timestamp; + util::Log(logDEBUG) << "Retaining data with shared timestamp " << shared_timestamp; } else { @@ -74,8 +73,7 @@ class SharedMemoryDataFacade : public ContiguousInternalMemoryDataFacadeBase : shared_barriers(shared_barriers_), layout_region(layout_region_), data_region(data_region_), shared_timestamp(shared_timestamp_) { - util::SimpleLogger().Write(logDEBUG) << "Loading new data with shared timestamp " - << shared_timestamp; + util::Log(logDEBUG) << "Loading new data with shared timestamp " << shared_timestamp; BOOST_ASSERT(storage::SharedMemory::RegionExists(layout_region)); m_layout_memory = storage::makeSharedMemory(layout_region); diff --git a/include/engine/engine.hpp b/include/engine/engine.hpp index d9281c32071..94a41653932 100644 --- a/include/engine/engine.hpp +++ b/include/engine/engine.hpp @@ -18,6 +18,8 @@ #include "engine/plugins/trip.hpp" #include "engine/plugins/viaroute.hpp" #include "engine/status.hpp" +#include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/json_container.hpp" #include diff --git a/include/engine/routing_algorithms/alternative_path.hpp b/include/engine/routing_algorithms/alternative_path.hpp index 424cf5cb7dc..4207574da6a 100644 --- a/include/engine/routing_algorithms/alternative_path.hpp +++ b/include/engine/routing_algorithms/alternative_path.hpp @@ -235,10 +235,10 @@ class AlternativeRouting final } } - // 
util::SimpleLogger().Write(logDEBUG) << "fwd_search_space size: " << + // util::Log(logDEBUG) << "fwd_search_space size: " << // forward_search_space.size() << ", marked " << approximated_forward_sharing.size() << " // nodes"; - // util::SimpleLogger().Write(logDEBUG) << "rev_search_space size: " << + // util::Log(logDEBUG) << "rev_search_space size: " << // reverse_search_space.size() << ", marked " << approximated_reverse_sharing.size() << " // nodes"; @@ -601,7 +601,7 @@ class AlternativeRouting final // //compute forward sharing // while( (packed_alternate_path[aindex] == packed_shortest_path[aindex]) && // (packed_alternate_path[aindex+1] == packed_shortest_path[aindex+1]) ) { - // // util::SimpleLogger().Write() << "retrieving edge (" << + // // util::Log() << "retrieving edge (" << // packed_alternate_path[aindex] << "," << packed_alternate_path[aindex+1] << ")"; // EdgeID edgeID = facade->FindEdgeInEitherDirection(packed_alternate_path[aindex], // packed_alternate_path[aindex+1]); @@ -640,7 +640,7 @@ class AlternativeRouting final const NodeID node = forward_heap.DeleteMin(); const int weight = forward_heap.GetKey(node); // const NodeID parentnode = forward_heap.GetData(node).parent; - // util::SimpleLogger().Write() << (is_forward_directed ? "[fwd] " : "[rev] ") << "settled + // util::Log() << (is_forward_directed ? 
"[fwd] " : "[rev] ") << "settled // edge (" // << parentnode << "," << node << "), dist: " << weight; @@ -665,11 +665,11 @@ class AlternativeRouting final { *middle_node = node; *upper_bound_to_shortest_path_weight = new_weight; - // util::SimpleLogger().Write() << "accepted middle_node " << *middle_node + // util::Log() << "accepted middle_node " << *middle_node // << " at // weight " << new_weight; // } else { - // util::SimpleLogger().Write() << "discarded middle_node " << *middle_node + // util::Log() << "discarded middle_node " << *middle_node // << " // at weight " << new_weight; } diff --git a/include/engine/trip/trip_brute_force.hpp b/include/engine/trip/trip_brute_force.hpp index fcf4ebf8c8c..650f2a6677c 100644 --- a/include/engine/trip/trip_brute_force.hpp +++ b/include/engine/trip/trip_brute_force.hpp @@ -2,7 +2,7 @@ #define TRIP_BRUTE_FORCE_HPP #include "util/dist_table_wrapper.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" #include "osrm/json_container.hpp" diff --git a/include/engine/trip/trip_nearest_neighbour.hpp b/include/engine/trip/trip_nearest_neighbour.hpp index b0ec09375b4..6243c871154 100644 --- a/include/engine/trip/trip_nearest_neighbour.hpp +++ b/include/engine/trip/trip_nearest_neighbour.hpp @@ -2,7 +2,7 @@ #define TRIP_NEAREST_NEIGHBOUR_HPP #include "util/dist_table_wrapper.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" #include "osrm/json_container.hpp" diff --git a/include/extractor/guidance/turn_lane_types.hpp b/include/extractor/guidance/turn_lane_types.hpp index dbd4c5b50d8..8501526df81 100644 --- a/include/extractor/guidance/turn_lane_types.hpp +++ b/include/extractor/guidance/turn_lane_types.hpp @@ -12,7 +12,7 @@ #include #include "util/json_container.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" namespace osrm diff --git a/include/extractor/raster_source.hpp 
b/include/extractor/raster_source.hpp index 05f2e4734a9..602c6f02daa 100644 --- a/include/extractor/raster_source.hpp +++ b/include/extractor/raster_source.hpp @@ -66,13 +66,14 @@ class RasterGrid } catch (std::exception const &ex) { - throw util::exception( - std::string("Failed to read from raster source with exception: ") + ex.what()); + throw util::exception("Failed to read from raster source " + filepath.string() + ": " + + ex.what() + SOURCE_REF); } if (!r || itr != end) { - throw util::exception("Failed to parse raster source correctly."); + throw util::exception("Failed to parse raster source: " + filepath.string() + + SOURCE_REF); } } diff --git a/include/extractor/tarjan_scc.hpp b/include/extractor/tarjan_scc.hpp index 72904eaa7d9..adb5f8cb3cf 100644 --- a/include/extractor/tarjan_scc.hpp +++ b/include/extractor/tarjan_scc.hpp @@ -8,7 +8,7 @@ #include "util/typedefs.hpp" #include "util/integer_range.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/std_hash.hpp" #include "util/timing_util.hpp" @@ -146,8 +146,8 @@ template class TarjanSCC if (size_of_current_component > 1000) { - util::SimpleLogger().Write() << "large component [" << component_index - << "]=" << size_of_current_component; + util::Log() << "large component [" << component_index + << "]=" << size_of_current_component; } ++component_index; @@ -158,7 +158,7 @@ template class TarjanSCC } TIMER_STOP(SCC_RUN); - util::SimpleLogger().Write() << "SCC run took: " << TIMER_MSEC(SCC_RUN) / 1000. << "s"; + util::Log() << "SCC run took: " << TIMER_MSEC(SCC_RUN) / 1000. 
<< "s"; size_one_counter = std::count_if(component_size_vector.begin(), component_size_vector.end(), diff --git a/include/server/server.hpp b/include/server/server.hpp index 1aa3a9e753b..0b944fd4a0f 100644 --- a/include/server/server.hpp +++ b/include/server/server.hpp @@ -6,7 +6,7 @@ #include "server/service_handler.hpp" #include "util/integer_range.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -36,8 +36,7 @@ class Server static std::shared_ptr CreateServer(std::string &ip_address, int ip_port, unsigned requested_num_threads) { - util::SimpleLogger().Write() << "http 1.1 compression handled by zlib version " - << zlibVersion(); + util::Log() << "http 1.1 compression handled by zlib version " << zlibVersion(); const unsigned hardware_threads = std::max(1u, std::thread::hardware_concurrency()); const unsigned real_num_threads = std::min(hardware_threads, requested_num_threads); return std::make_shared(ip_address, ip_port, real_num_threads); @@ -62,7 +61,7 @@ class Server acceptor.bind(endpoint); acceptor.listen(); - util::SimpleLogger().Write() << "Listening on: " << acceptor.local_endpoint(); + util::Log() << "Listening on: " << acceptor.local_endpoint(); acceptor.async_accept( new_connection->socket(), diff --git a/include/storage/io.hpp b/include/storage/io.hpp index e5121e7d656..6c447263396 100644 --- a/include/storage/io.hpp +++ b/include/storage/io.hpp @@ -2,8 +2,9 @@ #define OSRM_STORAGE_IO_HPP_ #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/fingerprint.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -55,7 +56,7 @@ class FileReader if (flag == VerifyFingerprint && !ReadAndCheckFingerprint()) { - throw util::exception("Fingerprint mismatch in " + filepath.string()); + throw util::exception("Fingerprint mismatch in " + filepath_.string() + SOURCE_REF); } } @@ -77,9 +78,9 @@ class FileReader if (result.eof()) { throw util::exception("Error reading 
from " + filepath.string() + - ": Unexpected end of file"); + ": Unexpected end of file " + SOURCE_REF); } - throw util::exception("Error reading from " + filepath.string()); + throw util::exception("Error reading from " + filepath.string() + " " + SOURCE_REF); } } diff --git a/include/storage/serialization.hpp b/include/storage/serialization.hpp index a01d00342cb..41bbee41e2f 100644 --- a/include/storage/serialization.hpp +++ b/include/storage/serialization.hpp @@ -8,7 +8,7 @@ #include "storage/io.hpp" #include "util/exception.hpp" #include "util/fingerprint.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/static_graph.hpp" #include @@ -44,8 +44,8 @@ inline HSGRHeader readHSGRHeader(io::FileReader &input_file) const auto fingerprint_loaded = input_file.ReadOne(); if (!fingerprint_loaded.TestGraphUtil(fingerprint_valid)) { - util::SimpleLogger().Write(logWARNING) << ".hsgr was prepared with different build.\n" - "Reprocess to get rid of this warning."; + util::Log(logWARNING) << ".hsgr was prepared with different build.\n" + "Reprocess to get rid of this warning."; } HSGRHeader header; diff --git a/include/storage/shared_datatype.hpp b/include/storage/shared_datatype.hpp index a2a2e06270c..e55937ff31c 100644 --- a/include/storage/shared_datatype.hpp +++ b/include/storage/shared_datatype.hpp @@ -2,7 +2,8 @@ #define SHARED_DATA_TYPE_HPP #include "util/exception.hpp" -#include "util/simple_logger.hpp" +#include "util/exception_utils.hpp" +#include "util/log.hpp" #include #include @@ -175,13 +176,13 @@ struct DataLayout bool end_canary_alive = std::equal(CANARY, CANARY + sizeof(CANARY), end_canary_ptr); if (!start_canary_alive) { - throw util::exception(std::string("Start canary of block corrupted. (") + - block_id_to_name[bid] + ")"); + throw util::exception("Start canary of block corrupted. 
(" + + std::string(block_id_to_name[bid]) + ")" + SOURCE_REF); } if (!end_canary_alive) { - throw util::exception(std::string("End canary of block corrupted. (") + - block_id_to_name[bid] + ")"); + throw util::exception("End canary of block corrupted. (" + + std::string(block_id_to_name[bid]) + ")" + SOURCE_REF); } } diff --git a/include/storage/shared_memory.hpp b/include/storage/shared_memory.hpp index b0f9669fc6f..296552caa08 100644 --- a/include/storage/shared_memory.hpp +++ b/include/storage/shared_memory.hpp @@ -2,7 +2,8 @@ #define SHARED_MEMORY_HPP #include "util/exception.hpp" -#include "util/simple_logger.hpp" +#include "util/exception_utils.hpp" +#include "util/log.hpp" #include #include @@ -62,8 +63,7 @@ class SharedMemory { shm = boost::interprocess::xsi_shared_memory(boost::interprocess::open_only, key); - util::SimpleLogger().Write(logDEBUG) << "opening " << shm.get_shmid() << " from id " - << id; + util::Log(logDEBUG) << "opening " << shm.get_shmid() << " from id " << id; region = boost::interprocess::mapped_region(shm, access); } @@ -72,14 +72,14 @@ class SharedMemory { shm = boost::interprocess::xsi_shared_memory( boost::interprocess::open_or_create, key, size); - util::SimpleLogger().Write(logDEBUG) << "opening/creating " << shm.get_shmid() - << " from id " << id << " with size " << size; + util::Log(logDEBUG) << "opening/creating " << shm.get_shmid() << " from id " << id + << " with size " << size; #ifdef __linux__ if (-1 == shmctl(shm.get_shmid(), SHM_LOCK, nullptr)) { if (ENOMEM == errno) { - util::SimpleLogger().Write(logWARNING) << "could not lock shared memory to RAM"; + util::Log(logWARNING) << "could not lock shared memory to RAM"; } } #endif @@ -133,7 +133,7 @@ class SharedMemory static bool Remove(const boost::interprocess::xsi_key &key) { boost::interprocess::xsi_shared_memory xsi(boost::interprocess::open_only, key); - util::SimpleLogger().Write(logDEBUG) << "deallocating prev memory " << xsi.get_shmid(); + util::Log(logDEBUG) << 
"deallocating prev memory " << xsi.get_shmid(); return boost::interprocess::xsi_shared_memory::remove(xsi.get_shmid()); } @@ -173,8 +173,7 @@ class SharedMemory shm.truncate(size); region = boost::interprocess::mapped_region(shm, access); - util::SimpleLogger().Write(logDEBUG) << "writeable memory allocated " << size - << " bytes"; + util::Log(logDEBUG) << "writeable memory allocated " << size << " bytes"; } } @@ -221,7 +220,7 @@ class SharedMemory static bool Remove(char *key) { - util::SimpleLogger().Write(logDEBUG) << "deallocating prev memory for key " << key; + util::Log(logDEBUG) << "deallocating prev memory for key " << key; return boost::interprocess::shared_memory_object::remove(key); } @@ -242,7 +241,7 @@ makeSharedMemory(const IdentifierT &id, const uint64_t size = 0, bool read_write { if (0 == size) { - throw util::exception("lock file does not exist, exiting"); + throw util::exception("lock file does not exist, exiting" + SOURCE_REF); } else { @@ -253,9 +252,9 @@ makeSharedMemory(const IdentifierT &id, const uint64_t size = 0, bool read_write } catch (const boost::interprocess::interprocess_exception &e) { - util::SimpleLogger().Write(logWARNING) << "caught exception: " << e.what() << ", code " - << e.get_error_code(); - throw util::exception(e.what()); + util::Log(logERROR) << "Error while attempting to allocate shared memory: " << e.what() + << ", code " << e.get_error_code(); + throw util::exception(e.what() + SOURCE_REF); } } } diff --git a/include/util/exception_utils.hpp b/include/util/exception_utils.hpp new file mode 100644 index 00000000000..978ee84c585 --- /dev/null +++ b/include/util/exception_utils.hpp @@ -0,0 +1,15 @@ +#ifndef SOURCE_MACROS_HPP +#define SOURCE_MACROS_HPP +#include + +// Helper macros, don't use these ones +// STRIP the OSRM_PROJECT_DIR from the front of a filename. 
Expected to come // from CMake's CURRENT_SOURCE_DIR, which doesn't have a trailing /, hence the +1 +#define OSRM_PROJECT_RELATIVE_PATH(x) std::string(x).substr(strlen(OSRM_PROJECT_DIR) + 1) +// Return the path of a file, relative to the OSRM_PROJECT_DIR +#define OSRM_SOURCE_FILE OSRM_PROJECT_RELATIVE_PATH(__FILE__) + +// This is the macro to use +#define SOURCE_REF std::string(" (at ") + OSRM_SOURCE_FILE + ":" + std::to_string(__LINE__) + ")" + +#endif // SOURCE_MACROS_HPP \ No newline at end of file diff --git a/include/util/fingerprint_impl.hpp.in b/include/util/fingerprint_impl.hpp.in index 7b31d6dd701..a8efc7b7b23 100644 --- a/include/util/fingerprint_impl.hpp.in +++ b/include/util/fingerprint_impl.hpp.in @@ -1,4 +1,5 @@ #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include #include @@ -57,7 +58,7 @@ bool FingerPrint::TestGraphUtil(const FingerPrint &other) const { if (!IsMagicNumberOK(other)) { - throw exception("hsgr input file misses magic number. Check or reprocess the file"); + throw exception(std::string("hsgr input file misses magic number. Check or reprocess the file") + SOURCE_REF); } return std::equal(md5_graph, md5_graph + 32, other.md5_graph); } @@ -66,7 +67,7 @@ bool FingerPrint::TestContractor(const FingerPrint &other) const { if (!IsMagicNumberOK(other)) { - throw exception("osrm input file misses magic number. Check or reprocess the file"); + throw exception(std::string("osrm input file misses magic number. Check or reprocess the file") + SOURCE_REF); } return std::equal(md5_prepare, md5_prepare + 32, other.md5_prepare); } @@ -75,7 +76,7 @@ bool FingerPrint::TestRTree(const FingerPrint &other) const { if (!IsMagicNumberOK(other)) { - throw exception("r-tree input file misses magic number. Check or reprocess the file"); + throw exception(std::string("r-tree input file misses magic number. 
Check or reprocess the file") + SOURCE_REF); } return std::equal(md5_tree, md5_tree + 32, other.md5_tree); } @@ -84,7 +85,7 @@ bool FingerPrint::TestQueryObjects(const FingerPrint &other) const { if (!IsMagicNumberOK(other)) { - throw exception("missing magic number. Check or reprocess the file"); + throw exception(std::string("missing magic number. Check or reprocess the file") + SOURCE_REF); } return std::equal(md5_objects, md5_objects + 32, other.md5_objects); } diff --git a/include/util/geojson_debug_logger.hpp b/include/util/geojson_debug_logger.hpp index 353a4342ef1..dae53dd6419 100644 --- a/include/util/geojson_debug_logger.hpp +++ b/include/util/geojson_debug_logger.hpp @@ -7,7 +7,7 @@ #include "util/json_container.hpp" #include "util/json_renderer.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" namespace osrm { @@ -92,7 +92,7 @@ class GeojsonLogger // out on log output. Such a sad life if (ofs.is_open()) { - util::SimpleLogger().Write(logWARNING) + util::Log(logWARNING) << "Overwriting " << logfile << ". Is this desired behaviour? 
If this message occurs more than once rethink the " "location of your Logger Guard."; diff --git a/include/util/graph_loader.hpp b/include/util/graph_loader.hpp index d8437386fd9..cfeb4636c71 100644 --- a/include/util/graph_loader.hpp +++ b/include/util/graph_loader.hpp @@ -8,7 +8,7 @@ #include "storage/io.hpp" #include "util/exception.hpp" #include "util/fingerprint.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" #include @@ -60,7 +60,7 @@ NodeID loadNodesFromFile(storage::io::FileReader &file_reader, std::vector &node_array) { NodeID number_of_nodes = file_reader.ReadElementCount32(); - SimpleLogger().Write() << "Importing number_of_nodes new = " << number_of_nodes << " nodes "; + Log() << "Importing number_of_nodes new = " << number_of_nodes << " nodes "; node_array.resize(number_of_nodes); @@ -99,14 +99,14 @@ inline NodeID loadEdgesFromFile(storage::io::FileReader &file_reader, BOOST_ASSERT(sizeof(EdgeID) == sizeof(number_of_edges)); edge_list.resize(number_of_edges); - SimpleLogger().Write() << " and " << number_of_edges << " edges "; + Log() << " and " << number_of_edges << " edges "; file_reader.ReadInto(edge_list.data(), number_of_edges); BOOST_ASSERT(edge_list.size() > 0); #ifndef NDEBUG - SimpleLogger().Write() << "Validating loaded edges..."; + Log() << "Validating loaded edges..."; tbb::parallel_sort( edge_list.begin(), edge_list.end(), @@ -129,7 +129,7 @@ inline NodeID loadEdgesFromFile(storage::io::FileReader &file_reader, } #endif - SimpleLogger().Write() << "Graph loaded ok and has " << edge_list.size() << " edges"; + Log() << "Graph loaded ok and has " << edge_list.size() << " edges"; return number_of_edges; } diff --git a/include/util/guidance/toolkit.hpp b/include/util/guidance/toolkit.hpp index 36c6d5270cb..c884ae656eb 100644 --- a/include/util/guidance/toolkit.hpp +++ b/include/util/guidance/toolkit.hpp @@ -11,8 +11,8 @@ #include "util/attributes.hpp" #include "util/guidance/bearing_class.hpp" 
#include "util/guidance/entry_class.hpp" +#include "util/log.hpp" #include "util/name_table.hpp" -#include "util/simple_logger.hpp" #include #include diff --git a/include/util/io.hpp b/include/util/io.hpp index 28596cc41b0..f450efc1e33 100644 --- a/include/util/io.hpp +++ b/include/util/io.hpp @@ -1,7 +1,7 @@ #ifndef OSRM_INCLUDE_UTIL_IO_HPP_ #define OSRM_INCLUDE_UTIL_IO_HPP_ -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -125,9 +125,10 @@ void deserializeAdjacencyArray(const std::string &filename, // offsets have to match up with the size of the data if (offsets.empty() || (offsets.back() != boost::numeric_cast(data.size()))) - throw util::exception("Error in " + filename + (offsets.empty() - ? "Offsets are empty" - : "Offset and data size do not match")); + throw util::exception( + "Error in " + filename + + (offsets.empty() ? "Offsets are empty" : "Offset and data size do not match") + + SOURCE_REF); } inline bool serializeFlags(const boost::filesystem::path &path, const std::vector &flags) @@ -153,8 +154,7 @@ inline bool serializeFlags(const boost::filesystem::path &path, const std::vecto ++chunk_count; flag_stream.write(reinterpret_cast(&chunk), sizeof(chunk)); } - SimpleLogger().Write() << "Wrote " << number_of_bits << " bits in " << chunk_count - << " chunks (Flags)."; + Log() << "Wrote " << number_of_bits << " bits in " << chunk_count << " chunks (Flags)."; return static_cast(flag_stream); } diff --git a/include/util/log.hpp b/include/util/log.hpp new file mode 100644 index 00000000000..dc08e929cb7 --- /dev/null +++ b/include/util/log.hpp @@ -0,0 +1,70 @@ +#ifndef LOG_HPP +#define LOG_HPP + +#include +#include +#include + +enum LogLevel +{ + logINFO, + logWARNING, + logERROR, + logDEBUG +}; + +namespace osrm +{ +namespace util +{ + +class LogPolicy +{ + public: + void Unmute(); + + void Mute(); + + bool IsMute() const; + + static LogPolicy &GetInstance(); + + LogPolicy(const LogPolicy &) = delete; + LogPolicy 
&operator=(const LogPolicy &) = delete; + + private: + LogPolicy() : m_is_mute(true) {} + std::atomic m_is_mute; +}; + +class Log +{ + public: + Log(LogLevel level_ = logINFO); + Log(LogLevel level_, std::ostream &ostream); + + virtual ~Log(); + std::mutex &get_mutex(); + + template inline std::ostream &operator<<(const T &data) { return stream << data; } + + protected: + LogLevel level; + std::ostringstream buffer; + std::ostream &stream; +}; + +/** + * Modified logger - this one doesn't buffer - it writes directly to stdout, + * and the final newline is only printed when the object is destructed. + * Useful for logging situations where you don't want to newline right away + */ +class UnbufferedLog : public Log +{ + public: + UnbufferedLog(LogLevel level_ = logINFO); +}; +} +} + +#endif /* LOG_HPP */ diff --git a/include/util/percent.hpp b/include/util/percent.hpp index eeee33ad11d..9bf11832ef8 100644 --- a/include/util/percent.hpp +++ b/include/util/percent.hpp @@ -5,6 +5,7 @@ #include #include "util/isatty.hpp" +#include "util/log.hpp" namespace osrm { @@ -13,8 +14,13 @@ namespace util class Percent { + Log &log; + public: - explicit Percent(unsigned max_value, unsigned step = 5) { Reinit(max_value, step); } + explicit Percent(Log &log_, unsigned max_value, unsigned step = 5) : log{log_} + { + Reinit(max_value, step); + } // Reinitializes void Reinit(unsigned max_value, unsigned step = 5) @@ -36,7 +42,7 @@ class Percent PrintPercent(current_value / static_cast(m_max_value) * 100.); } if (current_value + 1 == m_max_value) - std::cout << " 100%" << std::endl; + log << " 100%"; } void PrintIncrement() @@ -67,19 +73,17 @@ class Percent m_last_percent += m_step; if (m_last_percent % 10 == 0) { - std::cout << " " << m_last_percent << "% "; + log << " " << m_last_percent << "% "; } else { - std::cout << "."; + log << "."; } // When not on a TTY, print newlines after each progress indicator so // so that progress is visible to line-buffered logging systems if 
(!IsStdoutATTY()) - std::cout << std::endl; - - std::cout.flush(); + log << "" << std::endl; } } }; diff --git a/include/util/shared_memory_vector_wrapper.hpp b/include/util/shared_memory_vector_wrapper.hpp index 2e31f0b8bac..6da3b0eb01a 100644 --- a/include/util/shared_memory_vector_wrapper.hpp +++ b/include/util/shared_memory_vector_wrapper.hpp @@ -3,7 +3,7 @@ #include -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include diff --git a/include/util/simple_logger.hpp b/include/util/simple_logger.hpp deleted file mode 100644 index f71f0ff7de1..00000000000 --- a/include/util/simple_logger.hpp +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef SIMPLE_LOGGER_HPP -#define SIMPLE_LOGGER_HPP - -#include -#include -#include - -enum LogLevel -{ - logINFO, - logWARNING, - logDEBUG -}; - -namespace osrm -{ -namespace util -{ - -class LogPolicy -{ - public: - void Unmute(); - - void Mute(); - - bool IsMute() const; - - static LogPolicy &GetInstance(); - - LogPolicy(const LogPolicy &) = delete; - LogPolicy &operator=(const LogPolicy &) = delete; - - private: - LogPolicy() : m_is_mute(true) {} - std::atomic m_is_mute; -}; - -class SimpleLogger -{ - public: - SimpleLogger(); - - virtual ~SimpleLogger(); - std::mutex &get_mutex(); - std::ostringstream &Write(LogLevel l = logINFO) noexcept; - - private: - std::ostringstream os; - LogLevel level; -}; -} -} - -#endif /* SIMPLE_LOGGER_HPP */ diff --git a/include/util/static_rtree.hpp b/include/util/static_rtree.hpp index 1daf85ed121..f979cd51574 100644 --- a/include/util/static_rtree.hpp +++ b/include/util/static_rtree.hpp @@ -378,7 +378,8 @@ class StaticRTree catch (const std::exception &exc) { throw exception(boost::str(boost::format("Leaf file %1% mapping failed: %2%") % - leaf_file % exc.what())); + leaf_file % exc.what()) + + SOURCE_REF); } } diff --git a/scripts/tidy.sh b/scripts/tidy.sh index a9d1a1a0e4d..660cba1672c 100755 --- a/scripts/tidy.sh +++ b/scripts/tidy.sh @@ -7,12 +7,16 @@ set -o nounset # Runs the Clang Tidy 
Tool in parallel on the code base. # Requires a compilation database in the build directory. +# This works on both OSX and Linux, it's a POSIX thingy +NPROC=$(getconf _NPROCESSORS_ONLN) -find src include unit_tests -type f -name '*.hpp' -o -name '*.cpp' \ + +find src include unit_tests -type f -name '*.hpp' -o -name '*.cpp' -print0 \ | xargs \ + -0 \ -I{} \ - -P $(nproc) \ - clang-tidy \ + -n 1 \ + ./clang+llvm-3.9.0-x86_64-apple-darwin/bin/clang-tidy \ -p build \ -header-filter='.*' \ {} diff --git a/src/contractor/contractor.cpp b/src/contractor/contractor.cpp index 5ba2e0cbbb6..54985472e55 100644 --- a/src/contractor/contractor.cpp +++ b/src/contractor/contractor.cpp @@ -9,10 +9,11 @@ #include "storage/io.hpp" #include "storage/io.hpp" #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/graph_loader.hpp" #include "util/integer_range.hpp" #include "util/io.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/static_graph.hpp" #include "util/static_rtree.hpp" #include "util/string_util.hpp" @@ -105,13 +106,13 @@ EdgeWeight getNewWeight(IterType speed_iter, if (old_weight >= (new_segment_weight * log_edge_updates_factor)) { auto speed_file = segment_speed_filenames.at(speed_iter->speed_source.source - 1); - util::SimpleLogger().Write(logWARNING) - << "[weight updates] Edge weight update from " << old_secs << "s to " << new_secs - << "s New speed: " << speed_iter->speed_source.speed << " kph" - << ". Old speed: " << approx_original_speed << " kph" - << ". Segment length: " << segment_length << " m" - << ". Segment: " << speed_iter->segment.from << "," << speed_iter->segment.to - << " based on " << speed_file; + util::Log(logWARNING) << "[weight updates] Edge weight update from " << old_secs + << "s to " << new_secs + << "s New speed: " << speed_iter->speed_source.speed << " kph" + << ". Old speed: " << approx_original_speed << " kph" + << ". Segment length: " << segment_length << " m" + << ". 
Segment: " << speed_iter->segment.from << "," + << speed_iter->segment.to << " based on " << speed_file; } } @@ -131,12 +132,12 @@ int Contractor::Run() if (config.core_factor > 1.0 || config.core_factor < 0) { - throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)"); + throw util::exception("Core factor must be between 0.0 to 1.0 (inclusive)" + SOURCE_REF); } TIMER_START(preparing); - util::SimpleLogger().Write() << "Loading edge-expanded graph representation"; + util::Log() << "Loading edge-expanded graph representation"; util::DeallocatingVector edge_based_edge_list; @@ -163,7 +164,7 @@ int Contractor::Run() ReadNodeLevels(node_levels); } - util::SimpleLogger().Write() << "Reading node weights."; + util::Log() << "Reading node weights."; std::vector node_weights; std::string node_file_name = config.osrm_input_path.string() + ".enw"; @@ -172,7 +173,7 @@ int Contractor::Run() storage::io::FileReader::VerifyFingerprint); node_file.DeserializeVector(node_weights); } - util::SimpleLogger().Write() << "Done reading node weights."; + util::Log() << "Done reading node weights."; util::DeallocatingVector contracted_edge_list; ContractGraph(max_edge_id, @@ -183,7 +184,7 @@ int Contractor::Run() node_levels); TIMER_STOP(contraction); - util::SimpleLogger().Write() << "Contraction took " << TIMER_SEC(contraction) << " sec"; + util::Log() << "Contraction took " << TIMER_SEC(contraction) << " sec"; std::size_t number_of_used_edges = WriteContractedGraph(max_edge_id, contracted_edge_list); WriteCoreNodeMarker(std::move(is_core_node)); @@ -199,11 +200,11 @@ int Contractor::Run() const auto edges_per_second = static_cast(number_of_used_edges / TIMER_SEC(contraction)); - util::SimpleLogger().Write() << "Preprocessing : " << TIMER_SEC(preparing) << " seconds"; - util::SimpleLogger().Write() << "Contraction: " << nodes_per_second << " nodes/sec and " - << edges_per_second << " edges/sec"; + util::Log() << "Preprocessing : " << TIMER_SEC(preparing) << " 
seconds"; + util::Log() << "Contraction: " << nodes_per_second << " nodes/sec and " << edges_per_second + << " edges/sec"; - util::SimpleLogger().Write() << "finished preprocessing"; + util::Log() << "finished preprocessing"; return 0; } @@ -309,10 +310,13 @@ parse_segment_lookup_from_csv_files(const std::vector &segment_spee std::uint64_t to_node_id{}; unsigned speed{}; + std::size_t line_number = 0; + std::for_each( segment_speed_file_reader.GetLineIteratorBegin(), segment_speed_file_reader.GetLineIteratorEnd(), [&](const std::string &line) { + ++line_number; using namespace boost::spirit::qi; @@ -329,7 +333,11 @@ parse_segment_lookup_from_csv_files(const std::vector &segment_spee speed); // if (!ok || it != last) - throw util::exception{"Segment speed file " + filename + " malformed"}; + { + const std::string message{"Segment speed file " + filename + + " malformed on line " + std::to_string(line_number)}; + throw util::exception(message + SOURCE_REF); + } SegmentSpeedSource val{{OSMNodeID{from_node_id}, OSMNodeID{to_node_id}}, {speed, static_cast(file_id)}}; @@ -337,8 +345,7 @@ parse_segment_lookup_from_csv_files(const std::vector &segment_spee local.push_back(std::move(val)); }); - util::SimpleLogger().Write() << "Loaded speed file " << filename << " with " << local.size() - << " speeds"; + util::Log() << "Loaded speed file " << filename << " with " << local.size() << " speeds"; { Mutex::scoped_lock _{flatten_mutex}; @@ -349,7 +356,14 @@ parse_segment_lookup_from_csv_files(const std::vector &segment_spee } }; - tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file); + try + { + tbb::parallel_for(std::size_t{0}, segment_speed_filenames.size(), parse_segment_speed_file); + } + catch (const tbb::captured_exception &e) + { + throw util::exception(e.what() + SOURCE_REF); + } // With flattened map-ish view of all the files, sort and unique them on from,to,source // The greater '>' is used here since we want to give files later on 
higher precedence @@ -370,9 +384,8 @@ parse_segment_lookup_from_csv_files(const std::vector &segment_spee flatten.erase(it, end(flatten)); - util::SimpleLogger().Write() << "In total loaded " << segment_speed_filenames.size() - << " speed file(s) with a total of " << flatten.size() - << " unique values"; + util::Log() << "In total loaded " << segment_speed_filenames.size() + << " speed file(s) with a total of " << flatten.size() << " unique values"; return flatten; } @@ -399,10 +412,13 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector &turn_pe std::uint64_t to_node_id{}; double penalty{}; + std::size_t line_number = 0; + std::for_each( turn_penalty_file_reader.GetLineIteratorBegin(), turn_penalty_file_reader.GetLineIteratorEnd(), [&](const std::string &line) { + ++line_number; using namespace boost::spirit::qi; @@ -420,7 +436,11 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector &turn_pe penalty); // if (!ok || it != last) - throw util::exception{"Turn penalty file " + filename + " malformed"}; + { + const std::string message{"Turn penalty file " + filename + + " malformed on line " + std::to_string(line_number)}; + throw util::exception(message + SOURCE_REF); + } TurnPenaltySource val{ {OSMNodeID{from_node_id}, OSMNodeID{via_node_id}, OSMNodeID{to_node_id}}, @@ -428,8 +448,8 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector &turn_pe local.push_back(std::move(val)); }); - util::SimpleLogger().Write() << "Loaded penalty file " << filename << " with " - << local.size() << " turn penalties"; + util::Log() << "Loaded penalty file " << filename << " with " << local.size() + << " turn penalties"; { Mutex::scoped_lock _{flatten_mutex}; @@ -440,7 +460,14 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector &turn_pe } }; - tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file); + try + { + tbb::parallel_for(std::size_t{0}, turn_penalty_filenames.size(), parse_turn_penalty_file); + } + catch 
(const tbb::captured_exception &e) + { + throw util::exception(e.what() + SOURCE_REF); + } // With flattened map-ish view of all the files, sort and unique them on from,to,source // The greater '>' is used here since we want to give files later on higher precedence @@ -463,9 +490,8 @@ parse_turn_penalty_lookup_from_csv_files(const std::vector &turn_pe map.erase(it, end(map)); - util::SimpleLogger().Write() << "In total loaded " << turn_penalty_filenames.size() - << " turn penalty file(s) with a total of " << map.size() - << " unique values"; + util::Log() << "In total loaded " << turn_penalty_filenames.size() + << " turn penalty file(s) with a total of " << map.size() << " unique values"; return map; } @@ -486,9 +512,10 @@ EdgeID Contractor::LoadEdgeExpandedGraph( const double log_edge_updates_factor) { if (segment_speed_filenames.size() > 255 || turn_penalty_filenames.size() > 255) - throw util::exception("Limit of 255 segment speed and turn penalty files each reached"); + throw util::exception("Limit of 255 segment speed and turn penalty files each reached" + + SOURCE_REF); - util::SimpleLogger().Write() << "Opening " << edge_based_graph_filename; + util::Log() << "Opening " << edge_based_graph_filename; auto mmap_file = [](const std::string &filename) { using boost::interprocess::file_mapping; @@ -542,8 +569,7 @@ EdgeID Contractor::LoadEdgeExpandedGraph( graph_header.fingerprint.TestContractor(fingerprint_valid); edge_based_edge_list.resize(graph_header.number_of_edges); - util::SimpleLogger().Write() << "Reading " << graph_header.number_of_edges - << " edges from the edge based graph"; + util::Log() << "Reading " << graph_header.number_of_edges << " edges from the edge based graph"; SegmentSpeedSourceFlatMap segment_speed_lookup; TurnPenaltySourceFlatMap turn_penalty_lookup; @@ -735,15 +761,15 @@ EdgeID Contractor::LoadEdgeExpandedGraph( { if (i == LUA_SOURCE) { - util::SimpleLogger().Write() << "Used " << merged_counters[LUA_SOURCE] - << " speeds from LUA 
profile or input map"; + util::Log() << "Used " << merged_counters[LUA_SOURCE] + << " speeds from LUA profile or input map"; } else { // segments_speeds_counters has 0 as LUA, segment_speed_filenames not, thus we need // to susbstract 1 to avoid off-by-one error - util::SimpleLogger().Write() << "Used " << merged_counters[i] << " speeds from " - << segment_speed_filenames[i - 1]; + util::Log() << "Used " << merged_counters[i] << " speeds from " + << segment_speed_filenames[i - 1]; } } } @@ -756,7 +782,8 @@ EdgeID Contractor::LoadEdgeExpandedGraph( std::ofstream geometry_stream(geometry_filename, std::ios::binary); if (!geometry_stream) { - throw util::exception("Failed to open " + geometry_filename + " for writing"); + const std::string message{"Failed to open " + geometry_filename + " for writing"}; + throw util::exception(message + SOURCE_REF); } const unsigned number_of_indices = m_geometry_indices.size(); const unsigned number_of_compressed_geometries = m_geometry_node_list.size(); @@ -777,7 +804,9 @@ EdgeID Contractor::LoadEdgeExpandedGraph( std::ofstream datasource_stream(datasource_indexes_filename, std::ios::binary); if (!datasource_stream) { - throw util::exception("Failed to open " + datasource_indexes_filename + " for writing"); + const std::string message{"Failed to open " + datasource_indexes_filename + + " for writing"}; + throw util::exception(message + SOURCE_REF); } std::uint64_t number_of_datasource_entries = m_geometry_datasource.size(); datasource_stream.write(reinterpret_cast(&number_of_datasource_entries), @@ -793,7 +822,9 @@ EdgeID Contractor::LoadEdgeExpandedGraph( std::ofstream datasource_stream(datasource_names_filename, std::ios::binary); if (!datasource_stream) { - throw util::exception("Failed to open " + datasource_names_filename + " for writing"); + const std::string message{"Failed to open " + datasource_names_filename + + " for writing"}; + throw util::exception(message + SOURCE_REF); } datasource_stream << "lua profile" << 
std::endl; for (auto const &name : segment_speed_filenames) @@ -894,11 +925,11 @@ EdgeID Contractor::LoadEdgeExpandedGraph( if (new_turn_weight + new_weight < compressed_edge_nodes) { - util::SimpleLogger().Write(logWARNING) - << "turn penalty " << turn_iter->penalty_source.penalty << " for turn " - << penaltyblock->from_id << ", " << penaltyblock->via_id << ", " - << penaltyblock->to_id << " is too negative: clamping turn weight to " - << compressed_edge_nodes; + util::Log(logWARNING) << "turn penalty " << turn_iter->penalty_source.penalty + << " for turn " << penaltyblock->from_id << ", " + << penaltyblock->via_id << ", " << penaltyblock->to_id + << " is too negative: clamping turn weight to " + << compressed_edge_nodes; } inbuffer.weight = std::max(new_turn_weight + new_weight, compressed_edge_nodes); @@ -915,7 +946,7 @@ EdgeID Contractor::LoadEdgeExpandedGraph( edge_based_edge_list.emplace_back(std::move(inbuffer)); } - util::SimpleLogger().Write() << "Done reading edges"; + util::Log() << "Done reading edges"; return graph_header.max_edge_id; } @@ -964,8 +995,7 @@ Contractor::WriteContractedGraph(unsigned max_node_id, // Sorting contracted edges in a way that the static query graph can read some in in-place. 
tbb::parallel_sort(contracted_edge_list.begin(), contracted_edge_list.end()); const std::uint64_t contracted_edge_count = contracted_edge_list.size(); - util::SimpleLogger().Write() << "Serializing compacted graph of " << contracted_edge_count - << " edges"; + util::Log() << "Serializing compacted graph of " << contracted_edge_count << " edges"; const util::FingerPrint fingerprint = util::FingerPrint::GetValid(); boost::filesystem::ofstream hsgr_output_stream(config.graph_output_path, std::ios::binary); @@ -982,15 +1012,14 @@ Contractor::WriteContractedGraph(unsigned max_node_id, return tmp_max; }(); - util::SimpleLogger().Write(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes"; - util::SimpleLogger().Write(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) - << " nodes"; + util::Log(logDEBUG) << "input graph has " << (max_node_id + 1) << " nodes"; + util::Log(logDEBUG) << "contracted graph has " << (max_used_node_id + 1) << " nodes"; std::vector::NodeArrayEntry> node_array; // make sure we have at least one sentinel node_array.resize(max_node_id + 2); - util::SimpleLogger().Write() << "Building node array"; + util::Log() << "Building node array"; util::StaticGraph::EdgeIterator edge = 0; util::StaticGraph::EdgeIterator position = 0; util::StaticGraph::EdgeIterator last_edge; @@ -1014,11 +1043,11 @@ Contractor::WriteContractedGraph(unsigned max_node_id, node_array[sentinel_counter].first_edge = contracted_edge_count; } - util::SimpleLogger().Write() << "Serializing node array"; + util::Log() << "Serializing node array"; RangebasedCRC32 crc32_calculator; const unsigned edges_crc32 = crc32_calculator(contracted_edge_list); - util::SimpleLogger().Write() << "Writing CRC32: " << edges_crc32; + util::Log() << "Writing CRC32: " << edges_crc32; const std::uint64_t node_array_size = node_array.size(); // serialize crc32, aka checksum @@ -1036,7 +1065,7 @@ Contractor::WriteContractedGraph(unsigned max_node_id, } // serialize all edges - 
util::SimpleLogger().Write() << "Building edge array"; + util::Log() << "Building edge array"; std::size_t number_of_used_edges = 0; util::StaticGraph::EdgeArrayEntry current_edge; @@ -1055,15 +1084,15 @@ Contractor::WriteContractedGraph(unsigned max_node_id, #ifndef NDEBUG if (current_edge.data.weight <= 0) { - util::SimpleLogger().Write(logWARNING) - << "Edge: " << edge << ",source: " << contracted_edge_list[edge].source - << ", target: " << contracted_edge_list[edge].target - << ", weight: " << current_edge.data.weight; - - util::SimpleLogger().Write(logWARNING) << "Failed at adjacency list of node " - << contracted_edge_list[edge].source << "/" - << node_array.size() - 1; - return 1; + util::Log(logWARNING) << "Edge: " << edge + << ",source: " << contracted_edge_list[edge].source + << ", target: " << contracted_edge_list[edge].target + << ", weight: " << current_edge.data.weight; + + util::Log(logWARNING) << "Failed at adjacency list of node " + << contracted_edge_list[edge].source << "/" + << node_array.size() - 1; + throw util::exception("Edge weight is <= 0" + SOURCE_REF); } #endif hsgr_output_stream.write((char *)¤t_edge, diff --git a/src/engine/engine.cpp b/src/engine/engine.cpp index a34d1a12b04..9ebe9bf2abc 100644 --- a/src/engine/engine.cpp +++ b/src/engine/engine.cpp @@ -7,7 +7,7 @@ #include "engine/datafacade/shared_memory_datafacade.hpp" #include "storage/shared_barriers.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -68,7 +68,9 @@ Engine::Engine(const EngineConfig &config) if (!DataWatchdog::TryConnect()) { throw util::exception( - "No shared memory blocks found, have you forgotten to run osrm-datastore?"); + std::string( + "No shared memory blocks found, have you forgotten to run osrm-datastore?") + + SOURCE_REF); } watchdog = std::make_unique(); @@ -78,7 +80,7 @@ Engine::Engine(const EngineConfig &config) { if (!config.storage_config.IsValid()) { - throw util::exception("Invalid file paths given!"); + throw 
util::exception("Invalid file paths given!" + SOURCE_REF); } immutable_data_facade = std::make_shared(config.storage_config); diff --git a/src/extractor/compressed_edge_container.cpp b/src/extractor/compressed_edge_container.cpp index aa11da87d6e..8332e271204 100644 --- a/src/extractor/compressed_edge_container.cpp +++ b/src/extractor/compressed_edge_container.cpp @@ -1,5 +1,5 @@ #include "extractor/compressed_edge_container.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -300,14 +300,13 @@ void CompressedEdgeContainer::PrintStatistics() const longest_chain_length = std::max(longest_chain_length, (uint64_t)current_vector.size()); } - util::SimpleLogger().Write() - << "Geometry successfully removed:" - "\n compressed edges: " - << compressed_edges << "\n compressed geometries: " << compressed_geometries - << "\n longest chain length: " << longest_chain_length << "\n cmpr ratio: " - << ((float)compressed_edges / std::max(compressed_geometries, (uint64_t)1)) - << "\n avg chain length: " - << (float)compressed_geometries / std::max((uint64_t)1, compressed_edges); + util::Log() << "Geometry successfully removed:" + "\n compressed edges: " + << compressed_edges << "\n compressed geometries: " << compressed_geometries + << "\n longest chain length: " << longest_chain_length << "\n cmpr ratio: " + << ((float)compressed_edges / std::max(compressed_geometries, (uint64_t)1)) + << "\n avg chain length: " + << (float)compressed_geometries / std::max((uint64_t)1, compressed_edges); } const CompressedEdgeContainer::OnewayEdgeBucket & diff --git a/src/extractor/edge_based_graph_factory.cpp b/src/extractor/edge_based_graph_factory.cpp index 432b0b75c98..a2abaf21783 100644 --- a/src/extractor/edge_based_graph_factory.cpp +++ b/src/extractor/edge_based_graph_factory.cpp @@ -6,8 +6,8 @@ #include "util/exception.hpp" #include "util/guidance/turn_bearing.hpp" #include "util/integer_range.hpp" +#include "util/log.hpp" #include "util/percent.hpp" 
-#include "util/simple_logger.hpp" #include "util/timing_util.hpp" #include "extractor/guidance/toolkit.hpp" @@ -213,10 +213,10 @@ void EdgeBasedGraphFactory::Run(ScriptingEnvironment &scripting_environment, TIMER_STOP(generate_edges); - util::SimpleLogger().Write() << "Timing statistics for edge-expanded graph:"; - util::SimpleLogger().Write() << "Renumbering edges: " << TIMER_SEC(renumber) << "s"; - util::SimpleLogger().Write() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s"; - util::SimpleLogger().Write() << "Generating edges: " << TIMER_SEC(generate_edges) << "s"; + util::Log() << "Timing statistics for edge-expanded graph:"; + util::Log() << "Renumbering edges: " << TIMER_SEC(renumber) << "s"; + util::Log() << "Generating nodes: " << TIMER_SEC(generate_nodes) << "s"; + util::Log() << "Generating edges: " << TIMER_SEC(generate_edges) << "s"; } /// Renumbers all _forward_ edges and sets the edge_id. @@ -258,40 +258,44 @@ unsigned EdgeBasedGraphFactory::RenumberEdges() /// Creates the nodes in the edge expanded graph from edges in the node-based graph. void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes() { - util::Percent progress(m_node_based_graph->GetNumberOfNodes()); + util::Log() << "Generating edge expanded nodes ... 
"; + { + util::UnbufferedLog log; + util::Percent progress(log, m_node_based_graph->GetNumberOfNodes()); - m_compressed_edge_container.InitializeBothwayVector(); + m_compressed_edge_container.InitializeBothwayVector(); - // loop over all edges and generate new set of nodes - for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes())) - { - BOOST_ASSERT(node_u != SPECIAL_NODEID); - BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes()); - progress.PrintStatus(node_u); - for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u)) + // loop over all edges and generate new set of nodes + for (const auto node_u : util::irange(0u, m_node_based_graph->GetNumberOfNodes())) { - const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1); - BOOST_ASSERT(e1 != SPECIAL_EDGEID); - const NodeID node_v = m_node_based_graph->GetTarget(e1); - - BOOST_ASSERT(SPECIAL_NODEID != node_v); - // pick only every other edge, since we have every edge as an outgoing - // and incoming egde - if (node_u > node_v) + BOOST_ASSERT(node_u != SPECIAL_NODEID); + BOOST_ASSERT(node_u < m_node_based_graph->GetNumberOfNodes()); + progress.PrintStatus(node_u); + for (EdgeID e1 : m_node_based_graph->GetAdjacentEdgeRange(node_u)) { - continue; - } + const EdgeData &edge_data = m_node_based_graph->GetEdgeData(e1); + BOOST_ASSERT(e1 != SPECIAL_EDGEID); + const NodeID node_v = m_node_based_graph->GetTarget(e1); + + BOOST_ASSERT(SPECIAL_NODEID != node_v); + // pick only every other edge, since we have every edge as an outgoing + // and incoming egde + if (node_u > node_v) + { + continue; + } - BOOST_ASSERT(node_u < node_v); + BOOST_ASSERT(node_u < node_v); - // if we found a non-forward edge reverse and try again - if (edge_data.edge_id == SPECIAL_NODEID) - { - InsertEdgeBasedNode(node_v, node_u); - } - else - { - InsertEdgeBasedNode(node_u, node_v); + // if we found a non-forward edge reverse and try again + if (edge_data.edge_id == SPECIAL_NODEID) + { + 
InsertEdgeBasedNode(node_v, node_u); + } + else + { + InsertEdgeBasedNode(node_u, node_v); + } } } } @@ -299,8 +303,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedNodes() BOOST_ASSERT(m_edge_based_node_list.size() == m_edge_based_node_is_startpoint.size()); BOOST_ASSERT(m_max_edge_id + 1 == m_edge_based_node_weights.size()); - util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() - << " nodes in edge-expanded graph"; + util::Log() << "Generated " << m_edge_based_node_list.size() << " nodes in edge-expanded graph"; } /// Actually it also generates OriginalEdgeData and serializes them... @@ -312,7 +315,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( const std::string &edge_fixed_penalties_filename, const bool generate_edge_lookup) { - util::SimpleLogger().Write() << "generating edge-expanded edges"; + util::Log() << "Generating edge-expanded edges "; std::size_t node_based_edge_counter = 0; std::size_t original_edges_counter = 0; @@ -341,7 +344,6 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( // Loop over all turns and generate new set of edges. // Three nested loop look super-linear, but we are dealing with a (kind of) // linear number of turns only. - util::Percent progress(m_node_based_graph->GetNumberOfNodes()); SuffixTable street_name_suffix_table(scripting_environment); guidance::TurnAnalysis turn_analysis(*m_node_based_graph, m_node_info_list, @@ -363,260 +365,275 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( bearing_class_by_node_based_node.resize(m_node_based_graph->GetNumberOfNodes(), std::numeric_limits::max()); - // going over all nodes (which form the center of an intersection), we compute all - // possible turns along these intersections. 
- for (const auto node_at_center_of_intersection : - util::irange(0u, m_node_based_graph->GetNumberOfNodes())) { - progress.PrintStatus(node_at_center_of_intersection); - - const auto shape_result = - turn_analysis.ComputeIntersectionShapes(node_at_center_of_intersection); - - // all nodes in the graph are connected in both directions. We check all outgoing nodes to - // find the incoming edge. This is a larger search overhead, but the cost we need to pay to - // generate edges here is worth the additional search overhead. - // - // a -> b <-> c - // | - // v - // d - // - // will have: - // a: b,rev=0 - // b: a,rev=1 c,rev=0 d,rev=0 - // c: b,rev=0 - // - // From the flags alone, we cannot determine which nodes are connected to `b` by an outgoing - // edge. Therefore, we have to search all connected edges for edges entering `b` - for (const EdgeID outgoing_edge : - m_node_based_graph->GetAdjacentEdgeRange(node_at_center_of_intersection)) - { - const NodeID node_along_road_entering = m_node_based_graph->GetTarget(outgoing_edge); - - const auto incoming_edge = m_node_based_graph->FindEdge(node_along_road_entering, - node_at_center_of_intersection); + util::UnbufferedLog log; - if (m_node_based_graph->GetEdgeData(incoming_edge).reversed) - continue; - - ++node_based_edge_counter; - - auto intersection_with_flags_and_angles = - turn_analysis.GetIntersectionGenerator().TransformIntersectionShapeIntoView( - node_along_road_entering, - incoming_edge, - shape_result.normalised_intersection_shape, - shape_result.intersection_shape, - shape_result.merging_map); - - auto intersection = turn_analysis.AssignTurnTypes( - node_along_road_entering, incoming_edge, intersection_with_flags_and_angles); - - BOOST_ASSERT(intersection.valid()); - - intersection = turn_lane_handler.assignTurnLanes( - node_along_road_entering, incoming_edge, std::move(intersection)); - - // the entry class depends on the turn, so we have to classify the interesction for - // every edge - const auto 
turn_classification = classifyIntersection(intersection); - - const auto entry_class_id = [&](const util::guidance::EntryClass entry_class) { - if (0 == entry_class_hash.count(entry_class)) - { - const auto id = static_cast(entry_class_hash.size()); - entry_class_hash[entry_class] = id; - return id; - } - else - { - return entry_class_hash.find(entry_class)->second; - } - }(turn_classification.first); - - const auto bearing_class_id = [&](const util::guidance::BearingClass bearing_class) { - if (0 == bearing_class_hash.count(bearing_class)) - { - const auto id = static_cast(bearing_class_hash.size()); - bearing_class_hash[bearing_class] = id; - return id; - } - else - { - return bearing_class_hash.find(bearing_class)->second; - } - }(turn_classification.second); - bearing_class_by_node_based_node[node_at_center_of_intersection] = bearing_class_id; - - for (const auto &turn : intersection) + util::Percent progress(log, m_node_based_graph->GetNumberOfNodes()); + // going over all nodes (which form the center of an intersection), we compute all + // possible turns along these intersections. + for (const auto node_at_center_of_intersection : + util::irange(0u, m_node_based_graph->GetNumberOfNodes())) + { + progress.PrintStatus(node_at_center_of_intersection); + + const auto shape_result = + turn_analysis.ComputeIntersectionShapes(node_at_center_of_intersection); + + // all nodes in the graph are connected in both directions. We check all outgoing nodes + // to + // find the incoming edge. This is a larger search overhead, but the cost we need to pay + // to + // generate edges here is worth the additional search overhead. + // + // a -> b <-> c + // | + // v + // d + // + // will have: + // a: b,rev=0 + // b: a,rev=1 c,rev=0 d,rev=0 + // c: b,rev=0 + // + // From the flags alone, we cannot determine which nodes are connected to `b` by an + // outgoing + // edge. 
Therefore, we have to search all connected edges for edges entering `b` + for (const EdgeID outgoing_edge : + m_node_based_graph->GetAdjacentEdgeRange(node_at_center_of_intersection)) { - // only keep valid turns - if (!turn.entry_allowed) - continue; + const NodeID node_along_road_entering = + m_node_based_graph->GetTarget(outgoing_edge); - // only add an edge if turn is not prohibited - const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(incoming_edge); - const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(turn.eid); + const auto incoming_edge = m_node_based_graph->FindEdge( + node_along_road_entering, node_at_center_of_intersection); - BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id); - BOOST_ASSERT(!edge_data1.reversed); - BOOST_ASSERT(!edge_data2.reversed); + if (m_node_based_graph->GetEdgeData(incoming_edge).reversed) + continue; - // the following is the core of the loop. - unsigned distance = edge_data1.distance; - if (m_traffic_lights.find(node_at_center_of_intersection) != m_traffic_lights.end()) - { - distance += profile_properties.traffic_signal_penalty; - } + ++node_based_edge_counter; - const int32_t turn_penalty = - scripting_environment.GetTurnPenalty(180. - turn.angle); + auto intersection_with_flags_and_angles = + turn_analysis.GetIntersectionGenerator().TransformIntersectionShapeIntoView( + node_along_road_entering, + incoming_edge, + shape_result.normalised_intersection_shape, + shape_result.intersection_shape, + shape_result.merging_map); - const auto turn_instruction = turn.instruction; - if (turn_instruction.direction_modifier == guidance::DirectionModifier::UTurn) - { - distance += profile_properties.u_turn_penalty; - } + auto intersection = turn_analysis.AssignTurnTypes( + node_along_road_entering, incoming_edge, intersection_with_flags_and_angles); - // don't add turn penalty if it is not an actual turn. 
This heuristic is necessary - // since OSRM cannot handle looping roads/parallel roads - if (turn_instruction.type != guidance::TurnType::NoTurn) - distance += turn_penalty; - - const bool is_encoded_forwards = - m_compressed_edge_container.HasZippedEntryForForwardID(incoming_edge); - const bool is_encoded_backwards = - m_compressed_edge_container.HasZippedEntryForReverseID(incoming_edge); - BOOST_ASSERT(is_encoded_forwards || is_encoded_backwards); - if (is_encoded_forwards) - { - original_edge_data_vector.emplace_back( - GeometryID{m_compressed_edge_container.GetZippedPositionForForwardID( - incoming_edge), - true}, - edge_data1.name_id, - turn.lane_data_id, - turn_instruction, - entry_class_id, - edge_data1.travel_mode, - util::guidance::TurnBearing(intersection[0].bearing), - util::guidance::TurnBearing(turn.bearing)); - } - else if (is_encoded_backwards) - { - original_edge_data_vector.emplace_back( - GeometryID{m_compressed_edge_container.GetZippedPositionForReverseID( - incoming_edge), - false}, - edge_data1.name_id, - turn.lane_data_id, - turn_instruction, - entry_class_id, - edge_data1.travel_mode, - util::guidance::TurnBearing(intersection[0].bearing), - util::guidance::TurnBearing(turn.bearing)); - } + BOOST_ASSERT(intersection.valid()); - ++original_edges_counter; + intersection = turn_lane_handler.assignTurnLanes( + node_along_road_entering, incoming_edge, std::move(intersection)); - if (original_edge_data_vector.size() > 1024 * 1024 * 10) - { - FlushVectorToStream(edge_data_file, original_edge_data_vector); - } + // the entry class depends on the turn, so we have to classify the interesction for + // every edge + const auto turn_classification = classifyIntersection(intersection); - BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id); - BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id); - - // NOTE: potential overflow here if we hit 2^32 routable edges - BOOST_ASSERT(m_edge_based_edge_list.size() <= std::numeric_limits::max()); - 
m_edge_based_edge_list.emplace_back(edge_data1.edge_id, - edge_data2.edge_id, - m_edge_based_edge_list.size(), - distance, - true, - false); - BOOST_ASSERT(original_edges_counter == m_edge_based_edge_list.size()); - - // Here is where we write out the mapping between the edge-expanded edges, and - // the node-based edges that are originally used to calculate the `distance` - // for the edge-expanded edges. About 40 lines back, there is: - // - // unsigned distance = edge_data1.distance; - // - // This tells us that the weight for an edge-expanded-edge is based on the weight - // of the *source* node-based edge. Therefore, we will look up the individual - // segments of the source node-based edge, and write out a mapping between - // those and the edge-based-edge ID. - // External programs can then use this mapping to quickly perform - // updates to the edge-expanded-edge based directly on its ID. - if (generate_edge_lookup) + const auto entry_class_id = [&](const util::guidance::EntryClass entry_class) { + if (0 == entry_class_hash.count(entry_class)) + { + const auto id = static_cast(entry_class_hash.size()); + entry_class_hash[entry_class] = id; + return id; + } + else + { + return entry_class_hash.find(entry_class)->second; + } + }(turn_classification.first); + + const auto bearing_class_id = + [&](const util::guidance::BearingClass bearing_class) { + if (0 == bearing_class_hash.count(bearing_class)) + { + const auto id = static_cast(bearing_class_hash.size()); + bearing_class_hash[bearing_class] = id; + return id; + } + else + { + return bearing_class_hash.find(bearing_class)->second; + } + }(turn_classification.second); + bearing_class_by_node_based_node[node_at_center_of_intersection] = bearing_class_id; + + for (const auto &turn : intersection) { - const auto node_based_edges = - m_compressed_edge_container.GetBucketReference(incoming_edge); - NodeID previous = node_along_road_entering; - - const unsigned node_count = node_based_edges.size() + 1; - const 
QueryNode &first_node = m_node_info_list[previous]; + // only keep valid turns + if (!turn.entry_allowed) + continue; + + // only add an edge if turn is not prohibited + const EdgeData &edge_data1 = m_node_based_graph->GetEdgeData(incoming_edge); + const EdgeData &edge_data2 = m_node_based_graph->GetEdgeData(turn.eid); + + BOOST_ASSERT(edge_data1.edge_id != edge_data2.edge_id); + BOOST_ASSERT(!edge_data1.reversed); + BOOST_ASSERT(!edge_data2.reversed); + + // the following is the core of the loop. + unsigned distance = edge_data1.distance; + if (m_traffic_lights.find(node_at_center_of_intersection) != + m_traffic_lights.end()) + { + distance += profile_properties.traffic_signal_penalty; + } - lookup::SegmentHeaderBlock header = {node_count, first_node.node_id}; + const int32_t turn_penalty = + scripting_environment.GetTurnPenalty(180. - turn.angle); - edge_segment_file.write(reinterpret_cast(&header), - sizeof(header)); + const auto turn_instruction = turn.instruction; + if (turn_instruction.direction_modifier == guidance::DirectionModifier::UTurn) + { + distance += profile_properties.u_turn_penalty; + } - for (auto target_node : node_based_edges) + // don't add turn penalty if it is not an actual turn. 
This heuristic is + // necessary + // since OSRM cannot handle looping roads/parallel roads + if (turn_instruction.type != guidance::TurnType::NoTurn) + distance += turn_penalty; + + const bool is_encoded_forwards = + m_compressed_edge_container.HasZippedEntryForForwardID(incoming_edge); + const bool is_encoded_backwards = + m_compressed_edge_container.HasZippedEntryForReverseID(incoming_edge); + BOOST_ASSERT(is_encoded_forwards || is_encoded_backwards); + if (is_encoded_forwards) + { + original_edge_data_vector.emplace_back( + GeometryID{m_compressed_edge_container.GetZippedPositionForForwardID( + incoming_edge), + true}, + edge_data1.name_id, + turn.lane_data_id, + turn_instruction, + entry_class_id, + edge_data1.travel_mode, + util::guidance::TurnBearing(intersection[0].bearing), + util::guidance::TurnBearing(turn.bearing)); + } + else if (is_encoded_backwards) { - const QueryNode &from = m_node_info_list[previous]; - const QueryNode &to = m_node_info_list[target_node.node_id]; - const double segment_length = - util::coordinate_calculation::greatCircleDistance(from, to); + original_edge_data_vector.emplace_back( + GeometryID{m_compressed_edge_container.GetZippedPositionForReverseID( + incoming_edge), + false}, + edge_data1.name_id, + turn.lane_data_id, + turn_instruction, + entry_class_id, + edge_data1.travel_mode, + util::guidance::TurnBearing(intersection[0].bearing), + util::guidance::TurnBearing(turn.bearing)); + } - lookup::SegmentBlock nodeblock = { - to.node_id, segment_length, target_node.weight}; + ++original_edges_counter; - edge_segment_file.write(reinterpret_cast(&nodeblock), - sizeof(nodeblock)); - previous = target_node.node_id; + if (original_edge_data_vector.size() > 1024 * 1024 * 10) + { + FlushVectorToStream(edge_data_file, original_edge_data_vector); } - // We also now write out the mapping between the edge-expanded edges and the - // original nodes. 
Since each edge represents a possible maneuver, external - // programs can use this to quickly perform updates to edge weights in order - // to penalize certain turns. - - // If this edge is 'trivial' -- where the compressed edge corresponds - // exactly to an original OSM segment -- we can pull the turn's preceding - // node ID directly with `node_along_road_entering`; otherwise, we need to look - // up the node - // immediately preceding the turn from the compressed edge container. - const bool isTrivial = m_compressed_edge_container.IsTrivial(incoming_edge); - - const auto &from_node = - isTrivial - ? m_node_info_list[node_along_road_entering] - : m_node_info_list[m_compressed_edge_container.GetLastEdgeSourceID( - incoming_edge)]; - const auto &via_node = - m_node_info_list[m_compressed_edge_container.GetLastEdgeTargetID( - incoming_edge)]; - const auto &to_node = - m_node_info_list[m_compressed_edge_container.GetFirstEdgeTargetID( - turn.eid)]; - - const unsigned fixed_penalty = distance - edge_data1.distance; - lookup::PenaltyBlock penaltyblock = { - fixed_penalty, from_node.node_id, via_node.node_id, to_node.node_id}; - edge_penalty_file.write(reinterpret_cast(&penaltyblock), - sizeof(penaltyblock)); + BOOST_ASSERT(SPECIAL_NODEID != edge_data1.edge_id); + BOOST_ASSERT(SPECIAL_NODEID != edge_data2.edge_id); + + // NOTE: potential overflow here if we hit 2^32 routable edges + BOOST_ASSERT(m_edge_based_edge_list.size() <= + std::numeric_limits::max()); + m_edge_based_edge_list.emplace_back(edge_data1.edge_id, + edge_data2.edge_id, + m_edge_based_edge_list.size(), + distance, + true, + false); + BOOST_ASSERT(original_edges_counter == m_edge_based_edge_list.size()); + + // Here is where we write out the mapping between the edge-expanded edges, and + // the node-based edges that are originally used to calculate the `distance` + // for the edge-expanded edges. 
About 40 lines back, there is: + // + // unsigned distance = edge_data1.distance; + // + // This tells us that the weight for an edge-expanded-edge is based on the + // weight + // of the *source* node-based edge. Therefore, we will look up the individual + // segments of the source node-based edge, and write out a mapping between + // those and the edge-based-edge ID. + // External programs can then use this mapping to quickly perform + // updates to the edge-expanded-edge based directly on its ID. + if (generate_edge_lookup) + { + const auto node_based_edges = + m_compressed_edge_container.GetBucketReference(incoming_edge); + NodeID previous = node_along_road_entering; + + const unsigned node_count = node_based_edges.size() + 1; + const QueryNode &first_node = m_node_info_list[previous]; + + lookup::SegmentHeaderBlock header = {node_count, first_node.node_id}; + + edge_segment_file.write(reinterpret_cast(&header), + sizeof(header)); + + for (auto target_node : node_based_edges) + { + const QueryNode &from = m_node_info_list[previous]; + const QueryNode &to = m_node_info_list[target_node.node_id]; + const double segment_length = + util::coordinate_calculation::greatCircleDistance(from, to); + + lookup::SegmentBlock nodeblock = { + to.node_id, segment_length, target_node.weight}; + + edge_segment_file.write(reinterpret_cast(&nodeblock), + sizeof(nodeblock)); + previous = target_node.node_id; + } + + // We also now write out the mapping between the edge-expanded edges and the + // original nodes. Since each edge represents a possible maneuver, external + // programs can use this to quickly perform updates to edge weights in order + // to penalize certain turns. 
+ + // If this edge is 'trivial' -- where the compressed edge corresponds + // exactly to an original OSM segment -- we can pull the turn's preceding + // node ID directly with `node_along_road_entering`; otherwise, we need to + // look + // up the node + // immediately preceding the turn from the compressed edge container. + const bool isTrivial = m_compressed_edge_container.IsTrivial(incoming_edge); + + const auto &from_node = + isTrivial + ? m_node_info_list[node_along_road_entering] + : m_node_info_list[m_compressed_edge_container.GetLastEdgeSourceID( + incoming_edge)]; + const auto &via_node = + m_node_info_list[m_compressed_edge_container.GetLastEdgeTargetID( + incoming_edge)]; + const auto &to_node = + m_node_info_list[m_compressed_edge_container.GetFirstEdgeTargetID( + turn.eid)]; + + const unsigned fixed_penalty = distance - edge_data1.distance; + lookup::PenaltyBlock penaltyblock = { + fixed_penalty, from_node.node_id, via_node.node_id, to_node.node_id}; + edge_penalty_file.write(reinterpret_cast(&penaltyblock), + sizeof(penaltyblock)); + } } } } } - util::SimpleLogger().Write() << "Created " << entry_class_hash.size() << " entry classes and " - << bearing_class_hash.size() << " Bearing Classes"; + util::Log() << "Created " << entry_class_hash.size() << " entry classes and " + << bearing_class_hash.size() << " Bearing Classes"; - util::SimpleLogger().Write() << "Writing Turn Lane Data to File..."; + util::Log() << "Writing Turn Lane Data to File..."; std::ofstream turn_lane_data_file(turn_lane_data_filename.c_str(), std::ios::binary); std::vector lane_data(lane_data_map.size()); // extract lane data sorted by ID @@ -630,7 +647,7 @@ void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( turn_lane_data_file.write(reinterpret_cast(&lane_data[0]), sizeof(util::guidance::LaneTupleIdPair) * lane_data.size()); - util::SimpleLogger().Write() << "done."; + util::Log() << "done."; FlushVectorToStream(edge_data_file, original_edge_data_vector); @@ -642,18 +659,15 @@ 
void EdgeBasedGraphFactory::GenerateEdgeExpandedEdges( edge_data_file.write(reinterpret_cast(&length_prefix), sizeof(length_prefix)); - util::SimpleLogger().Write() << "Generated " << m_edge_based_node_list.size() - << " edge based nodes"; - util::SimpleLogger().Write() << "Node-based graph contains " << node_based_edge_counter - << " edges"; - util::SimpleLogger().Write() << "Edge-expanded graph ..."; - util::SimpleLogger().Write() << " contains " << m_edge_based_edge_list.size() << " edges"; - util::SimpleLogger().Write() << " skips " << restricted_turns_counter << " turns, " - "defined by " - << m_restriction_map->size() << " restrictions"; - util::SimpleLogger().Write() << " skips " << skipped_uturns_counter << " U turns"; - util::SimpleLogger().Write() << " skips " << skipped_barrier_turns_counter - << " turns over barriers"; + util::Log() << "Generated " << m_edge_based_node_list.size() << " edge based nodes"; + util::Log() << "Node-based graph contains " << node_based_edge_counter << " edges"; + util::Log() << "Edge-expanded graph ..."; + util::Log() << " contains " << m_edge_based_edge_list.size() << " edges"; + util::Log() << " skips " << restricted_turns_counter << " turns, " + "defined by " + << m_restriction_map->size() << " restrictions"; + util::Log() << " skips " << skipped_uturns_counter << " U turns"; + util::Log() << " skips " << skipped_barrier_turns_counter << " turns over barriers"; } std::vector EdgeBasedGraphFactory::GetBearingClasses() const diff --git a/src/extractor/extraction_containers.cpp b/src/extractor/extraction_containers.cpp index 04fa2055215..bb4a2a6c042 100644 --- a/src/extractor/extraction_containers.cpp +++ b/src/extractor/extraction_containers.cpp @@ -5,9 +5,10 @@ #include "util/range_table.hpp" #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/fingerprint.hpp" #include "util/io.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/timing_util.hpp" #include @@ -21,6 
+22,7 @@ #include #include #include +#include namespace { @@ -170,7 +172,8 @@ void ExtractionContainers::PrepareData(ScriptingEnvironment &scripting_environme void ExtractionContainers::WriteCharData(const std::string &file_name) { - std::cout << "[extractor] writing street name index ... " << std::flush; + util::UnbufferedLog log; + log << "writing street name index ... "; TIMER_START(write_index); boost::filesystem::ofstream file_stream(file_name, std::ios::binary); @@ -211,254 +214,289 @@ void ExtractionContainers::WriteCharData(const std::string &file_name) file_stream.write(write_buffer, buffer_len); TIMER_STOP(write_index); - std::cout << "ok, after " << TIMER_SEC(write_index) << "s" << std::endl; + log << "ok, after " << TIMER_SEC(write_index) << "s"; } void ExtractionContainers::PrepareNodes() { - std::cout << "[extractor] Sorting used nodes ... " << std::flush; - TIMER_START(sorting_used_nodes); - stxxl::sort( - used_node_id_list.begin(), used_node_id_list.end(), OSMNodeIDSTXXLLess(), stxxl_memory); - TIMER_STOP(sorting_used_nodes); - std::cout << "ok, after " << TIMER_SEC(sorting_used_nodes) << "s" << std::endl; - - std::cout << "[extractor] Erasing duplicate nodes ... " << std::flush; - TIMER_START(erasing_dups); - auto new_end = std::unique(used_node_id_list.begin(), used_node_id_list.end()); - used_node_id_list.resize(new_end - used_node_id_list.begin()); - TIMER_STOP(erasing_dups); - std::cout << "ok, after " << TIMER_SEC(erasing_dups) << "s" << std::endl; - - std::cout << "[extractor] Sorting all nodes ... " << std::flush; - TIMER_START(sorting_nodes); - stxxl::sort(all_nodes_list.begin(), - all_nodes_list.end(), - ExternalMemoryNodeSTXXLCompare(), - stxxl_memory); - TIMER_STOP(sorting_nodes); - std::cout << "ok, after " << TIMER_SEC(sorting_nodes) << "s" << std::endl; - - std::cout << "[extractor] Building node id map ... 
" << std::flush; - TIMER_START(id_map); - external_to_internal_node_id_map.reserve(used_node_id_list.size()); - auto node_iter = all_nodes_list.begin(); - auto ref_iter = used_node_id_list.begin(); - const auto all_nodes_list_end = all_nodes_list.end(); - const auto used_node_id_list_end = used_node_id_list.end(); - // Note: despite being able to handle 64 bit OSM node ids, we can't - // handle > uint32_t actual usable nodes. This should be OK for a while - // because we usually route on a *lot* less than 2^32 of the OSM - // graph nodes. - std::uint64_t internal_id = 0; - - // compute the intersection of nodes that were referenced and nodes we actually have - while (node_iter != all_nodes_list_end && ref_iter != used_node_id_list_end) { - if (node_iter->node_id < *ref_iter) + util::UnbufferedLog log; + log << "Sorting used nodes ... " << std::flush; + TIMER_START(sorting_used_nodes); + stxxl::sort( + used_node_id_list.begin(), used_node_id_list.end(), OSMNodeIDSTXXLLess(), stxxl_memory); + TIMER_STOP(sorting_used_nodes); + log << "ok, after " << TIMER_SEC(sorting_used_nodes) << "s"; + } + + { + util::UnbufferedLog log; + log << "Erasing duplicate nodes ... " << std::flush; + TIMER_START(erasing_dups); + auto new_end = std::unique(used_node_id_list.begin(), used_node_id_list.end()); + used_node_id_list.resize(new_end - used_node_id_list.begin()); + TIMER_STOP(erasing_dups); + log << "ok, after " << TIMER_SEC(erasing_dups) << "s"; + } + + { + util::UnbufferedLog log; + log << "Sorting all nodes ... " << std::flush; + TIMER_START(sorting_nodes); + stxxl::sort(all_nodes_list.begin(), + all_nodes_list.end(), + ExternalMemoryNodeSTXXLCompare(), + stxxl_memory); + TIMER_STOP(sorting_nodes); + log << "ok, after " << TIMER_SEC(sorting_nodes) << "s"; + } + + { + util::UnbufferedLog log; + log << "Building node id map ... 
" << std::flush; + TIMER_START(id_map); + external_to_internal_node_id_map.reserve(used_node_id_list.size()); + auto node_iter = all_nodes_list.begin(); + auto ref_iter = used_node_id_list.begin(); + const auto all_nodes_list_end = all_nodes_list.end(); + const auto used_node_id_list_end = used_node_id_list.end(); + // Note: despite being able to handle 64 bit OSM node ids, we can't + // handle > uint32_t actual usable nodes. This should be OK for a while + // because we usually route on a *lot* less than 2^32 of the OSM + // graph nodes. + std::uint64_t internal_id = 0; + + // compute the intersection of nodes that were referenced and nodes we actually have + while (node_iter != all_nodes_list_end && ref_iter != used_node_id_list_end) { + if (node_iter->node_id < *ref_iter) + { + node_iter++; + continue; + } + if (node_iter->node_id > *ref_iter) + { + ref_iter++; + continue; + } + BOOST_ASSERT(node_iter->node_id == *ref_iter); + external_to_internal_node_id_map[*ref_iter] = static_cast(internal_id++); node_iter++; - continue; + ref_iter++; } - if (node_iter->node_id > *ref_iter) + if (internal_id > std::numeric_limits::max()) { - ref_iter++; - continue; + throw util::exception("There are too many nodes remaining after filtering, OSRM only " + "supports 2^32 unique nodes, but there were " + + std::to_string(internal_id) + SOURCE_REF); } - BOOST_ASSERT(node_iter->node_id == *ref_iter); - external_to_internal_node_id_map[*ref_iter] = static_cast(internal_id++); - node_iter++; - ref_iter++; + max_internal_node_id = boost::numeric_cast(internal_id); + TIMER_STOP(id_map); + log << "ok, after " << TIMER_SEC(id_map) << "s"; } - if (internal_id > std::numeric_limits::max()) - { - throw util::exception("There are too many nodes remaining after filtering, OSRM only " - "supports 2^32 unique nodes"); - } - max_internal_node_id = boost::numeric_cast(internal_id); - TIMER_STOP(id_map); - std::cout << "ok, after " << TIMER_SEC(id_map) << "s" << std::endl; } void 
ExtractionContainers::PrepareEdges(ScriptingEnvironment &scripting_environment) { // Sort edges by start. - std::cout << "[extractor] Sorting edges by start ... " << std::flush; - TIMER_START(sort_edges_by_start); - stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMStartID(), stxxl_memory); - TIMER_STOP(sort_edges_by_start); - std::cout << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s" << std::endl; - - std::cout << "[extractor] Setting start coords ... " << std::flush; - TIMER_START(set_start_coords); - // Traverse list of edges and nodes in parallel and set start coord - auto node_iterator = all_nodes_list.begin(); - auto edge_iterator = all_edges_list.begin(); - - const auto all_edges_list_end = all_edges_list.end(); - const auto all_nodes_list_end = all_nodes_list.end(); - - while (edge_iterator != all_edges_list_end && node_iterator != all_nodes_list_end) { - if (edge_iterator->result.osm_source_id < node_iterator->node_id) - { - util::SimpleLogger().Write(LogLevel::logDEBUG) << "Found invalid node reference " - << edge_iterator->result.source; - edge_iterator->result.source = SPECIAL_NODEID; - ++edge_iterator; - continue; - } - if (edge_iterator->result.osm_source_id > node_iterator->node_id) - { - node_iterator++; - continue; - } + util::UnbufferedLog log; + log << "Sorting edges by start ... " << std::flush; + TIMER_START(sort_edges_by_start); + stxxl::sort( + all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMStartID(), stxxl_memory); + TIMER_STOP(sort_edges_by_start); + log << "ok, after " << TIMER_SEC(sort_edges_by_start) << "s"; + } - // remove loops - if (edge_iterator->result.osm_source_id == edge_iterator->result.osm_target_id) - { - edge_iterator->result.source = SPECIAL_NODEID; - edge_iterator->result.target = SPECIAL_NODEID; - ++edge_iterator; - continue; - } + { + util::UnbufferedLog log; + log << "Setting start coords ... 
" << std::flush; + TIMER_START(set_start_coords); + // Traverse list of edges and nodes in parallel and set start coord + auto node_iterator = all_nodes_list.begin(); + auto edge_iterator = all_edges_list.begin(); - BOOST_ASSERT(edge_iterator->result.osm_source_id == node_iterator->node_id); + const auto all_edges_list_end = all_edges_list.end(); + const auto all_nodes_list_end = all_nodes_list.end(); - // assign new node id - auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id); - BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); - edge_iterator->result.source = id_iter->second; + while (edge_iterator != all_edges_list_end && node_iterator != all_nodes_list_end) + { + if (edge_iterator->result.osm_source_id < node_iterator->node_id) + { + util::Log(logDEBUG) << "Found invalid node reference " + << edge_iterator->result.source; + edge_iterator->result.source = SPECIAL_NODEID; + ++edge_iterator; + continue; + } + if (edge_iterator->result.osm_source_id > node_iterator->node_id) + { + node_iterator++; + continue; + } - edge_iterator->source_coordinate.lat = node_iterator->lat; - edge_iterator->source_coordinate.lon = node_iterator->lon; - ++edge_iterator; - } + // remove loops + if (edge_iterator->result.osm_source_id == edge_iterator->result.osm_target_id) + { + edge_iterator->result.source = SPECIAL_NODEID; + edge_iterator->result.target = SPECIAL_NODEID; + ++edge_iterator; + continue; + } - // Remove all remaining edges. They are invalid because there are no corresponding nodes for - // them. This happens when using osmosis with bbox or polygon to extract smaller areas. 
- auto markSourcesInvalid = [](InternalExtractorEdge &edge) { - util::SimpleLogger().Write(LogLevel::logDEBUG) << "Found invalid node reference " - << edge.result.source; - edge.result.source = SPECIAL_NODEID; - edge.result.osm_source_id = SPECIAL_OSM_NODEID; - }; - std::for_each(edge_iterator, all_edges_list_end, markSourcesInvalid); - TIMER_STOP(set_start_coords); - std::cout << "ok, after " << TIMER_SEC(set_start_coords) << "s" << std::endl; - - // Sort Edges by target - std::cout << "[extractor] Sorting edges by target ... " << std::flush; - TIMER_START(sort_edges_by_target); - stxxl::sort(all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMTargetID(), stxxl_memory); - TIMER_STOP(sort_edges_by_target); - std::cout << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s" << std::endl; - - // Compute edge weights - std::cout << "[extractor] Computing edge weights ... " << std::flush; - TIMER_START(compute_weights); - node_iterator = all_nodes_list.begin(); - edge_iterator = all_edges_list.begin(); - const auto all_edges_list_end_ = all_edges_list.end(); - const auto all_nodes_list_end_ = all_nodes_list.end(); - - while (edge_iterator != all_edges_list_end_ && node_iterator != all_nodes_list_end_) - { - // skip all invalid edges - if (edge_iterator->result.source == SPECIAL_NODEID) - { - ++edge_iterator; - continue; - } + BOOST_ASSERT(edge_iterator->result.osm_source_id == node_iterator->node_id); - if (edge_iterator->result.osm_target_id < node_iterator->node_id) - { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Found invalid node reference " - << static_cast(edge_iterator->result.osm_target_id); - edge_iterator->result.target = SPECIAL_NODEID; + // assign new node id + auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id); + BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); + edge_iterator->result.source = id_iter->second; + + edge_iterator->source_coordinate.lat = node_iterator->lat; + 
edge_iterator->source_coordinate.lon = node_iterator->lon; ++edge_iterator; - continue; - } - if (edge_iterator->result.osm_target_id > node_iterator->node_id) - { - ++node_iterator; - continue; } - BOOST_ASSERT(edge_iterator->result.osm_target_id == node_iterator->node_id); - BOOST_ASSERT(edge_iterator->weight_data.speed >= 0); - BOOST_ASSERT(edge_iterator->source_coordinate.lat != - util::FixedLatitude{std::numeric_limits::min()}); - BOOST_ASSERT(edge_iterator->source_coordinate.lon != - util::FixedLongitude{std::numeric_limits::min()}); - - const double distance = util::coordinate_calculation::greatCircleDistance( - edge_iterator->source_coordinate, - util::Coordinate(node_iterator->lon, node_iterator->lat)); + // Remove all remaining edges. They are invalid because there are no corresponding nodes for + // them. This happens when using osmosis with bbox or polygon to extract smaller areas. + auto markSourcesInvalid = [](InternalExtractorEdge &edge) { + util::Log(logDEBUG) << "Found invalid node reference " << edge.result.source; + edge.result.source = SPECIAL_NODEID; + edge.result.osm_source_id = SPECIAL_OSM_NODEID; + }; + std::for_each(edge_iterator, all_edges_list_end, markSourcesInvalid); + TIMER_STOP(set_start_coords); + log << "ok, after " << TIMER_SEC(set_start_coords) << "s"; + } - scripting_environment.ProcessSegment( - edge_iterator->source_coordinate, *node_iterator, distance, edge_iterator->weight_data); + { + // Sort Edges by target + util::UnbufferedLog log; + log << "Sorting edges by target ... " << std::flush; + TIMER_START(sort_edges_by_target); + stxxl::sort( + all_edges_list.begin(), all_edges_list.end(), CmpEdgeByOSMTargetID(), stxxl_memory); + TIMER_STOP(sort_edges_by_target); + log << "ok, after " << TIMER_SEC(sort_edges_by_target) << "s"; + } - const double weight = [distance](const InternalExtractorEdge::WeightData &data) { - switch (data.type) + { + // Compute edge weights + util::UnbufferedLog log; + log << "Computing edge weights ... 
" << std::flush; + TIMER_START(compute_weights); + auto node_iterator = all_nodes_list.begin(); + auto edge_iterator = all_edges_list.begin(); + const auto all_edges_list_end_ = all_edges_list.end(); + const auto all_nodes_list_end_ = all_nodes_list.end(); + + while (edge_iterator != all_edges_list_end_ && node_iterator != all_nodes_list_end_) + { + // skip all invalid edges + if (edge_iterator->result.source == SPECIAL_NODEID) { - case InternalExtractorEdge::WeightType::EDGE_DURATION: - case InternalExtractorEdge::WeightType::WAY_DURATION: - return data.duration * 10.; - break; - case InternalExtractorEdge::WeightType::SPEED: - return (distance * 10.) / (data.speed / 3.6); - break; - case InternalExtractorEdge::WeightType::INVALID: - util::exception("invalid weight type"); + ++edge_iterator; + continue; } - return -1.0; - }(edge_iterator->weight_data); - - auto &edge = edge_iterator->result; - edge.weight = std::max(1, static_cast(std::floor(weight + .5))); - // assign new node id - auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id); - BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); - edge.target = id_iter->second; + if (edge_iterator->result.osm_target_id < node_iterator->node_id) + { + util::Log(logDEBUG) << "Found invalid node reference " + << static_cast(edge_iterator->result.osm_target_id); + edge_iterator->result.target = SPECIAL_NODEID; + ++edge_iterator; + continue; + } + if (edge_iterator->result.osm_target_id > node_iterator->node_id) + { + ++node_iterator; + continue; + } - // orient edges consistently: source id < target id - // important for multi-edge removal - if (edge.source > edge.target) - { - std::swap(edge.source, edge.target); + BOOST_ASSERT(edge_iterator->result.osm_target_id == node_iterator->node_id); + BOOST_ASSERT(edge_iterator->weight_data.speed >= 0); + BOOST_ASSERT(edge_iterator->source_coordinate.lat != + util::FixedLatitude{std::numeric_limits::min()}); + 
BOOST_ASSERT(edge_iterator->source_coordinate.lon != + util::FixedLongitude{std::numeric_limits::min()}); + + const double distance = util::coordinate_calculation::greatCircleDistance( + edge_iterator->source_coordinate, + util::Coordinate(node_iterator->lon, node_iterator->lat)); + + scripting_environment.ProcessSegment(edge_iterator->source_coordinate, + *node_iterator, + distance, + edge_iterator->weight_data); + + const double weight = [distance, edge_iterator, node_iterator]( + const InternalExtractorEdge::WeightData &data) { + switch (data.type) + { + case InternalExtractorEdge::WeightType::EDGE_DURATION: + case InternalExtractorEdge::WeightType::WAY_DURATION: + return data.duration * 10.; + break; + case InternalExtractorEdge::WeightType::SPEED: + return (distance * 10.) / (data.speed / 3.6); + break; + case InternalExtractorEdge::WeightType::INVALID: + std::stringstream coordstring; + coordstring << edge_iterator->source_coordinate << " to " << node_iterator->lon + << "," << node_iterator->lat; + util::exception("Encountered invalid weight at segment " + coordstring.str() + + SOURCE_REF); + } + return -1.0; + }(edge_iterator->weight_data); + + auto &edge = edge_iterator->result; + edge.weight = std::max(1, static_cast(std::floor(weight + .5))); + + // assign new node id + auto id_iter = external_to_internal_node_id_map.find(node_iterator->node_id); + BOOST_ASSERT(id_iter != external_to_internal_node_id_map.end()); + edge.target = id_iter->second; + + // orient edges consistently: source id < target id + // important for multi-edge removal + if (edge.source > edge.target) + { + std::swap(edge.source, edge.target); - // std::swap does not work with bit-fields - bool temp = edge.forward; - edge.forward = edge.backward; - edge.backward = temp; + // std::swap does not work with bit-fields + bool temp = edge.forward; + edge.forward = edge.backward; + edge.backward = temp; + } + ++edge_iterator; } - ++edge_iterator; - } - // Remove all remaining edges. 
They are invalid because there are no corresponding nodes for - // them. This happens when using osmosis with bbox or polygon to extract smaller areas. - auto markTargetsInvalid = [](InternalExtractorEdge &edge) { - util::SimpleLogger().Write(LogLevel::logDEBUG) << "Found invalid node reference " - << edge.result.target; - edge.result.target = SPECIAL_NODEID; - }; - std::for_each(edge_iterator, all_edges_list_end_, markTargetsInvalid); - TIMER_STOP(compute_weights); - std::cout << "ok, after " << TIMER_SEC(compute_weights) << "s" << std::endl; + // Remove all remaining edges. They are invalid because there are no corresponding nodes for + // them. This happens when using osmosis with bbox or polygon to extract smaller areas. + auto markTargetsInvalid = [](InternalExtractorEdge &edge) { + util::Log(logDEBUG) << "Found invalid node reference " << edge.result.target; + edge.result.target = SPECIAL_NODEID; + }; + std::for_each(edge_iterator, all_edges_list_end_, markTargetsInvalid); + TIMER_STOP(compute_weights); + log << "ok, after " << TIMER_SEC(compute_weights) << "s"; + } // Sort edges by start. - std::cout << "[extractor] Sorting edges by renumbered start ... " << std::flush; - TIMER_START(sort_edges_by_renumbered_start); - std::mutex name_data_mutex; - stxxl::sort(all_edges_list.begin(), - all_edges_list.end(), - CmpEdgeByInternalSourceTargetAndName{name_data_mutex, name_char_data, name_offsets}, - stxxl_memory); - TIMER_STOP(sort_edges_by_renumbered_start); - std::cout << "ok, after " << TIMER_SEC(sort_edges_by_renumbered_start) << "s" << std::endl; + { + util::UnbufferedLog log; + log << "Sorting edges by renumbered start ... 
"; + TIMER_START(sort_edges_by_renumbered_start); + std::mutex name_data_mutex; + stxxl::sort( + all_edges_list.begin(), + all_edges_list.end(), + CmpEdgeByInternalSourceTargetAndName{name_data_mutex, name_char_data, name_offsets}, + stxxl_memory); + TIMER_STOP(sort_edges_by_renumbered_start); + log << "ok, after " << TIMER_SEC(sort_edges_by_renumbered_start) << "s"; + } BOOST_ASSERT(all_edges_list.size() > 0); for (unsigned i = 0; i < all_edges_list.size();) @@ -553,85 +591,101 @@ void ExtractionContainers::PrepareEdges(ScriptingEnvironment &scripting_environm void ExtractionContainers::WriteEdges(std::ofstream &file_out_stream) const { - std::cout << "[extractor] Writing used edges ... " << std::flush; - TIMER_START(write_edges); - // Traverse list of edges and nodes in parallel and set target coord + + std::size_t start_position = 0; std::uint64_t used_edges_counter = 0; std::uint32_t used_edges_counter_buffer = 0; + { + util::UnbufferedLog log; + log << "Writing used edges ... " << std::flush; + TIMER_START(write_edges); + // Traverse list of edges and nodes in parallel and set target coord - auto start_position = file_out_stream.tellp(); - file_out_stream.write((char *)&used_edges_counter_buffer, sizeof(used_edges_counter_buffer)); + start_position = file_out_stream.tellp(); + file_out_stream.write((char *)&used_edges_counter_buffer, + sizeof(used_edges_counter_buffer)); - for (const auto &edge : all_edges_list) - { - if (edge.result.source == SPECIAL_NODEID || edge.result.target == SPECIAL_NODEID) + for (const auto &edge : all_edges_list) { - continue; + if (edge.result.source == SPECIAL_NODEID || edge.result.target == SPECIAL_NODEID) + { + continue; + } + + // IMPORTANT: here, we're using slicing to only write the data from the base + // class of NodeBasedEdgeWithOSM + NodeBasedEdge tmp = edge.result; + file_out_stream.write((char *)&tmp, sizeof(NodeBasedEdge)); + used_edges_counter++; } - // IMPORTANT: here, we're using slicing to only write the data from 
the base - // class of NodeBasedEdgeWithOSM - NodeBasedEdge tmp = edge.result; - file_out_stream.write((char *)&tmp, sizeof(NodeBasedEdge)); - used_edges_counter++; + if (used_edges_counter > std::numeric_limits::max()) + { + throw util::exception("There are too many edges, OSRM only supports 2^32" + SOURCE_REF); + } + TIMER_STOP(write_edges); + log << "ok, after " << TIMER_SEC(write_edges) << "s"; } - if (used_edges_counter > std::numeric_limits::max()) { - throw util::exception("There are too many edges, OSRM only supports 2^32"); - } - TIMER_STOP(write_edges); - std::cout << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl; - - std::cout << "[extractor] setting number of edges ... " << std::flush; + util::UnbufferedLog log; + log << "setting number of edges ... " << std::flush; - used_edges_counter_buffer = boost::numeric_cast(used_edges_counter); + used_edges_counter_buffer = boost::numeric_cast(used_edges_counter); - file_out_stream.seekp(start_position); - file_out_stream.write((char *)&used_edges_counter_buffer, sizeof(used_edges_counter_buffer)); - std::cout << "ok" << std::endl; + file_out_stream.seekp(start_position); + file_out_stream.write((char *)&used_edges_counter_buffer, + sizeof(used_edges_counter_buffer)); + log << "ok"; + } - util::SimpleLogger().Write() << "Processed " << used_edges_counter << " edges"; + util::Log() << "Processed " << used_edges_counter << " edges"; } void ExtractionContainers::WriteNodes(std::ofstream &file_out_stream) const { - // write dummy value, will be overwritten later - std::cout << "[extractor] setting number of nodes ... " << std::flush; - file_out_stream.write((char *)&max_internal_node_id, sizeof(unsigned)); - std::cout << "ok" << std::endl; - - std::cout << "[extractor] Confirming/Writing used nodes ... 
" << std::flush; - TIMER_START(write_nodes); - // identify all used nodes by a merging step of two sorted lists - auto node_iterator = all_nodes_list.begin(); - auto node_id_iterator = used_node_id_list.begin(); - const auto used_node_id_list_end = used_node_id_list.end(); - const auto all_nodes_list_end = all_nodes_list.end(); - - while (node_id_iterator != used_node_id_list_end && node_iterator != all_nodes_list_end) { - if (*node_id_iterator < node_iterator->node_id) + // write dummy value, will be overwritten later + util::UnbufferedLog log; + log << "setting number of nodes ... " << std::flush; + file_out_stream.write((char *)&max_internal_node_id, sizeof(unsigned)); + log << "ok"; + } + + { + util::UnbufferedLog log; + log << "Confirming/Writing used nodes ... "; + TIMER_START(write_nodes); + // identify all used nodes by a merging step of two sorted lists + auto node_iterator = all_nodes_list.begin(); + auto node_id_iterator = used_node_id_list.begin(); + const auto used_node_id_list_end = used_node_id_list.end(); + const auto all_nodes_list_end = all_nodes_list.end(); + + while (node_id_iterator != used_node_id_list_end && node_iterator != all_nodes_list_end) { + if (*node_id_iterator < node_iterator->node_id) + { + ++node_id_iterator; + continue; + } + if (*node_id_iterator > node_iterator->node_id) + { + ++node_iterator; + continue; + } + BOOST_ASSERT(*node_id_iterator == node_iterator->node_id); + + file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode)); + ++node_id_iterator; - continue; - } - if (*node_id_iterator > node_iterator->node_id) - { ++node_iterator; - continue; } - BOOST_ASSERT(*node_id_iterator == node_iterator->node_id); - - file_out_stream.write((char *)&(*node_iterator), sizeof(ExternalMemoryNode)); - - ++node_id_iterator; - ++node_iterator; + TIMER_STOP(write_nodes); + log << "ok, after " << TIMER_SEC(write_nodes) << "s"; } - TIMER_STOP(write_nodes); - std::cout << "ok, after " << TIMER_SEC(write_nodes) << "s" << 
std::endl; - util::SimpleLogger().Write() << "Processed " << max_internal_node_id << " nodes"; + util::Log() << "Processed " << max_internal_node_id << " nodes"; } void ExtractionContainers::WriteRestrictions(const std::string &path) const @@ -658,203 +712,210 @@ void ExtractionContainers::WriteRestrictions(const std::string &path) const } restrictions_out_stream.seekp(count_position); restrictions_out_stream.write((char *)&written_restriction_count, sizeof(unsigned)); - util::SimpleLogger().Write() << "usable restrictions: " << written_restriction_count; + util::Log() << "usable restrictions: " << written_restriction_count; } void ExtractionContainers::PrepareRestrictions() { - std::cout << "[extractor] Sorting used ways ... " << std::flush; - TIMER_START(sort_ways); - stxxl::sort(way_start_end_id_list.begin(), - way_start_end_id_list.end(), - FirstAndLastSegmentOfWayStxxlCompare(), - stxxl_memory); - TIMER_STOP(sort_ways); - std::cout << "ok, after " << TIMER_SEC(sort_ways) << "s" << std::endl; - - std::cout << "[extractor] Sorting " << restrictions_list.size() << " restriction. by from... " - << std::flush; - TIMER_START(sort_restrictions); - stxxl::sort(restrictions_list.begin(), - restrictions_list.end(), - CmpRestrictionContainerByFrom(), - stxxl_memory); - TIMER_STOP(sort_restrictions); - std::cout << "ok, after " << TIMER_SEC(sort_restrictions) << "s" << std::endl; - - std::cout << "[extractor] Fixing restriction starts ... 
" << std::flush; - TIMER_START(fix_restriction_starts); - auto restrictions_iterator = restrictions_list.begin(); - auto way_start_and_end_iterator = way_start_end_id_list.cbegin(); - const auto restrictions_list_end = restrictions_list.end(); - const auto way_start_end_id_list_end = way_start_end_id_list.cend(); - - while (way_start_and_end_iterator != way_start_end_id_list_end && - restrictions_iterator != restrictions_list_end) { - if (way_start_and_end_iterator->way_id < - OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}) - { - ++way_start_and_end_iterator; - continue; - } - - if (way_start_and_end_iterator->way_id > - OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}) - { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Restriction references invalid way: " - << restrictions_iterator->restriction.from.way; - restrictions_iterator->restriction.from.node = SPECIAL_NODEID; - ++restrictions_iterator; - continue; - } + util::UnbufferedLog log; + log << "Sorting used ways ... "; + TIMER_START(sort_ways); + stxxl::sort(way_start_end_id_list.begin(), + way_start_end_id_list.end(), + FirstAndLastSegmentOfWayStxxlCompare(), + stxxl_memory); + TIMER_STOP(sort_ways); + log << "ok, after " << TIMER_SEC(sort_ways) << "s"; + } - BOOST_ASSERT( - way_start_and_end_iterator->way_id == - OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}); - // we do not remap the via id yet, since we will need it for the to node as well - const OSMNodeID via_node_id = OSMNodeID{restrictions_iterator->restriction.via.node}; + { + util::UnbufferedLog log; + log << "Sorting " << restrictions_list.size() << " restriction. by from... 
"; + TIMER_START(sort_restrictions); + stxxl::sort(restrictions_list.begin(), + restrictions_list.end(), + CmpRestrictionContainerByFrom(), + stxxl_memory); + TIMER_STOP(sort_restrictions); + log << "ok, after " << TIMER_SEC(sort_restrictions) << "s"; + } - // check if via is actually valid, if not invalidate - auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); - if (via_id_iter == external_to_internal_node_id_map.end()) + { + util::UnbufferedLog log; + log << "Fixing restriction starts ... " << std::flush; + TIMER_START(fix_restriction_starts); + auto restrictions_iterator = restrictions_list.begin(); + auto way_start_and_end_iterator = way_start_end_id_list.cbegin(); + const auto restrictions_list_end = restrictions_list.end(); + const auto way_start_end_id_list_end = way_start_end_id_list.cend(); + + while (way_start_and_end_iterator != way_start_end_id_list_end && + restrictions_iterator != restrictions_list_end) { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Restriction references invalid node: " - << restrictions_iterator->restriction.via.node; - restrictions_iterator->restriction.via.node = SPECIAL_NODEID; - ++restrictions_iterator; - continue; - } + if (way_start_and_end_iterator->way_id < + OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}) + { + ++way_start_and_end_iterator; + continue; + } - if (way_start_and_end_iterator->first_segment_source_id == via_node_id) - { - // assign new from node id - auto id_iter = external_to_internal_node_id_map.find( - way_start_and_end_iterator->first_segment_target_id); - if (id_iter == external_to_internal_node_id_map.end()) + if (way_start_and_end_iterator->way_id > + OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}) { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Way references invalid node: " - << way_start_and_end_iterator->first_segment_target_id; + util::Log(logDEBUG) << "Restriction references invalid way: " + << 
restrictions_iterator->restriction.from.way; restrictions_iterator->restriction.from.node = SPECIAL_NODEID; ++restrictions_iterator; - ++way_start_and_end_iterator; continue; } - restrictions_iterator->restriction.from.node = id_iter->second; - } - else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) - { - // assign new from node id - auto id_iter = external_to_internal_node_id_map.find( - way_start_and_end_iterator->last_segment_source_id); - if (id_iter == external_to_internal_node_id_map.end()) + + BOOST_ASSERT( + way_start_and_end_iterator->way_id == + OSMWayID{static_cast(restrictions_iterator->restriction.from.way)}); + // we do not remap the via id yet, since we will need it for the to node as well + const OSMNodeID via_node_id = OSMNodeID{restrictions_iterator->restriction.via.node}; + + // check if via is actually valid, if not invalidate + auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); + if (via_id_iter == external_to_internal_node_id_map.end()) { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Way references invalid node: " - << way_start_and_end_iterator->last_segment_target_id; - restrictions_iterator->restriction.from.node = SPECIAL_NODEID; + util::Log(logDEBUG) << "Restriction references invalid node: " + << restrictions_iterator->restriction.via.node; + restrictions_iterator->restriction.via.node = SPECIAL_NODEID; ++restrictions_iterator; - ++way_start_and_end_iterator; continue; } - restrictions_iterator->restriction.from.node = id_iter->second; - } - ++restrictions_iterator; - } - TIMER_STOP(fix_restriction_starts); - std::cout << "ok, after " << TIMER_SEC(fix_restriction_starts) << "s" << std::endl; - - std::cout << "[extractor] Sorting restrictions. by to ... 
" << std::flush; - TIMER_START(sort_restrictions_to); - stxxl::sort(restrictions_list.begin(), - restrictions_list.end(), - CmpRestrictionContainerByTo(), - stxxl_memory); - TIMER_STOP(sort_restrictions_to); - std::cout << "ok, after " << TIMER_SEC(sort_restrictions_to) << "s" << std::endl; - - std::cout << "[extractor] Fixing restriction ends ... " << std::flush; - TIMER_START(fix_restriction_ends); - restrictions_iterator = restrictions_list.begin(); - way_start_and_end_iterator = way_start_end_id_list.cbegin(); - const auto way_start_end_id_list_end_ = way_start_end_id_list.cend(); - const auto restrictions_list_end_ = restrictions_list.end(); - - while (way_start_and_end_iterator != way_start_end_id_list_end_ && - restrictions_iterator != restrictions_list_end_) - { - if (way_start_and_end_iterator->way_id < - OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}) - { - ++way_start_and_end_iterator; - continue; - } - if (restrictions_iterator->restriction.from.node == SPECIAL_NODEID || - restrictions_iterator->restriction.via.node == SPECIAL_NODEID) - { + if (way_start_and_end_iterator->first_segment_source_id == via_node_id) + { + // assign new from node id + auto id_iter = external_to_internal_node_id_map.find( + way_start_and_end_iterator->first_segment_target_id); + if (id_iter == external_to_internal_node_id_map.end()) + { + util::Log(logDEBUG) << "Way references invalid node: " + << way_start_and_end_iterator->first_segment_target_id; + restrictions_iterator->restriction.from.node = SPECIAL_NODEID; + ++restrictions_iterator; + ++way_start_and_end_iterator; + continue; + } + restrictions_iterator->restriction.from.node = id_iter->second; + } + else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) + { + // assign new from node id + auto id_iter = external_to_internal_node_id_map.find( + way_start_and_end_iterator->last_segment_source_id); + if (id_iter == external_to_internal_node_id_map.end()) + { + util::Log(logDEBUG) << 
"Way references invalid node: " + << way_start_and_end_iterator->last_segment_target_id; + restrictions_iterator->restriction.from.node = SPECIAL_NODEID; + ++restrictions_iterator; + ++way_start_and_end_iterator; + continue; + } + restrictions_iterator->restriction.from.node = id_iter->second; + } ++restrictions_iterator; - continue; } - if (way_start_and_end_iterator->way_id > - OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}) - { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Restriction references invalid way: " - << restrictions_iterator->restriction.to.way; - restrictions_iterator->restriction.to.way = SPECIAL_NODEID; - ++restrictions_iterator; - continue; - } - BOOST_ASSERT( - way_start_and_end_iterator->way_id == - OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}); - const OSMNodeID via_node_id = OSMNodeID{restrictions_iterator->restriction.via.node}; - // assign new via node id - auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); - BOOST_ASSERT(via_id_iter != external_to_internal_node_id_map.end()); - restrictions_iterator->restriction.via.node = via_id_iter->second; + TIMER_STOP(fix_restriction_starts); + log << "ok, after " << TIMER_SEC(fix_restriction_starts) << "s"; + } + + { + util::UnbufferedLog log; + log << "Sorting restrictions. by to ... " << std::flush; + TIMER_START(sort_restrictions_to); + stxxl::sort(restrictions_list.begin(), + restrictions_list.end(), + CmpRestrictionContainerByTo(), + stxxl_memory); + TIMER_STOP(sort_restrictions_to); + log << "ok, after " << TIMER_SEC(sort_restrictions_to) << "s"; + } - if (way_start_and_end_iterator->first_segment_source_id == via_node_id) + { + util::UnbufferedLog log; + log << "Fixing restriction ends ... 
" << std::flush; + TIMER_START(fix_restriction_ends); + auto restrictions_iterator = restrictions_list.begin(); + auto way_start_and_end_iterator = way_start_end_id_list.cbegin(); + const auto way_start_end_id_list_end_ = way_start_end_id_list.cend(); + const auto restrictions_list_end_ = restrictions_list.end(); + + while (way_start_and_end_iterator != way_start_end_id_list_end_ && + restrictions_iterator != restrictions_list_end_) { - auto to_id_iter = external_to_internal_node_id_map.find( - way_start_and_end_iterator->first_segment_target_id); - if (to_id_iter == external_to_internal_node_id_map.end()) + if (way_start_and_end_iterator->way_id < + OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}) { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Way references invalid node: " - << way_start_and_end_iterator->first_segment_source_id; - restrictions_iterator->restriction.to.node = SPECIAL_NODEID; - ++restrictions_iterator; ++way_start_and_end_iterator; continue; } - restrictions_iterator->restriction.to.node = to_id_iter->second; - } - else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) - { - auto to_id_iter = external_to_internal_node_id_map.find( - way_start_and_end_iterator->last_segment_source_id); - if (to_id_iter == external_to_internal_node_id_map.end()) + if (restrictions_iterator->restriction.from.node == SPECIAL_NODEID || + restrictions_iterator->restriction.via.node == SPECIAL_NODEID) { - util::SimpleLogger().Write(LogLevel::logDEBUG) - << "Way references invalid node: " - << way_start_and_end_iterator->last_segment_source_id; - restrictions_iterator->restriction.to.node = SPECIAL_NODEID; ++restrictions_iterator; - ++way_start_and_end_iterator; continue; } - restrictions_iterator->restriction.to.node = to_id_iter->second; + if (way_start_and_end_iterator->way_id > + OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}) + { + util::Log(logDEBUG) << "Restriction references invalid way: " + << 
restrictions_iterator->restriction.to.way; + restrictions_iterator->restriction.to.way = SPECIAL_NODEID; + ++restrictions_iterator; + continue; + } + BOOST_ASSERT( + way_start_and_end_iterator->way_id == + OSMWayID{static_cast(restrictions_iterator->restriction.to.way)}); + const OSMNodeID via_node_id = OSMNodeID{restrictions_iterator->restriction.via.node}; + + // assign new via node id + auto via_id_iter = external_to_internal_node_id_map.find(via_node_id); + BOOST_ASSERT(via_id_iter != external_to_internal_node_id_map.end()); + restrictions_iterator->restriction.via.node = via_id_iter->second; + + if (way_start_and_end_iterator->first_segment_source_id == via_node_id) + { + auto to_id_iter = external_to_internal_node_id_map.find( + way_start_and_end_iterator->first_segment_target_id); + if (to_id_iter == external_to_internal_node_id_map.end()) + { + util::Log(logDEBUG) << "Way references invalid node: " + << way_start_and_end_iterator->first_segment_source_id; + restrictions_iterator->restriction.to.node = SPECIAL_NODEID; + ++restrictions_iterator; + ++way_start_and_end_iterator; + continue; + } + restrictions_iterator->restriction.to.node = to_id_iter->second; + } + else if (way_start_and_end_iterator->last_segment_target_id == via_node_id) + { + auto to_id_iter = external_to_internal_node_id_map.find( + way_start_and_end_iterator->last_segment_source_id); + if (to_id_iter == external_to_internal_node_id_map.end()) + { + util::Log(logDEBUG) << "Way references invalid node: " + << way_start_and_end_iterator->last_segment_source_id; + restrictions_iterator->restriction.to.node = SPECIAL_NODEID; + ++restrictions_iterator; + ++way_start_and_end_iterator; + continue; + } + restrictions_iterator->restriction.to.node = to_id_iter->second; + } + ++restrictions_iterator; } - ++restrictions_iterator; + TIMER_STOP(fix_restriction_ends); + log << "ok, after " << TIMER_SEC(fix_restriction_ends) << "s"; } - TIMER_STOP(fix_restriction_ends); - std::cout << "ok, after " << 
TIMER_SEC(fix_restriction_ends) << "s" << std::endl; } } } diff --git a/src/extractor/extractor.cpp b/src/extractor/extractor.cpp index 98de089beb4..38e96d149b9 100644 --- a/src/extractor/extractor.cpp +++ b/src/extractor/extractor.cpp @@ -11,11 +11,13 @@ #include "extractor/raster_source.hpp" #include "storage/io.hpp" #include "storage/io.hpp" +#include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/graph_loader.hpp" #include "util/io.hpp" +#include "util/log.hpp" #include "util/name_table.hpp" #include "util/range_table.hpp" -#include "util/simple_logger.hpp" #include "util/timing_util.hpp" #include "extractor/compressed_edge_container.hpp" @@ -119,12 +121,12 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) tbb::task_scheduler_init init(number_of_threads); { - util::SimpleLogger().Write() << "Input file: " << config.input_path.filename().string(); + util::Log() << "Input file: " << config.input_path.filename().string(); if (!config.profile_path.empty()) { - util::SimpleLogger().Write() << "Profile: " << config.profile_path.filename().string(); + util::Log() << "Profile: " << config.profile_path.filename().string(); } - util::SimpleLogger().Write() << "Threads: " << number_of_threads; + util::Log() << "Threads: " << number_of_threads; ExtractionContainers extraction_containers; auto extractor_callbacks = std::make_unique(extraction_containers); @@ -137,7 +139,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) unsigned number_of_ways = 0; unsigned number_of_relations = 0; - util::SimpleLogger().Write() << "Parsing in progress.."; + util::Log() << "Parsing in progress.."; TIMER_START(parsing); // setup raster sources @@ -148,7 +150,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) { generator = "unknown tool"; } - util::SimpleLogger().Write() << "input file generated by " << generator; + util::Log() << "input file generated by " << generator; // write .timestamp data file std::string 
timestamp = header.get("osmosis_replication_timestamp"); @@ -156,7 +158,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) { timestamp = "n/a"; } - util::SimpleLogger().Write() << "timestamp: " << timestamp; + util::Log() << "timestamp: " << timestamp; boost::filesystem::ofstream timestamp_out(config.timestamp_file_name); timestamp_out.write(timestamp.c_str(), timestamp.length()); @@ -210,12 +212,10 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) } } TIMER_STOP(parsing); - util::SimpleLogger().Write() << "Parsing finished after " << TIMER_SEC(parsing) - << " seconds"; + util::Log() << "Parsing finished after " << TIMER_SEC(parsing) << " seconds"; - util::SimpleLogger().Write() << "Raw input contains " << number_of_nodes << " nodes, " - << number_of_ways << " ways, and " << number_of_relations - << " relations"; + util::Log() << "Raw input contains " << number_of_nodes << " nodes, " << number_of_ways + << " ways, and " << number_of_relations << " relations"; // take control over the turn lane map turn_lane_map = extractor_callbacks->moveOutLaneDescriptionMap(); @@ -224,8 +224,8 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) if (extraction_containers.all_edges_list.empty()) { - util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting."; - return 1; + throw util::exception(std::string("There are no edges remaining after parsing.") + + SOURCE_REF); } extraction_containers.PrepareData(scripting_environment, @@ -237,15 +237,14 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) scripting_environment.GetProfileProperties()); TIMER_STOP(extracting); - util::SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting) - << "s"; + util::Log() << "extraction finished after " << TIMER_SEC(extracting) << "s"; } { // Transform the node-based graph that OSM is based on into an edge-based graph // that is better for routing. 
Every edge becomes a node, and every valid // movement (e.g. turn from A->B, and B->A) becomes an edge - util::SimpleLogger().Write() << "Generating edge-expanded graph representation"; + util::Log() << "Generating edge-expanded graph representation"; TIMER_START(expansion); @@ -267,17 +266,16 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) TIMER_STOP(expansion); - util::SimpleLogger().Write() << "Saving edge-based node weights to file."; + util::Log() << "Saving edge-based node weights to file."; TIMER_START(timer_write_node_weights); util::serializeVector(config.edge_based_node_weights_output_path, edge_based_node_weights); TIMER_STOP(timer_write_node_weights); - util::SimpleLogger().Write() << "Done writing. (" << TIMER_SEC(timer_write_node_weights) - << ")"; + util::Log() << "Done writing. (" << TIMER_SEC(timer_write_node_weights) << ")"; - util::SimpleLogger().Write() << "Computing strictly connected components ..."; + util::Log() << "Computing strictly connected components ..."; FindComponents(max_edge_id, edge_based_edge_list, edge_based_node_list); - util::SimpleLogger().Write() << "Building r-tree ..."; + util::Log() << "Building r-tree ..."; TIMER_START(rtree); BuildRTree(std::move(edge_based_node_list), std::move(node_is_startpoint), @@ -285,7 +283,7 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) TIMER_STOP(rtree); - util::SimpleLogger().Write() << "Writing node map ..."; + util::Log() << "Writing node map ..."; WriteNodeMapping(internal_to_external_node_map); WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list); @@ -295,10 +293,10 @@ int Extractor::run(ScriptingEnvironment &scripting_environment) const auto edges_per_second = static_cast((max_edge_id + 1) / TIMER_SEC(expansion)); - util::SimpleLogger().Write() << "Expansion: " << nodes_per_second << " nodes/sec and " - << edges_per_second << " edges/sec"; - util::SimpleLogger().Write() << "To prepare the data for routing, run: " - 
<< "./osrm-contract " << config.output_file_name << std::endl; + util::Log() << "Expansion: " << nodes_per_second << " nodes/sec and " << edges_per_second + << " edges/sec"; + util::Log() << "To prepare the data for routing, run: " + << "./osrm-contract " << config.output_file_name; } return 0; @@ -310,7 +308,7 @@ void Extractor::WriteProfileProperties(const std::string &output_path, boost::filesystem::ofstream out_stream(output_path); if (!out_stream) { - throw util::exception("Could not open " + output_path + " for writing."); + throw util::exception("Could not open " + output_path + " for writing." + SOURCE_REF); } out_stream.write(reinterpret_cast(&properties), sizeof(properties)); @@ -406,7 +404,7 @@ std::shared_ptr Extractor::LoadRestrictionMap() util::loadRestrictionsFromFile(file_reader, restriction_list); - util::SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions."; + util::Log() << " - " << restriction_list.size() << " restrictions."; return std::make_shared(restriction_list); } @@ -428,16 +426,16 @@ Extractor::LoadNodeBasedGraph(std::unordered_set &barriers, NodeID number_of_node_based_nodes = util::loadNodesFromFile( file_reader, barriers_iter, traffic_signals_iter, internal_to_external_node_map); - util::SimpleLogger().Write() << " - " << barriers.size() << " bollard nodes, " - << traffic_signals.size() << " traffic lights"; + util::Log() << " - " << barriers.size() << " bollard nodes, " << traffic_signals.size() + << " traffic lights"; std::vector edge_list; util::loadEdgesFromFile(file_reader, edge_list); if (edge_list.empty()) { - util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting."; - return std::shared_ptr(); + throw util::exception("Node-based-graph (" + config.output_file_name + + ") contains no edges." 
+ SOURCE_REF); } return util::NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list); @@ -541,9 +539,9 @@ void Extractor::BuildRTree(std::vector node_based_edge_list, std::vector node_is_startpoint, const std::vector &internal_to_external_node_map) { - util::SimpleLogger().Write() << "constructing r-tree of " << node_based_edge_list.size() - << " edge elements build on-top of " - << internal_to_external_node_map.size() << " coordinates"; + util::Log() << "constructing r-tree of " << node_based_edge_list.size() + << " edge elements build on-top of " << internal_to_external_node_map.size() + << " coordinates"; BOOST_ASSERT(node_is_startpoint.size() == node_based_edge_list.size()); @@ -564,7 +562,8 @@ void Extractor::BuildRTree(std::vector node_based_edge_list, if (new_size == 0) { throw util::exception("There are no snappable edges left after processing. Are you " - "setting travel modes correctly in the profile? Cannot continue."); + "setting travel modes correctly in the profile? Cannot continue." + + SOURCE_REF); } node_based_edge_list.resize(new_size); @@ -575,8 +574,7 @@ void Extractor::BuildRTree(std::vector node_based_edge_list, internal_to_external_node_map); TIMER_STOP(construction); - util::SimpleLogger().Write() << "finished r-tree construction in " << TIMER_SEC(construction) - << " seconds"; + util::Log() << "finished r-tree construction in " << TIMER_SEC(construction) << " seconds"; } void Extractor::WriteEdgeBasedGraph( @@ -590,8 +588,7 @@ void Extractor::WriteEdgeBasedGraph( const util::FingerPrint fingerprint = util::FingerPrint::GetValid(); file_out_stream.write((char *)&fingerprint, sizeof(util::FingerPrint)); - util::SimpleLogger().Write() << "[extractor] Writing edge-based-graph edges ... " - << std::flush; + util::Log() << "Writing edge-based-graph edges ... 
" << std::flush; TIMER_START(write_edges); std::uint64_t number_of_used_edges = edge_based_edge_list.size(); @@ -604,9 +601,9 @@ void Extractor::WriteEdgeBasedGraph( } TIMER_STOP(write_edges); - util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl; + util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s"; - util::SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges"; + util::Log() << "Processed " << number_of_used_edges << " edges"; } void Extractor::WriteIntersectionClassificationData( @@ -618,12 +615,11 @@ void Extractor::WriteIntersectionClassificationData( std::ofstream file_out_stream(output_file_name.c_str(), std::ios::binary); if (!file_out_stream) { - util::SimpleLogger().Write(logWARNING) << "Failed to open " << output_file_name - << " for writing"; + util::Log(logERROR) << "Failed to open " << output_file_name << " for writing"; return; } - util::SimpleLogger().Write() << "Writing Intersection Classification Data"; + util::Log() << "Writing Intersection Classification Data"; TIMER_START(write_edges); util::writeFingerprint(file_out_stream); util::serializeVector(file_out_stream, node_based_intersection_classes); @@ -652,16 +648,15 @@ void Extractor::WriteIntersectionClassificationData( if (!static_cast(file_out_stream)) { - throw util::exception("Failed to write to " + output_file_name + "."); + throw util::exception("Failed to write to " + output_file_name + "." + SOURCE_REF); } util::serializeVector(file_out_stream, entry_classes); TIMER_STOP(write_edges); - util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s for " - << node_based_intersection_classes.size() << " Indices into " - << bearing_classes.size() << " bearing classes and " - << entry_classes.size() << " entry classes and " << total_bearings - << " bearing values." 
<< std::endl; + util::Log() << "ok, after " << TIMER_SEC(write_edges) << "s for " + << node_based_intersection_classes.size() << " Indices into " + << bearing_classes.size() << " bearing classes and " << entry_classes.size() + << " entry classes and " << total_bearings << " bearing values."; } void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const @@ -671,27 +666,26 @@ void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const std::vector turn_lane_masks; std::tie(turn_lane_offsets, turn_lane_masks) = transformTurnLaneMapIntoArrays(turn_lane_map); - util::SimpleLogger().Write() << "Writing turn lane masks..."; + util::Log() << "Writing turn lane masks..."; TIMER_START(turn_lane_timer); std::ofstream ofs(turn_lane_file, std::ios::binary); if (!ofs) - throw osrm::util::exception("Failed to open " + turn_lane_file + " for writing."); + throw osrm::util::exception("Failed to open " + turn_lane_file + " for writing." + + SOURCE_REF); if (!util::serializeVector(ofs, turn_lane_offsets)) { - util::SimpleLogger().Write(logWARNING) << "Error while writing."; - return; + throw util::exception("Error while writing to " + turn_lane_file + SOURCE_REF); } if (!util::serializeVector(ofs, turn_lane_masks)) { - util::SimpleLogger().Write(logWARNING) << "Error while writing."; - return; + throw util::exception("Error while writing to " + turn_lane_file + SOURCE_REF); } TIMER_STOP(turn_lane_timer); - util::SimpleLogger().Write() << "done (" << TIMER_SEC(turn_lane_timer) << ")"; + util::Log() << "done (" << TIMER_SEC(turn_lane_timer) << ")"; } } // namespace extractor diff --git a/src/extractor/extractor_callbacks.cpp b/src/extractor/extractor_callbacks.cpp index 676462086b3..16a65eb9cf8 100644 --- a/src/extractor/extractor_callbacks.cpp +++ b/src/extractor/extractor_callbacks.cpp @@ -8,7 +8,7 @@ #include "util/for_each_pair.hpp" #include "util/guidance/turn_lanes.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ 
-62,7 +62,7 @@ void ExtractorCallbacks::ProcessRestriction( if (restriction) { external_memory.restrictions_list.push_back(restriction.get()); - // util::SimpleLogger().Write() << "from: " << restriction.get().restriction.from.node << + // util::Log() << "from: " << restriction.get().restriction.from.node << // ",via: " << restriction.get().restriction.via.node << // ", to: " << restriction.get().restriction.to.node << // ", only: " << (restriction.get().restriction.flags.is_only ? @@ -96,8 +96,8 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti if (std::numeric_limits::max() == input_way.id()) { - util::SimpleLogger().Write(logDEBUG) << "found bogus way with id: " << input_way.id() - << " of size " << input_way.nodes().size(); + util::Log(logDEBUG) << "found bogus way with id: " << input_way.id() << " of size " + << input_way.nodes().size(); return; } @@ -135,8 +135,7 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti if (forward_weight_data.type == InternalExtractorEdge::WeightType::INVALID && backward_weight_data.type == InternalExtractorEdge::WeightType::INVALID) { - util::SimpleLogger().Write(logDEBUG) << "found way with bogus speed, id: " - << input_way.id(); + util::Log(logDEBUG) << "found way with bogus speed, id: " << input_way.id(); return; } @@ -196,8 +195,7 @@ void ExtractorCallbacks::ProcessWay(const osmium::Way &input_way, const Extracti if (translated_mask == TurnLaneType::empty) { // if we have unsupported tags, don't handle them - util::SimpleLogger().Write(logDEBUG) << "Unsupported lane tag found: \"" - << *token_itr << "\""; + util::Log(logDEBUG) << "Unsupported lane tag found: \"" << *token_itr << "\""; return {}; } diff --git a/src/extractor/graph_compressor.cpp b/src/extractor/graph_compressor.cpp index 1e166f225d6..4339cda18c9 100644 --- a/src/extractor/graph_compressor.cpp +++ b/src/extractor/graph_compressor.cpp @@ -6,7 +6,7 @@ #include "util/node_based_graph.hpp" #include 
"util/percent.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" namespace osrm { @@ -22,175 +22,185 @@ void GraphCompressor::Compress(const std::unordered_set &barrier_nodes, const unsigned original_number_of_nodes = graph.GetNumberOfNodes(); const unsigned original_number_of_edges = graph.GetNumberOfEdges(); - util::Percent progress(original_number_of_nodes); - - for (const NodeID node_v : util::irange(0u, original_number_of_nodes)) { - progress.PrintStatus(node_v); + util::UnbufferedLog log; + util::Percent progress(log, original_number_of_nodes); - // only contract degree 2 vertices - if (2 != graph.GetOutDegree(node_v)) + for (const NodeID node_v : util::irange(0u, original_number_of_nodes)) { - continue; - } - - // don't contract barrier node - if (barrier_nodes.end() != barrier_nodes.find(node_v)) - { - continue; - } + progress.PrintStatus(node_v); - // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node - if (restriction_map.IsViaNode(node_v)) - { - continue; - } - - // reverse_e2 forward_e2 - // u <---------- v -----------> w - // ----------> <----------- - // forward_e1 reverse_e1 - // - // Will be compressed to: - // - // reverse_e1 - // u <---------- w - // ----------> - // forward_e1 - // - // If the edges are compatible. 
- const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed; - const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order; - BOOST_ASSERT(SPECIAL_EDGEID != forward_e2); - BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) && forward_e2 < graph.EndEdges(node_v)); - const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order; - BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2); - BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) && reverse_e2 < graph.EndEdges(node_v)); - - const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2); - const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2); - - const NodeID node_w = graph.GetTarget(forward_e2); - BOOST_ASSERT(SPECIAL_NODEID != node_w); - BOOST_ASSERT(node_v != node_w); - const NodeID node_u = graph.GetTarget(reverse_e2); - BOOST_ASSERT(SPECIAL_NODEID != node_u); - BOOST_ASSERT(node_u != node_v); - - const EdgeID forward_e1 = graph.FindEdge(node_u, node_v); - BOOST_ASSERT(SPECIAL_EDGEID != forward_e1); - BOOST_ASSERT(node_v == graph.GetTarget(forward_e1)); - const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v); - BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1); - BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1)); - - const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1); - const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1); - - if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID) - { - continue; - } + // only contract degree 2 vertices + if (2 != graph.GetOutDegree(node_v)) + { + continue; + } - // this case can happen if two ways with different names overlap - if (fwd_edge_data1.name_id != rev_edge_data1.name_id || - fwd_edge_data2.name_id != rev_edge_data2.name_id) - { - continue; - } + // don't contract barrier node + if (barrier_nodes.end() != barrier_nodes.find(node_v)) + { + continue; + } - if (fwd_edge_data1.CanCombineWith(fwd_edge_data2) && - rev_edge_data1.CanCombineWith(rev_edge_data2)) - { - 
BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id == - graph.GetEdgeData(reverse_e1).name_id); - BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id == - graph.GetEdgeData(reverse_e2).name_id); - - // Do not compress edge if it crosses a traffic signal. - // This can't be done in CanCombineWith, becase we only store the - // traffic signals in the `traffic_lights` list, which EdgeData - // doesn't have access to. - const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end(); - if (has_node_penalty) + // check if v is a via node for a turn restriction, i.e. a 'directed' barrier node + if (restriction_map.IsViaNode(node_v)) + { continue; + } + + // reverse_e2 forward_e2 + // u <---------- v -----------> w + // ----------> <----------- + // forward_e1 reverse_e1 + // + // Will be compressed to: + // + // reverse_e1 + // u <---------- w + // ----------> + // forward_e1 + // + // If the edges are compatible. + const bool reverse_edge_order = graph.GetEdgeData(graph.BeginEdges(node_v)).reversed; + const EdgeID forward_e2 = graph.BeginEdges(node_v) + reverse_edge_order; + BOOST_ASSERT(SPECIAL_EDGEID != forward_e2); + BOOST_ASSERT(forward_e2 >= graph.BeginEdges(node_v) && + forward_e2 < graph.EndEdges(node_v)); + const EdgeID reverse_e2 = graph.BeginEdges(node_v) + 1 - reverse_edge_order; + BOOST_ASSERT(SPECIAL_EDGEID != reverse_e2); + BOOST_ASSERT(reverse_e2 >= graph.BeginEdges(node_v) && + reverse_e2 < graph.EndEdges(node_v)); + + const EdgeData &fwd_edge_data2 = graph.GetEdgeData(forward_e2); + const EdgeData &rev_edge_data2 = graph.GetEdgeData(reverse_e2); + + const NodeID node_w = graph.GetTarget(forward_e2); + BOOST_ASSERT(SPECIAL_NODEID != node_w); + BOOST_ASSERT(node_v != node_w); + const NodeID node_u = graph.GetTarget(reverse_e2); + BOOST_ASSERT(SPECIAL_NODEID != node_u); + BOOST_ASSERT(node_u != node_v); + + const EdgeID forward_e1 = graph.FindEdge(node_u, node_v); + BOOST_ASSERT(SPECIAL_EDGEID != forward_e1); + BOOST_ASSERT(node_v == 
graph.GetTarget(forward_e1)); + const EdgeID reverse_e1 = graph.FindEdge(node_w, node_v); + BOOST_ASSERT(SPECIAL_EDGEID != reverse_e1); + BOOST_ASSERT(node_v == graph.GetTarget(reverse_e1)); + + const EdgeData &fwd_edge_data1 = graph.GetEdgeData(forward_e1); + const EdgeData &rev_edge_data1 = graph.GetEdgeData(reverse_e1); + + if (graph.FindEdgeInEitherDirection(node_u, node_w) != SPECIAL_EDGEID) + { + continue; + } - // Get distances before graph is modified - const int forward_weight1 = graph.GetEdgeData(forward_e1).distance; - const int forward_weight2 = graph.GetEdgeData(forward_e2).distance; - - BOOST_ASSERT(0 != forward_weight1); - BOOST_ASSERT(0 != forward_weight2); - - const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance; - const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance; - - BOOST_ASSERT(0 != reverse_weight1); - BOOST_ASSERT(0 != reverse_weight2); - - // add weight of e2's to e1 - graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance; - graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance; - - // extend e1's to targets of e2's - graph.SetTarget(forward_e1, node_w); - graph.SetTarget(reverse_e1, node_u); - - /* - * Remember Lane Data for compressed parts. This handles scenarios where lane-data is - * only kept up until a traffic light. - * - * | | - * ---------------- | - * -^ | | - * ----------- | - * -v | | - * --------------- | - * | | - * - * u ------- v ---- w - * - * Since the edge is compressable, we can transfer: - * "left|right" (uv) and "" (uw) into a string with "left|right" (uw) for the compressed - * edge. - * Doing so, we might mess up the point from where the lanes are shown. It should be - * reasonable, since the announcements have to come early anyhow. So there is a - * potential danger in here, but it saves us from adding a lot of additional edges for - * turn-lanes. Without this,we would have to treat any turn-lane beginning/ending just - * like a barrier. 
- */ - const auto selectLaneID = [](const LaneDescriptionID front, - const LaneDescriptionID back) { - // A lane has tags: u - (front) - v - (back) - w - // During contraction, we keep only one of the tags. Usually the one closer to the - // intersection is preferred. If its empty, however, we keep the non-empty one - if (back == INVALID_LANE_DESCRIPTIONID) - return front; - return back; - }; - graph.GetEdgeData(forward_e1).lane_description_id = - selectLaneID(graph.GetEdgeData(forward_e1).lane_description_id, - fwd_edge_data2.lane_description_id); - graph.GetEdgeData(reverse_e1).lane_description_id = - selectLaneID(graph.GetEdgeData(reverse_e1).lane_description_id, - rev_edge_data2.lane_description_id); - - // remove e2's (if bidir, otherwise only one) - graph.DeleteEdge(node_v, forward_e2); - graph.DeleteEdge(node_v, reverse_e2); - - // update any involved turn restrictions - restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w); - restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph); - - restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u); - restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph); - - // store compressed geometry in container - geometry_compressor.CompressEdge( - forward_e1, forward_e2, node_v, node_w, forward_weight1, forward_weight2); - geometry_compressor.CompressEdge( - reverse_e1, reverse_e2, node_v, node_u, reverse_weight1, reverse_weight2); + // this case can happen if two ways with different names overlap + if (fwd_edge_data1.name_id != rev_edge_data1.name_id || + fwd_edge_data2.name_id != rev_edge_data2.name_id) + { + continue; + } + + if (fwd_edge_data1.CanCombineWith(fwd_edge_data2) && + rev_edge_data1.CanCombineWith(rev_edge_data2)) + { + BOOST_ASSERT(graph.GetEdgeData(forward_e1).name_id == + graph.GetEdgeData(reverse_e1).name_id); + BOOST_ASSERT(graph.GetEdgeData(forward_e2).name_id == + graph.GetEdgeData(reverse_e2).name_id); + + // Do not compress edge if 
it crosses a traffic signal. + // This can't be done in CanCombineWith, becase we only store the + // traffic signals in the `traffic_lights` list, which EdgeData + // doesn't have access to. + const bool has_node_penalty = traffic_lights.find(node_v) != traffic_lights.end(); + if (has_node_penalty) + continue; + + // Get distances before graph is modified + const int forward_weight1 = graph.GetEdgeData(forward_e1).distance; + const int forward_weight2 = graph.GetEdgeData(forward_e2).distance; + + BOOST_ASSERT(0 != forward_weight1); + BOOST_ASSERT(0 != forward_weight2); + + const int reverse_weight1 = graph.GetEdgeData(reverse_e1).distance; + const int reverse_weight2 = graph.GetEdgeData(reverse_e2).distance; + + BOOST_ASSERT(0 != reverse_weight1); + BOOST_ASSERT(0 != reverse_weight2); + + // add weight of e2's to e1 + graph.GetEdgeData(forward_e1).distance += fwd_edge_data2.distance; + graph.GetEdgeData(reverse_e1).distance += rev_edge_data2.distance; + + // extend e1's to targets of e2's + graph.SetTarget(forward_e1, node_w); + graph.SetTarget(reverse_e1, node_u); + + /* + * Remember Lane Data for compressed parts. This handles scenarios where lane-data + * is + * only kept up until a traffic light. + * + * | | + * ---------------- | + * -^ | | + * ----------- | + * -v | | + * --------------- | + * | | + * + * u ------- v ---- w + * + * Since the edge is compressable, we can transfer: + * "left|right" (uv) and "" (uw) into a string with "left|right" (uw) for the + * compressed + * edge. + * Doing so, we might mess up the point from where the lanes are shown. It should be + * reasonable, since the announcements have to come early anyhow. So there is a + * potential danger in here, but it saves us from adding a lot of additional edges + * for + * turn-lanes. Without this,we would have to treat any turn-lane beginning/ending + * just + * like a barrier. 
+ */ + const auto selectLaneID = [](const LaneDescriptionID front, + const LaneDescriptionID back) { + // A lane has tags: u - (front) - v - (back) - w + // During contraction, we keep only one of the tags. Usually the one closer to + // the + // intersection is preferred. If its empty, however, we keep the non-empty one + if (back == INVALID_LANE_DESCRIPTIONID) + return front; + return back; + }; + graph.GetEdgeData(forward_e1).lane_description_id = + selectLaneID(graph.GetEdgeData(forward_e1).lane_description_id, + fwd_edge_data2.lane_description_id); + graph.GetEdgeData(reverse_e1).lane_description_id = + selectLaneID(graph.GetEdgeData(reverse_e1).lane_description_id, + rev_edge_data2.lane_description_id); + + // remove e2's (if bidir, otherwise only one) + graph.DeleteEdge(node_v, forward_e2); + graph.DeleteEdge(node_v, reverse_e2); + + // update any involved turn restrictions + restriction_map.FixupStartingTurnRestriction(node_u, node_v, node_w); + restriction_map.FixupArrivingTurnRestriction(node_u, node_v, node_w, graph); + + restriction_map.FixupStartingTurnRestriction(node_w, node_v, node_u); + restriction_map.FixupArrivingTurnRestriction(node_w, node_v, node_u, graph); + + // store compressed geometry in container + geometry_compressor.CompressEdge( + forward_e1, forward_e2, node_v, node_w, forward_weight1, forward_weight2); + geometry_compressor.CompressEdge( + reverse_e1, reverse_e2, node_v, node_u, reverse_weight1, reverse_weight2); + } } } @@ -226,10 +236,8 @@ void GraphCompressor::PrintStatistics(unsigned original_number_of_nodes, new_edge_count += (graph.EndEdges(i) - graph.BeginEdges(i)); } } - util::SimpleLogger().Write() << "Node compression ratio: " - << new_node_count / (double)original_number_of_nodes; - util::SimpleLogger().Write() << "Edge compression ratio: " - << new_edge_count / (double)original_number_of_edges; + util::Log() << "Node compression ratio: " << new_node_count / (double)original_number_of_nodes; + util::Log() << "Edge 
compression ratio: " << new_edge_count / (double)original_number_of_edges; } } } diff --git a/src/extractor/guidance/intersection_handler.cpp b/src/extractor/guidance/intersection_handler.cpp index 84e1e19209c..a15c31224d2 100644 --- a/src/extractor/guidance/intersection_handler.cpp +++ b/src/extractor/guidance/intersection_handler.cpp @@ -4,7 +4,7 @@ #include "util/coordinate_calculation.hpp" #include "util/guidance/toolkit.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include diff --git a/src/extractor/guidance/motorway_handler.cpp b/src/extractor/guidance/motorway_handler.cpp index f629fa82c4a..dbd53b580ac 100644 --- a/src/extractor/guidance/motorway_handler.cpp +++ b/src/extractor/guidance/motorway_handler.cpp @@ -4,7 +4,7 @@ #include "extractor/guidance/toolkit.hpp" #include "util/guidance/toolkit.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -197,9 +197,9 @@ Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in else if (countValid(intersection) > 0) // check whether turns exist at all { // FALLBACK, this should hopefully never be reached - util::SimpleLogger().Write(logDEBUG) - << "Fallback reached from motorway, no continue angle, " << intersection.size() - << " roads, " << countValid(intersection) << " valid ones."; + util::Log(logDEBUG) << "Fallback reached from motorway, no continue angle, " + << intersection.size() << " roads, " << countValid(intersection) + << " valid ones."; return fallback(std::move(intersection)); } } @@ -275,7 +275,7 @@ Intersection MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in via_eid, isThroughStreet(1, intersection), intersection[1]); - util::SimpleLogger().Write(logDEBUG) << "Disabled U-Turn on a freeway"; + util::Log(logDEBUG) << "Disabled U-Turn on a freeway"; intersection[0].entry_allowed = false; // UTURN on the freeway } else if (exiting_motorways == 2) @@ -334,8 +334,8 @@ Intersection 
MotorwayHandler::fromMotorway(const EdgeID via_eid, Intersection in } else { - util::SimpleLogger().Write(logDEBUG) << "Found motorway junction with more than " - "2 exiting motorways or additional ramps"; + util::Log(logDEBUG) << "Found motorway junction with more than " + "2 exiting motorways or additional ramps"; return fallback(std::move(intersection)); } } // done for more than one highway exit @@ -489,9 +489,8 @@ Intersection MotorwayHandler::fromRamp(const EdgeID via_eid, Intersection inters } else { // FALLBACK, hopefully this should never been reached - util::SimpleLogger().Write(logDEBUG) << "Reached fallback on motorway ramp with " - << intersection.size() << " roads and " - << countValid(intersection) << " valid turns."; + util::Log(logDEBUG) << "Reached fallback on motorway ramp with " << intersection.size() + << " roads and " << countValid(intersection) << " valid turns."; return fallback(std::move(intersection)); } return intersection; diff --git a/src/extractor/guidance/roundabout_handler.cpp b/src/extractor/guidance/roundabout_handler.cpp index cefaafd8b1e..bbfe7c07998 100644 --- a/src/extractor/guidance/roundabout_handler.cpp +++ b/src/extractor/guidance/roundabout_handler.cpp @@ -4,7 +4,7 @@ #include "util/coordinate_calculation.hpp" #include "util/guidance/toolkit.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include diff --git a/src/extractor/guidance/turn_analysis.cpp b/src/extractor/guidance/turn_analysis.cpp index d1575c93a29..a2639e8e318 100644 --- a/src/extractor/guidance/turn_analysis.cpp +++ b/src/extractor/guidance/turn_analysis.cpp @@ -5,7 +5,7 @@ #include "util/coordinate.hpp" #include "util/coordinate_calculation.hpp" #include "util/guidance/toolkit.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include diff --git a/src/extractor/guidance/turn_classification.cpp b/src/extractor/guidance/turn_classification.cpp index aeb34179dfd..73fcc134c70 100644 --- 
a/src/extractor/guidance/turn_classification.cpp +++ b/src/extractor/guidance/turn_classification.cpp @@ -1,6 +1,6 @@ #include "extractor/guidance/turn_classification.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include diff --git a/src/extractor/guidance/turn_lane_augmentation.cpp b/src/extractor/guidance/turn_lane_augmentation.cpp index 950355bb622..cf52aa9eedc 100644 --- a/src/extractor/guidance/turn_lane_augmentation.cpp +++ b/src/extractor/guidance/turn_lane_augmentation.cpp @@ -1,6 +1,6 @@ #include "extractor/guidance/turn_lane_augmentation.hpp" #include "extractor/guidance/turn_lane_types.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -104,7 +104,7 @@ LaneDataVector augmentMultiple(const std::size_t none_index, } } // this should, theoretically, never be reached - util::SimpleLogger().Write(logWARNING) << "Failed lane assignment. Reached bad situation."; + util::Log(logWARNING) << "Failed lane assignment. Reached bad situation."; return std::make_pair(std::size_t{0}, std::size_t{0}); }(); for (auto intersection_index = range.first; intersection_index < range.second; diff --git a/src/extractor/guidance/turn_lane_handler.cpp b/src/extractor/guidance/turn_lane_handler.cpp index 1afa4b8d382..a93993944af 100644 --- a/src/extractor/guidance/turn_lane_handler.cpp +++ b/src/extractor/guidance/turn_lane_handler.cpp @@ -3,7 +3,7 @@ #include "extractor/guidance/turn_discovery.hpp" #include "extractor/guidance/turn_lane_augmentation.hpp" #include "extractor/guidance/turn_lane_matcher.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" #include @@ -46,8 +46,8 @@ TurnLaneHandler::TurnLaneHandler(const util::NodeBasedDynamicGraph &node_based_g TurnLaneHandler::~TurnLaneHandler() { - std::cout << "Handled: " << count_handled << " of " << count_called - << " lanes: " << (double)(count_handled * 100) / (count_called) << " %." 
<< std::endl; + util::Log() << "Handled: " << count_handled << " of " << count_called + << " lanes: " << (double)(count_handled * 100) / (count_called) << " %."; } /* diff --git a/src/extractor/raster_source.cpp b/src/extractor/raster_source.cpp index 9570ca4fbf9..03fffad4302 100644 --- a/src/extractor/raster_source.cpp +++ b/src/extractor/raster_source.cpp @@ -1,7 +1,10 @@ #include "extractor/raster_source.hpp" -#include "util/simple_logger.hpp" +#include "util/exception.hpp" +#include "util/exception_utils.hpp" +#include "util/log.hpp" #include "util/timing_util.hpp" +#include "util/typedefs.hpp" #include @@ -92,20 +95,20 @@ int SourceContainer::LoadRasterSource(const std::string &path_string, const auto itr = LoadedSourcePaths.find(path_string); if (itr != LoadedSourcePaths.end()) { - util::SimpleLogger().Write() << "[source loader] Already loaded source '" << path_string - << "' at source_id " << itr->second; + util::Log() << "[source loader] Already loaded source '" << path_string << "' at source_id " + << itr->second; return itr->second; } int source_id = static_cast(LoadedSources.size()); - util::SimpleLogger().Write() << "[source loader] Loading from " << path_string << " ... "; + util::Log() << "[source loader] Loading from " << path_string << " ... 
"; TIMER_START(loading_source); boost::filesystem::path filepath(path_string); if (!boost::filesystem::exists(filepath)) { - throw util::exception("error reading: no such path"); + throw util::exception(path_string + " does not exist" + SOURCE_REF); } RasterGrid rasterData{filepath, ncols, nrows}; @@ -115,8 +118,7 @@ int SourceContainer::LoadRasterSource(const std::string &path_string, LoadedSourcePaths.emplace(path_string, source_id); LoadedSources.push_back(std::move(source)); - util::SimpleLogger().Write() << "[source loader] ok, after " << TIMER_SEC(loading_source) - << "s"; + util::Log() << "[source loader] ok, after " << TIMER_SEC(loading_source) << "s"; return source_id; } @@ -126,7 +128,9 @@ RasterDatum SourceContainer::GetRasterDataFromSource(unsigned int source_id, dou { if (LoadedSources.size() < source_id + 1) { - throw util::exception("error reading: no such loaded source"); + throw util::exception("Attempted to access source " + std::to_string(source_id) + + ", but there are only " + std::to_string(LoadedSources.size()) + + " loaded" + SOURCE_REF); } BOOST_ASSERT(lat < 90); @@ -145,7 +149,9 @@ SourceContainer::GetRasterInterpolateFromSource(unsigned int source_id, double l { if (LoadedSources.size() < source_id + 1) { - throw util::exception("error reading: no such loaded source"); + throw util::exception("Attempted to access source " + std::to_string(source_id) + + ", but there are only " + std::to_string(LoadedSources.size()) + + " loaded" + SOURCE_REF); } BOOST_ASSERT(lat < 90); diff --git a/src/extractor/restriction_parser.cpp b/src/extractor/restriction_parser.cpp index 7c466a5363c..a5b5b412c9a 100644 --- a/src/extractor/restriction_parser.cpp +++ b/src/extractor/restriction_parser.cpp @@ -4,7 +4,7 @@ #include "extractor/external_memory_node.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -33,15 +33,15 @@ RestrictionParser::RestrictionParser(ScriptingEnvironment &scripting_environment const unsigned count = 
restrictions.size(); if (count > 0) { - util::SimpleLogger().Write() << "Found " << count << " turn restriction tags:"; + util::Log() << "Found " << count << " turn restriction tags:"; for (const std::string &str : restrictions) { - util::SimpleLogger().Write() << " " << str; + util::Log() << " " << str; } } else { - util::SimpleLogger().Write() << "Found no turn restriction tags"; + util::Log() << "Found no turn restriction tags"; } } } diff --git a/src/extractor/scripting_environment_lua.cpp b/src/extractor/scripting_environment_lua.cpp index ee3978ed88d..274a82eb5a9 100644 --- a/src/extractor/scripting_environment_lua.cpp +++ b/src/extractor/scripting_environment_lua.cpp @@ -9,8 +9,9 @@ #include "extractor/raster_source.hpp" #include "extractor/restriction_parser.hpp" #include "util/exception.hpp" +#include "util/exception_utils.hpp" +#include "util/log.hpp" #include "util/lua_util.hpp" -#include "util/simple_logger.hpp" #include "util/typedefs.hpp" #include @@ -65,14 +66,14 @@ int luaErrorCallback(lua_State *state) std::string error_msg = lua_tostring(state, -1); std::ostringstream error_stream; error_stream << error_msg; - throw util::exception("ERROR occurred in profile script:\n" + error_stream.str()); + throw util::exception("ERROR occurred in profile script:\n" + error_stream.str() + SOURCE_REF); } } LuaScriptingEnvironment::LuaScriptingEnvironment(const std::string &file_name) : file_name(file_name) { - util::SimpleLogger().Write() << "Using script " << file_name; + util::Log() << "Using script " << file_name; } void LuaScriptingEnvironment::InitContext(LuaScriptingContext &context) @@ -265,7 +266,8 @@ void LuaScriptingEnvironment::InitContext(LuaScriptingContext &context) luabind::object error_msg(luabind::from_stack(context.state, -1)); std::ostringstream error_stream; error_stream << error_msg; - throw util::exception("ERROR occurred in profile script:\n" + error_stream.str()); + throw util::exception("ERROR occurred in profile script:\n" + 
error_stream.str() + + SOURCE_REF); } context.has_turn_penalty_function = util::luaFunctionExists(context.state, "turn_function"); @@ -360,7 +362,7 @@ std::vector LuaScriptingEnvironment::GetNameSuffixList() } catch (const luabind::error &er) { - util::SimpleLogger().Write(logWARNING) << er.what(); + util::Log(logWARNING) << er.what(); } return suffixes_vector; @@ -406,7 +408,7 @@ int32_t LuaScriptingEnvironment::GetTurnPenalty(const double angle) } catch (const luabind::error &er) { - util::SimpleLogger().Write(logWARNING) << er.what(); + util::Log(logWARNING) << er.what(); } } return 0; diff --git a/src/server/request_handler.cpp b/src/server/request_handler.cpp index 4a3a013e199..ed606d359af 100644 --- a/src/server/request_handler.cpp +++ b/src/server/request_handler.cpp @@ -6,7 +6,7 @@ #include "server/http/request.hpp" #include "util/json_renderer.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/string_util.hpp" #include "util/timing_util.hpp" #include "util/typedefs.hpp" @@ -42,7 +42,7 @@ void RequestHandler::HandleRequest(const http::request ¤t_request, http::r if (!service_handler) { current_reply = http::reply::stock_reply(http::reply::internal_server_error); - util::SimpleLogger().Write(logWARNING) << "No service handler registered." << std::endl; + util::Log(logWARNING) << "No service handler registered." 
<< std::endl; return; } @@ -52,7 +52,7 @@ void RequestHandler::HandleRequest(const http::request ¤t_request, http::r TIMER_START(request_duration); std::string request_string; util::URIDecode(current_request.uri, request_string); - util::SimpleLogger().Write(logDEBUG) << "req: " << request_string; + util::Log(logDEBUG) << "req: " << request_string; auto api_iterator = request_string.begin(); auto maybe_parsed_url = api::parseURL(api_iterator, request_string.end()); @@ -125,7 +125,7 @@ void RequestHandler::HandleRequest(const http::request ¤t_request, http::r { // deactivated as GCC apparently does not implement that, not even in 4.9 // std::time_t t = std::time(nullptr); - // util::SimpleLogger().Write() << std::put_time(std::localtime(&t), "%m-%d-%Y + // util::Log() << std::put_time(std::localtime(&t), "%m-%d-%Y // %H:%M:%S") << // " " << current_request.endpoint.to_string() << " " << // current_request.referrer << ( 0 == current_request.referrer.length() ? "- " :" ") @@ -140,25 +140,26 @@ void RequestHandler::HandleRequest(const http::request ¤t_request, http::r ltime = time(nullptr); time_stamp = localtime(<ime); // log timestamp - util::SimpleLogger().Write() - << (time_stamp->tm_mday < 10 ? "0" : "") << time_stamp->tm_mday << "-" - << (time_stamp->tm_mon + 1 < 10 ? "0" : "") << (time_stamp->tm_mon + 1) << "-" - << 1900 + time_stamp->tm_year << " " << (time_stamp->tm_hour < 10 ? "0" : "") - << time_stamp->tm_hour << ":" << (time_stamp->tm_min < 10 ? "0" : "") - << time_stamp->tm_min << ":" << (time_stamp->tm_sec < 10 ? "0" : "") - << time_stamp->tm_sec << " " << TIMER_MSEC(request_duration) << "ms " - << current_request.endpoint.to_string() << " " << current_request.referrer - << (0 == current_request.referrer.length() ? "- " : " ") << current_request.agent - << (0 == current_request.agent.length() ? "- " : " ") << current_reply.status - << " " // - << request_string; + util::Log() << (time_stamp->tm_mday < 10 ? 
"0" : "") << time_stamp->tm_mday << "-" + << (time_stamp->tm_mon + 1 < 10 ? "0" : "") << (time_stamp->tm_mon + 1) + << "-" << 1900 + time_stamp->tm_year << " " + << (time_stamp->tm_hour < 10 ? "0" : "") << time_stamp->tm_hour << ":" + << (time_stamp->tm_min < 10 ? "0" : "") << time_stamp->tm_min << ":" + << (time_stamp->tm_sec < 10 ? "0" : "") << time_stamp->tm_sec << " " + << TIMER_MSEC(request_duration) << "ms " + << current_request.endpoint.to_string() << " " << current_request.referrer + << (0 == current_request.referrer.length() ? "- " : " ") + << current_request.agent + << (0 == current_request.agent.length() ? "- " : " ") + << current_reply.status << " " // + << request_string; } } catch (const std::exception &e) { current_reply = http::reply::stock_reply(http::reply::internal_server_error); - util::SimpleLogger().Write(logWARNING) << "[server error] code: " << e.what() - << ", uri: " << current_request.uri; + util::Log(logWARNING) << "[server error] code: " << e.what() + << ", uri: " << current_request.uri; } } } diff --git a/src/storage/storage.cpp b/src/storage/storage.cpp index e270aaff52f..47c00fda8f4 100644 --- a/src/storage/storage.cpp +++ b/src/storage/storage.cpp @@ -14,12 +14,13 @@ #include "engine/datafacade/datafacade_base.hpp" #include "util/coordinate.hpp" #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/fingerprint.hpp" #include "util/io.hpp" +#include "util/log.hpp" #include "util/packed_vector.hpp" #include "util/range_table.hpp" #include "util/shared_memory_vector_wrapper.hpp" -#include "util/simple_logger.hpp" #include "util/static_graph.hpp" #include "util/static_rtree.hpp" #include "util/typedefs.hpp" @@ -105,7 +106,7 @@ Storage::ReturnCode Storage::Run(int max_wait) { if (!current_regions_lock.try_lock()) { - util::SimpleLogger().Write(logWARNING) << "A data update is in progress"; + util::Log(logWARNING) << "A data update is in progress"; return ReturnCode::Error; } } @@ -122,7 +123,7 @@ 
Storage::ReturnCode Storage::Run(int max_wait) const bool lock_flags = MCL_CURRENT | MCL_FUTURE; if (-1 == mlockall(lock_flags)) { - util::SimpleLogger().Write(logWARNING) << "Could not request RAM lock"; + util::Log(logWARNING) << "Could not request RAM lock"; } #endif @@ -132,12 +133,12 @@ Storage::ReturnCode Storage::Run(int max_wait) if (max_wait > 0) { - util::SimpleLogger().Write() << "Waiting for " << max_wait - << " second for all queries on the old dataset to finish:"; + util::Log() << "Waiting for " << max_wait + << " seconds for all queries on the old dataset to finish:"; } else { - util::SimpleLogger().Write() << "Waiting for all queries on the old dataset to finish:"; + util::Log() << "Waiting for all queries on the old dataset to finish:"; } boost::interprocess::scoped_lock regions_lock( @@ -148,8 +149,8 @@ Storage::ReturnCode Storage::Run(int max_wait) if (!regions_lock.timed_lock(boost::posix_time::microsec_clock::universal_time() + boost::posix_time::seconds(max_wait))) { - util::SimpleLogger().Write(logWARNING) << "Queries did not finish in " << max_wait - << " seconds. Claiming the lock by force."; + util::Log(logWARNING) << "Queries did not finish in " << max_wait + << " seconds. 
Claiming the lock by force."; // WARNING: if queries are still using the old dataset they might crash if (regions_layout.old_layout_region == LAYOUT_1) { @@ -170,16 +171,18 @@ Storage::ReturnCode Storage::Run(int max_wait) { regions_lock.lock(); } - util::SimpleLogger().Write() << "Ok."; + util::Log() << "Ok."; // since we can't change the size of a shared memory regions we delete and reallocate if (SharedMemory::RegionExists(layout_region) && !SharedMemory::Remove(layout_region)) { - throw util::exception("Could not remove " + regionToString(layout_region)); + throw util::exception("Could not remove shared memory region " + + regionToString(layout_region) + SOURCE_REF); } if (SharedMemory::RegionExists(data_region) && !SharedMemory::Remove(data_region)) { - throw util::exception("Could not remove " + regionToString(data_region)); + throw util::exception("Could not remove shared memory region " + + regionToString(data_region) + SOURCE_REF); } // Allocate a memory layout in shared memory @@ -189,8 +192,8 @@ Storage::ReturnCode Storage::Run(int max_wait) PopulateLayout(*shared_layout_ptr); // allocate shared memory block - util::SimpleLogger().Write() << "allocating shared memory of " - << shared_layout_ptr->GetSizeOfLayout() << " bytes"; + util::Log() << "allocating shared memory of " << shared_layout_ptr->GetSizeOfLayout() + << " bytes"; auto shared_memory = makeSharedMemory(data_region, shared_layout_ptr->GetSizeOfLayout(), true); char *shared_memory_ptr = static_cast(shared_memory->Ptr()); @@ -207,8 +210,7 @@ Storage::ReturnCode Storage::Run(int max_wait) if (max_wait > 0) { - util::SimpleLogger().Write() << "Waiting for " << max_wait - << " seconds to write new dataset timestamp"; + util::Log() << "Waiting for " << max_wait << " seconds to write new dataset timestamp"; auto end_time = boost::posix_time::microsec_clock::universal_time() + boost::posix_time::seconds(max_wait); current_regions_exclusive_lock = @@ -217,9 +219,8 @@ Storage::ReturnCode 
Storage::Run(int max_wait) if (!current_regions_exclusive_lock.owns()) { - util::SimpleLogger().Write(logWARNING) << "Aquiring the lock timed out after " - << max_wait - << " seconds. Claiming the lock by force."; + util::Log(logWARNING) << "Acquiring the lock timed out after " << max_wait + << " seconds. Claiming the lock by force."; current_regions_lock.unlock(); current_regions_lock.release(); storage::SharedBarriers::resetCurrentRegions(); @@ -228,18 +229,18 @@ Storage::ReturnCode Storage::Run(int max_wait) } else { - util::SimpleLogger().Write() << "Waiting to write new dataset timestamp"; + util::Log() << "Waiting to write new dataset timestamp"; current_regions_exclusive_lock = boost::interprocess::scoped_lock( std::move(current_regions_lock)); } - util::SimpleLogger().Write() << "Ok."; + util::Log() << "Ok."; data_timestamp_ptr->layout = layout_region; data_timestamp_ptr->data = data_region; data_timestamp_ptr->timestamp += 1; } - util::SimpleLogger().Write() << "All data loaded."; + util::Log() << "All data loaded."; return ReturnCode::Ok; } @@ -260,7 +261,7 @@ void Storage::PopulateLayout(DataLayout &layout) { // collect number of elements to store in shared memory object - util::SimpleLogger().Write() << "load names from: " << config.names_data_path; + util::Log() << "load names from: " << config.names_data_path; // number of entries in name index io::FileReader name_file(config.names_data_path, io::FileReader::HasNoFingerprint); diff --git a/src/storage/storage_config.cpp b/src/storage/storage_config.cpp index c1edaf6f790..221c27c9ec3 100644 --- a/src/storage/storage_config.cpp +++ b/src/storage/storage_config.cpp @@ -1,5 +1,5 @@ #include "storage/storage_config.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include @@ -43,7 +43,7 @@ bool StorageConfig::IsValid() const { if (!boost::filesystem::is_regular_file(*path)) { - util::SimpleLogger().Write(logWARNING) << "Missing/Broken File: " << path->string(); + util::Log(logWARNING) << 
"Missing/Broken File: " << path->string(); success = false; } } diff --git a/src/tools/components.cpp b/src/tools/components.cpp index 9681d4985ea..5c8a4ffaadc 100644 --- a/src/tools/components.cpp +++ b/src/tools/components.cpp @@ -2,9 +2,10 @@ #include "util/coordinate_calculation.hpp" #include "util/dynamic_graph.hpp" #include "util/exception.hpp" +#include "util/exception_utils.hpp" #include "util/fingerprint.hpp" #include "util/graph_loader.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/static_graph.hpp" #include "util/typedefs.hpp" @@ -102,7 +103,7 @@ int main(int argc, char *argv[]) // enable logging if (argc < 2) { - osrm::util::SimpleLogger().Write(logWARNING) << "usage:\n" << argv[0] << " "; + osrm::util::Log(logWARNING) << "usage:\n" << argv[0] << " "; return EXIT_FAILURE; } @@ -115,14 +116,12 @@ int main(int argc, char *argv[]) graph_edge_list.clear(); graph_edge_list.shrink_to_fit(); - osrm::util::SimpleLogger().Write() << "Starting SCC graph traversal"; + osrm::util::Log() << "Starting SCC graph traversal"; auto tarjan = std::make_unique>(graph); tarjan->Run(); - osrm::util::SimpleLogger().Write() << "identified: " << tarjan->GetNumberOfComponents() - << " many components"; - osrm::util::SimpleLogger().Write() << "identified " << tarjan->GetSizeOneCount() - << " size 1 SCCs"; + osrm::util::Log() << "identified: " << tarjan->GetNumberOfComponents() << " many components"; + osrm::util::Log() << "identified " << tarjan->GetSizeOneCount() << " size 1 SCCs"; // output TIMER_START(SCC_RUN_SETUP); @@ -138,13 +137,13 @@ int main(int argc, char *argv[]) auto *po_driver = OGRSFDriverRegistrar::GetRegistrar()->GetDriverByName(psz_driver_name); if (nullptr == po_driver) { - throw osrm::util::exception("ESRI Shapefile driver not available"); + throw osrm::util::exception("ESRI Shapefile driver not available" + SOURCE_REF); } auto *po_datasource = po_driver->CreateDataSource("component.shp", nullptr); if (nullptr == po_datasource) { 
- throw osrm::util::exception("Creation of output file failed"); + throw osrm::util::exception("Creation of output file failed" + SOURCE_REF); } auto *po_srs = new OGRSpatialReference(); @@ -154,55 +153,62 @@ int main(int argc, char *argv[]) if (nullptr == po_layer) { - throw osrm::util::exception("Layer creation failed."); + throw osrm::util::exception("Layer creation failed." + SOURCE_REF); } TIMER_STOP(SCC_RUN_SETUP); - osrm::util::SimpleLogger().Write() << "shapefile setup took " - << TIMER_MSEC(SCC_RUN_SETUP) / 1000. << "s"; + osrm::util::Log() << "shapefile setup took " << TIMER_MSEC(SCC_RUN_SETUP) / 1000. << "s"; - uint64_t total_network_length = 0; - osrm::util::Percent percentage(graph->GetNumberOfNodes()); TIMER_START(SCC_OUTPUT); - for (const NodeID source : osrm::util::irange(0u, graph->GetNumberOfNodes())) + uint64_t total_network_length = 0; { - percentage.PrintIncrement(); - for (const auto current_edge : graph->GetAdjacentEdgeRange(source)) + osrm::util::UnbufferedLog log; + log << "Constructing geometry "; + osrm::util::Percent percentage(log, graph->GetNumberOfNodes()); + for (const NodeID source : osrm::util::irange(0u, graph->GetNumberOfNodes())) { - const auto target = graph->GetTarget(current_edge); - - if (source < target || SPECIAL_EDGEID == graph->FindEdge(target, source)) + percentage.PrintIncrement(); + for (const auto current_edge : graph->GetAdjacentEdgeRange(source)) { - total_network_length += - 100 * osrm::util::coordinate_calculation::greatCircleDistance( - coordinate_list[source], coordinate_list[target]); + const auto target = graph->GetTarget(current_edge); - BOOST_ASSERT(current_edge != SPECIAL_EDGEID); - BOOST_ASSERT(source != SPECIAL_NODEID); - BOOST_ASSERT(target != SPECIAL_NODEID); + if (source < target || SPECIAL_EDGEID == graph->FindEdge(target, source)) + { + total_network_length += + 100 * osrm::util::coordinate_calculation::greatCircleDistance( + coordinate_list[source], coordinate_list[target]); - const unsigned 
size_of_containing_component = - std::min(tarjan->GetComponentSize(tarjan->GetComponentID(source)), - tarjan->GetComponentSize(tarjan->GetComponentID(target))); + BOOST_ASSERT(current_edge != SPECIAL_EDGEID); + BOOST_ASSERT(source != SPECIAL_NODEID); + BOOST_ASSERT(target != SPECIAL_NODEID); - // edges that end on bollard nodes may actually be in two distinct components - if (size_of_containing_component < 1000) - { - OGRLineString line_string; - line_string.addPoint( - static_cast(osrm::util::toFloating(coordinate_list[source].lon)), - static_cast(osrm::util::toFloating(coordinate_list[source].lat))); - line_string.addPoint( - static_cast(osrm::util::toFloating(coordinate_list[target].lon)), - static_cast(osrm::util::toFloating(coordinate_list[target].lat))); - - OGRFeature *po_feature = OGRFeature::CreateFeature(po_layer->GetLayerDefn()); - - po_feature->SetGeometry(&line_string); - if (OGRERR_NONE != po_layer->CreateFeature(po_feature)) + const unsigned size_of_containing_component = + std::min(tarjan->GetComponentSize(tarjan->GetComponentID(source)), + tarjan->GetComponentSize(tarjan->GetComponentID(target))); + + // edges that end on bollard nodes may actually be in two distinct components + if (size_of_containing_component < 1000) { - throw osrm::util::exception("Failed to create feature in shapefile."); + OGRLineString line_string; + line_string.addPoint(static_cast(osrm::util::toFloating( + coordinate_list[source].lon)), + static_cast(osrm::util::toFloating( + coordinate_list[source].lat))); + line_string.addPoint(static_cast(osrm::util::toFloating( + coordinate_list[target].lon)), + static_cast(osrm::util::toFloating( + coordinate_list[target].lat))); + + OGRFeature *po_feature = + OGRFeature::CreateFeature(po_layer->GetLayerDefn()); + + po_feature->SetGeometry(&line_string); + if (OGRERR_NONE != po_layer->CreateFeature(po_feature)) + { + throw osrm::util::exception("Failed to create feature in shapefile." 
+ + SOURCE_REF); + } + OGRFeature::DestroyFeature(po_feature); } - OGRFeature::DestroyFeature(po_feature); } } } @@ -210,13 +216,11 @@ int main(int argc, char *argv[]) OGRSpatialReference::DestroySpatialReference(po_srs); OGRDataSource::DestroyDataSource(po_datasource); TIMER_STOP(SCC_OUTPUT); - osrm::util::SimpleLogger().Write() - << "generating output took: " << TIMER_MSEC(SCC_OUTPUT) / 1000. << "s"; + osrm::util::Log() << "generating output took: " << TIMER_MSEC(SCC_OUTPUT) / 1000. << "s"; - osrm::util::SimpleLogger().Write() - << "total network distance: " << static_cast(total_network_length / 100 / 1000.) - << " km"; + osrm::util::Log() << "total network distance: " + << static_cast(total_network_length / 100 / 1000.) << " km"; - osrm::util::SimpleLogger().Write() << "finished component analysis"; + osrm::util::Log() << "finished component analysis"; return EXIT_SUCCESS; } diff --git a/src/tools/contract.cpp b/src/tools/contract.cpp index bc7b3a7df5f..2521e80d49e 100644 --- a/src/tools/contract.cpp +++ b/src/tools/contract.cpp @@ -1,6 +1,7 @@ #include "contractor/contractor.hpp" #include "contractor/contractor_config.hpp" -#include "util/simple_logger.hpp" +#include "util/exception.hpp" +#include "util/log.hpp" #include "util/version.hpp" #include @@ -92,19 +93,19 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig } catch (const boost::program_options::error &e) { - util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); + util::Log(logERROR) << e.what(); return return_code::fail; } if (option_variables.count("version")) { - util::SimpleLogger().Write() << OSRM_VERSION; + std::cout << OSRM_VERSION << std::endl; return return_code::exit; } if (option_variables.count("help")) { - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return return_code::exit; } @@ -112,7 +113,7 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig if 
(!option_variables.count("input")) { - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return return_code::fail; } @@ -121,6 +122,7 @@ return_code parseArguments(int argc, char *argv[], contractor::ContractorConfig int main(int argc, char *argv[]) try { + util::LogPolicy::GetInstance().Unmute(); contractor::ContractorConfig contractor_config; @@ -140,7 +142,7 @@ int main(int argc, char *argv[]) try if (1 > contractor_config.requested_num_threads) { - util::SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger"; + util::Log(logERROR) << "Number of threads must be 1 or larger"; return EXIT_FAILURE; } @@ -148,21 +150,19 @@ int main(int argc, char *argv[]) try if (recommended_num_threads != contractor_config.requested_num_threads) { - util::SimpleLogger().Write(logWARNING) - << "The recommended number of threads is " << recommended_num_threads - << "! This setting may have performance side-effects."; + util::Log(logWARNING) << "The recommended number of threads is " << recommended_num_threads + << "! 
This setting may have performance side-effects."; } if (!boost::filesystem::is_regular_file(contractor_config.osrm_input_path)) { - util::SimpleLogger().Write(logWARNING) - << "Input file " << contractor_config.osrm_input_path.string() << " not found!"; + util::Log(logERROR) << "Input file " << contractor_config.osrm_input_path.string() + << " not found!"; return EXIT_FAILURE; } - util::SimpleLogger().Write() << "Input file: " - << contractor_config.osrm_input_path.filename().string(); - util::SimpleLogger().Write() << "Threads: " << contractor_config.requested_num_threads; + util::Log() << "Input file: " << contractor_config.osrm_input_path.filename().string(); + util::Log() << "Threads: " << contractor_config.requested_num_threads; tbb::task_scheduler_init init(contractor_config.requested_num_threads); @@ -170,8 +170,7 @@ int main(int argc, char *argv[]) try } catch (const std::bad_alloc &e) { - util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); - util::SimpleLogger().Write(logWARNING) - << "Please provide more memory or consider using a larger swapfile"; + util::Log(logERROR) << "[exception] " << e.what(); + util::Log(logERROR) << "Please provide more memory or consider using a larger swapfile"; return EXIT_FAILURE; } diff --git a/src/tools/extract.cpp b/src/tools/extract.cpp index 6d0104ae2a8..3068b016881 100644 --- a/src/tools/extract.cpp +++ b/src/tools/extract.cpp @@ -1,7 +1,7 @@ #include "extractor/extractor.hpp" #include "extractor/extractor_config.hpp" #include "extractor/scripting_environment_lua.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/version.hpp" #include @@ -84,19 +84,19 @@ return_code parseArguments(int argc, char *argv[], extractor::ExtractorConfig &e } catch (const boost::program_options::error &e) { - util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); + util::Log(logERROR) << e.what(); return return_code::fail; } if (option_variables.count("version")) { - 
util::SimpleLogger().Write() << OSRM_VERSION; + std::cout << OSRM_VERSION << std::endl; return return_code::exit; } if (option_variables.count("help")) { - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return return_code::exit; } @@ -104,7 +104,7 @@ return_code parseArguments(int argc, char *argv[], extractor::ExtractorConfig &e if (!option_variables.count("input")) { - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return return_code::exit; } @@ -132,21 +132,21 @@ int main(int argc, char *argv[]) try if (1 > extractor_config.requested_num_threads) { - util::SimpleLogger().Write(logWARNING) << "Number of threads must be 1 or larger"; + util::Log(logERROR) << "Number of threads must be 1 or larger"; return EXIT_FAILURE; } if (!boost::filesystem::is_regular_file(extractor_config.input_path)) { - util::SimpleLogger().Write(logWARNING) - << "Input file " << extractor_config.input_path.string() << " not found!"; + util::Log(logERROR) << "Input file " << extractor_config.input_path.string() + << " not found!"; return EXIT_FAILURE; } if (!boost::filesystem::is_regular_file(extractor_config.profile_path)) { - util::SimpleLogger().Write(logWARNING) - << "Profile " << extractor_config.profile_path.string() << " not found!"; + util::Log(logERROR) << "Profile " << extractor_config.profile_path.string() + << " not found!"; return EXIT_FAILURE; } @@ -157,8 +157,7 @@ int main(int argc, char *argv[]) try } catch (const std::bad_alloc &e) { - util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); - util::SimpleLogger().Write(logWARNING) - << "Please provide more memory or consider using a larger swapfile"; + util::Log(logERROR) << "[exception] " << e.what(); + util::Log(logERROR) << "Please provide more memory or consider using a larger swapfile"; return EXIT_FAILURE; -} +} \ No newline at end of file diff --git a/src/tools/io-benchmark.cpp b/src/tools/io-benchmark.cpp index ebbd27b26ea..598f23d20ab 
100644 --- a/src/tools/io-benchmark.cpp +++ b/src/tools/io-benchmark.cpp @@ -1,5 +1,6 @@ #include "util/exception.hpp" -#include "util/simple_logger.hpp" +#include "util/exception_utils.hpp" +#include "util/log.hpp" #include "util/timing_util.hpp" #include @@ -53,24 +54,24 @@ int main(int argc, char *argv[]) { #ifdef __FreeBSD__ - osrm::util::SimpleLogger().Write() << "Not supported on FreeBSD"; + osrm::util::Log() << "Not supported on FreeBSD"; return 0; #endif #ifdef _WIN32 - osrm::util::SimpleLogger().Write() << "Not supported on Windows"; + osrm::util::Log() << "Not supported on Windows"; return 0; #else osrm::util::LogPolicy::GetInstance().Unmute(); if (1 == argc) { - osrm::util::SimpleLogger().Write(logWARNING) << "usage: " << argv[0] << " /path/on/device"; + osrm::util::Log(logWARNING) << "usage: " << argv[0] << " /path/on/device"; return -1; } test_path = boost::filesystem::path(argv[1]); test_path /= "osrm.tst"; - osrm::util::SimpleLogger().Write(logDEBUG) << "temporary file: " << test_path.string(); + osrm::util::Log(logDEBUG) << "temporary file: " << test_path.string(); // create files for testing if (2 == argc) @@ -78,7 +79,8 @@ int main(int argc, char *argv[]) // create file to test if (boost::filesystem::exists(test_path)) { - throw osrm::util::exception("Data file already exists"); + throw osrm::util::exception("Data file already exists: " + test_path.string() + + SOURCE_REF); } int *random_array = new int[osrm::tools::NUMBER_OF_ELEMENTS]; @@ -97,34 +99,33 @@ int main(int argc, char *argv[]) open(test_path.string().c_str(), O_CREAT | O_TRUNC | O_WRONLY | O_SYNC, S_IRWXU); if (-1 == file_desc) { - throw osrm::util::exception("Could not open random data file"); + throw osrm::util::exception("Could not open random data file" + test_path.string() + + SOURCE_REF); } TIMER_START(write_1gb); int ret = write(file_desc, random_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); if (0 > ret) { - throw osrm::util::exception("could not write random data 
file"); + throw osrm::util::exception("could not write random data file" + test_path.string() + + SOURCE_REF); } TIMER_STOP(write_1gb); close(file_desc); #endif delete[] random_array; - osrm::util::SimpleLogger().Write(logDEBUG) << "writing raw 1GB took " - << TIMER_SEC(write_1gb) << "s"; - osrm::util::SimpleLogger().Write() << "raw write performance: " << std::setprecision(5) - << std::fixed << 1024 * 1024 / TIMER_SEC(write_1gb) - << "MB/sec"; - - osrm::util::SimpleLogger().Write(logDEBUG) - << "finished creation of random data. Flush disk cache now!"; + osrm::util::Log(logDEBUG) << "writing raw 1GB took " << TIMER_SEC(write_1gb) << "s"; + osrm::util::Log() << "raw write performance: " << std::setprecision(5) << std::fixed + << 1024 * 1024 / TIMER_SEC(write_1gb) << "MB/sec"; + + osrm::util::Log(logDEBUG) << "finished creation of random data. Flush disk cache now!"; } else { // Run Non-Cached I/O benchmarks if (!boost::filesystem::exists(test_path)) { - throw osrm::util::exception("data file does not exist"); + throw osrm::util::exception("data file does not exist" + SOURCE_REF); } // volatiles do not get optimized @@ -143,7 +144,7 @@ int main(int argc, char *argv[]) int file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | O_SYNC); if (-1 == file_desc) { - osrm::util::SimpleLogger().Write(logDEBUG) << "opened, error: " << strerror(errno); + osrm::util::Log(logDEBUG) << "opened, error: " << strerror(errno); return -1; } char *raw_array = (char *)memalign(512, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); @@ -156,22 +157,19 @@ int main(int argc, char *argv[]) #endif #ifdef __linux__ int ret = read(file_desc, raw_array, osrm::tools::NUMBER_OF_ELEMENTS * sizeof(unsigned)); - osrm::util::SimpleLogger().Write(logDEBUG) << "read " << ret - << " bytes, error: " << strerror(errno); + osrm::util::Log(logDEBUG) << "read " << ret << " bytes, error: " << strerror(errno); close(file_desc); file_desc = open(test_path.string().c_str(), O_RDONLY | O_DIRECT | 
O_SYNC); - osrm::util::SimpleLogger().Write(logDEBUG) << "opened, error: " << strerror(errno); + osrm::util::Log(logDEBUG) << "opened, error: " << strerror(errno); #endif TIMER_STOP(read_1gb); - osrm::util::SimpleLogger().Write(logDEBUG) << "reading raw 1GB took " << TIMER_SEC(read_1gb) - << "s"; - osrm::util::SimpleLogger().Write() << "raw read performance: " << std::setprecision(5) - << std::fixed << 1024 * 1024 / TIMER_SEC(read_1gb) - << "MB/sec"; + osrm::util::Log(logDEBUG) << "reading raw 1GB took " << TIMER_SEC(read_1gb) << "s"; + osrm::util::Log() << "raw read performance: " << std::setprecision(5) << std::fixed + << 1024 * 1024 / TIMER_SEC(read_1gb) << "MB/sec"; std::vector timing_results_raw_random; - osrm::util::SimpleLogger().Write(logDEBUG) << "running 1000 random I/Os of 4KB"; + osrm::util::Log(logDEBUG) << "running 1000 random I/Os of 4KB"; #ifdef __APPLE__ fseek(fd, 0, SEEK_SET); @@ -206,21 +204,21 @@ int main(int argc, char *argv[]) TIMER_STOP(random_access); if (((off_t)-1) == ret1) { - osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; - osrm::util::SimpleLogger().Write(logWARNING) << "seek error " << strerror(errno); - throw osrm::util::exception("seek error"); + osrm::util::Log(logWARNING) << "offset: " << current_offset; + osrm::util::Log(logWARNING) << "seek error " << strerror(errno); + throw osrm::util::exception("seek error" + SOURCE_REF); } if (-1 == ret2) { - osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; - osrm::util::SimpleLogger().Write(logWARNING) << "read error " << strerror(errno); - throw osrm::util::exception("read error"); + osrm::util::Log(logWARNING) << "offset: " << current_offset; + osrm::util::Log(logWARNING) << "read error " << strerror(errno); + throw osrm::util::exception("read error" + SOURCE_REF); } timing_results_raw_random.push_back(TIMER_SEC(random_access)); } // Do statistics - osrm::util::SimpleLogger().Write(logDEBUG) << "running raw random I/O statistics"; 
+ osrm::util::Log(logDEBUG) << "running raw random I/O statistics"; std::ofstream random_csv("random.csv", std::ios::trunc); for (unsigned i = 0; i < timing_results_raw_random.size(); ++i) { @@ -228,12 +226,12 @@ int main(int argc, char *argv[]) } osrm::tools::runStatistics(timing_results_raw_random, stats); - osrm::util::SimpleLogger().Write() << "raw random I/O: " << std::setprecision(5) - << std::fixed << "min: " << stats.min << "ms, " - << "mean: " << stats.mean << "ms, " - << "med: " << stats.med << "ms, " - << "max: " << stats.max << "ms, " - << "dev: " << stats.dev << "ms"; + osrm::util::Log() << "raw random I/O: " << std::setprecision(5) << std::fixed + << "min: " << stats.min << "ms, " + << "mean: " << stats.mean << "ms, " + << "med: " << stats.med << "ms, " + << "max: " << stats.max << "ms, " + << "dev: " << stats.dev << "ms"; std::vector timing_results_raw_seq; #ifdef __APPLE__ @@ -266,15 +264,15 @@ int main(int argc, char *argv[]) TIMER_STOP(read_every_100); if (((off_t)-1) == ret1) { - osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; - osrm::util::SimpleLogger().Write(logWARNING) << "seek error " << strerror(errno); - throw osrm::util::exception("seek error"); + osrm::util::Log(logWARNING) << "offset: " << current_offset; + osrm::util::Log(logWARNING) << "seek error " << strerror(errno); + throw osrm::util::exception("seek error" + SOURCE_REF); } if (-1 == ret2) { - osrm::util::SimpleLogger().Write(logWARNING) << "offset: " << current_offset; - osrm::util::SimpleLogger().Write(logWARNING) << "read error " << strerror(errno); - throw osrm::util::exception("read error"); + osrm::util::Log(logWARNING) << "offset: " << current_offset; + osrm::util::Log(logWARNING) << "read error " << strerror(errno); + throw osrm::util::exception("read error" + SOURCE_REF); } timing_results_raw_seq.push_back(TIMER_SEC(read_every_100)); } @@ -288,7 +286,7 @@ int main(int argc, char *argv[]) close(file_desc); #endif // Do statistics - 
osrm::util::SimpleLogger().Write(logDEBUG) << "running sequential I/O statistics"; + osrm::util::Log(logDEBUG) << "running sequential I/O statistics"; // print simple statistics: min, max, median, variance std::ofstream seq_csv("sequential.csv", std::ios::trunc); for (unsigned i = 0; i < timing_results_raw_seq.size(); ++i) @@ -296,17 +294,17 @@ int main(int argc, char *argv[]) seq_csv << i << ", " << timing_results_raw_seq[i] << std::endl; } osrm::tools::runStatistics(timing_results_raw_seq, stats); - osrm::util::SimpleLogger().Write() << "raw sequential I/O: " << std::setprecision(5) - << std::fixed << "min: " << stats.min << "ms, " - << "mean: " << stats.mean << "ms, " - << "med: " << stats.med << "ms, " - << "max: " << stats.max << "ms, " - << "dev: " << stats.dev << "ms"; + osrm::util::Log() << "raw sequential I/O: " << std::setprecision(5) << std::fixed + << "min: " << stats.min << "ms, " + << "mean: " << stats.mean << "ms, " + << "med: " << stats.med << "ms, " + << "max: " << stats.max << "ms, " + << "dev: " << stats.dev << "ms"; if (boost::filesystem::exists(test_path)) { boost::filesystem::remove(test_path); - osrm::util::SimpleLogger().Write(logDEBUG) << "removing temporary files"; + osrm::util::Log(logDEBUG) << "removing temporary files"; } } return EXIT_SUCCESS; diff --git a/src/tools/routed.cpp b/src/tools/routed.cpp index 4e62a2e00ad..6870711c46e 100644 --- a/src/tools/routed.cpp +++ b/src/tools/routed.cpp @@ -1,5 +1,5 @@ #include "server/server.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/version.hpp" #include "osrm/engine_config.hpp" @@ -136,19 +136,19 @@ inline unsigned generateServerProgramOptions(const int argc, } catch (const boost::program_options::error &e) { - util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); + util::Log(logERROR) << e.what(); return INIT_FAILED; } if (option_variables.count("version")) { - util::SimpleLogger().Write() << OSRM_VERSION; + std::cout << OSRM_VERSION << std::endl; 
return INIT_OK_DO_NOT_START_ENGINE; } if (option_variables.count("help")) { - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return INIT_OK_DO_NOT_START_ENGINE; } @@ -164,11 +164,10 @@ inline unsigned generateServerProgramOptions(const int argc, } else if (use_shared_memory && option_variables.count("base")) { - util::SimpleLogger().Write(logWARNING) - << "Shared memory settings conflict with path settings."; + util::Log(logWARNING) << "Shared memory settings conflict with path settings."; } - util::SimpleLogger().Write() << visible_options; + std::cout << visible_options; return INIT_OK_DO_NOT_START_ENGINE; } @@ -211,69 +210,59 @@ int main(int argc, const char *argv[]) try { if (base_path.empty() != config.use_shared_memory) { - util::SimpleLogger().Write(logWARNING) << "Path settings and shared memory conflicts."; + util::Log(logWARNING) << "Path settings and shared memory conflicts."; } else { if (!boost::filesystem::is_regular_file(config.storage_config.ram_index_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.ram_index_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.ram_index_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.file_index_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.file_index_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.file_index_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.hsgr_data_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.hsgr_data_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.hsgr_data_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.nodes_data_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.nodes_data_path - << " is not found"; + util::Log(logWARNING) << 
config.storage_config.nodes_data_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.edges_data_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.edges_data_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.edges_data_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.core_data_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.core_data_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.core_data_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.geometries_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.geometries_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.geometries_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.timestamp_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.timestamp_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.timestamp_path << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.datasource_names_path)) { - util::SimpleLogger().Write(logWARNING) - << config.storage_config.datasource_names_path << " is not found"; + util::Log(logWARNING) << config.storage_config.datasource_names_path + << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.datasource_indexes_path)) { - util::SimpleLogger().Write(logWARNING) - << config.storage_config.datasource_indexes_path << " is not found"; + util::Log(logWARNING) << config.storage_config.datasource_indexes_path + << " is not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.names_data_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.names_data_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.names_data_path << " is 
not found"; } if (!boost::filesystem::is_regular_file(config.storage_config.properties_path)) { - util::SimpleLogger().Write(logWARNING) << config.storage_config.properties_path - << " is not found"; + util::Log(logWARNING) << config.storage_config.properties_path << " is not found"; } } return EXIT_FAILURE; @@ -287,7 +276,7 @@ int main(int argc, const char *argv[]) try if (should_lock && -1 == mlockall(MCL_CURRENT | MCL_FUTURE)) { could_lock = false; - util::SimpleLogger().Write(logWARNING) << "memory could not be locked to RAM"; + util::Log(logWARNING) << "memory could not be locked to RAM"; } } ~MemoryLocker() @@ -298,16 +287,16 @@ int main(int argc, const char *argv[]) try bool should_lock = false, could_lock = true; } memory_locker(config.use_shared_memory); #endif - util::SimpleLogger().Write() << "starting up engines, " << OSRM_VERSION; + util::Log() << "starting up engines, " << OSRM_VERSION; if (config.use_shared_memory) { - util::SimpleLogger().Write() << "Loading from shared memory"; + util::Log() << "Loading from shared memory"; } - util::SimpleLogger().Write() << "Threads: " << requested_thread_num; - util::SimpleLogger().Write() << "IP address: " << ip_address; - util::SimpleLogger().Write() << "IP port: " << ip_port; + util::Log() << "Threads: " << requested_thread_num; + util::Log() << "IP address: " << ip_address; + util::Log() << "IP port: " << ip_port; #ifndef _WIN32 int sig = 0; @@ -324,7 +313,7 @@ int main(int argc, const char *argv[]) try if (trial_run) { - util::SimpleLogger().Write() << "trial run, quitting after successful initialization"; + util::Log() << "trial run, quitting after successful initialization"; } else { @@ -343,7 +332,7 @@ int main(int argc, const char *argv[]) try sigaddset(&wait_mask, SIGQUIT); sigaddset(&wait_mask, SIGTERM); pthread_sigmask(SIG_BLOCK, &wait_mask, nullptr); - util::SimpleLogger().Write() << "running and waiting for requests"; + util::Log() << "running and waiting for requests"; if 
(std::getenv("SIGNAL_PARENT_WHEN_READY")) { kill(getppid(), SIGUSR1); @@ -353,12 +342,12 @@ int main(int argc, const char *argv[]) try // Set console control handler to allow server to be stopped. console_ctrl_function = std::bind(&server::Server::Stop, routing_server); SetConsoleCtrlHandler(console_ctrl_handler, TRUE); - util::SimpleLogger().Write() << "running and waiting for requests"; + util::Log() << "running and waiting for requests"; routing_server->Run(); #endif - util::SimpleLogger().Write() << "initiating shutdown"; + util::Log() << "initiating shutdown"; routing_server->Stop(); - util::SimpleLogger().Write() << "stopping threads"; + util::Log() << "stopping threads"; auto status = future.wait_for(std::chrono::seconds(2)); @@ -368,19 +357,18 @@ int main(int argc, const char *argv[]) try } else { - util::SimpleLogger().Write(logWARNING) << "Didn't exit within 2 seconds. Hard abort!"; + util::Log(logWARNING) << "Didn't exit within 2 seconds. Hard abort!"; server_task.reset(); // just kill it } } - util::SimpleLogger().Write() << "freeing objects"; + util::Log() << "freeing objects"; routing_server.reset(); - util::SimpleLogger().Write() << "shutdown completed"; + util::Log() << "shutdown completed"; } catch (const std::bad_alloc &e) { - util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); - util::SimpleLogger().Write(logWARNING) - << "Please provide more memory or consider using a larger swapfile"; + util::Log(logWARNING) << "[exception] " << e.what(); + util::Log(logWARNING) << "Please provide more memory or consider using a larger swapfile"; return EXIT_FAILURE; } diff --git a/src/tools/springclean.cpp b/src/tools/springclean.cpp index cb5794e9d01..2ae1ec62afe 100644 --- a/src/tools/springclean.cpp +++ b/src/tools/springclean.cpp @@ -2,7 +2,7 @@ #include "storage/shared_datatype.hpp" #include "storage/shared_memory.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" namespace osrm { @@ -36,14 +36,14 @@ void deleteRegion(const 
SharedDataType region) } }(); - util::SimpleLogger().Write(logWARNING) << "could not delete shared memory region " << name; + util::Log(logWARNING) << "could not delete shared memory region " << name; } } // find all existing shmem regions and remove them. void springclean() { - util::SimpleLogger().Write() << "spring-cleaning all shared memory regions"; + util::Log() << "spring-cleaning all shared memory regions"; deleteRegion(DATA_1); deleteRegion(LAYOUT_1); deleteRegion(DATA_2); @@ -56,19 +56,18 @@ void springclean() int main() { osrm::util::LogPolicy::GetInstance().Unmute(); - osrm::util::SimpleLogger().Write() << "Releasing all locks"; - osrm::util::SimpleLogger().Write() << "ATTENTION! BE CAREFUL!"; - osrm::util::SimpleLogger().Write() << "----------------------"; - osrm::util::SimpleLogger().Write() << "This tool may put osrm-routed into an undefined state!"; - osrm::util::SimpleLogger().Write() - << "Type 'Y' to acknowledge that you know what your are doing."; - osrm::util::SimpleLogger().Write() << "\n\nDo you want to purge all shared memory allocated " - << "by osrm-datastore? [type 'Y' to confirm]"; + osrm::util::Log() << "Releasing all locks"; + osrm::util::Log() << "ATTENTION! BE CAREFUL!"; + osrm::util::Log() << "----------------------"; + osrm::util::Log() << "This tool may put osrm-routed into an undefined state!"; + osrm::util::Log() << "Type 'Y' to acknowledge that you know what your are doing."; + osrm::util::Log() << "\n\nDo you want to purge all shared memory allocated " + << "by osrm-datastore? 
[type 'Y' to confirm]"; const auto letter = getchar(); if (letter != 'Y') { - osrm::util::SimpleLogger().Write() << "aborted."; + osrm::util::Log() << "aborted."; return EXIT_SUCCESS; } osrm::tools::springclean(); diff --git a/src/tools/store.cpp b/src/tools/store.cpp index 03e1df4ad7a..80b5145608b 100644 --- a/src/tools/store.cpp +++ b/src/tools/store.cpp @@ -1,6 +1,6 @@ #include "storage/storage.hpp" #include "util/exception.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include "util/typedefs.hpp" #include "util/version.hpp" @@ -49,7 +49,7 @@ bool generateDataStoreOptions(const int argc, // print help options if no infile is specified if (argc < 2) { - util::SimpleLogger().Write() << visible_options; + util::Log() << visible_options; return false; } @@ -66,19 +66,19 @@ bool generateDataStoreOptions(const int argc, } catch (const boost::program_options::error &e) { - util::SimpleLogger().Write(logWARNING) << "[error] " << e.what(); + util::Log(logERROR) << e.what(); return false; } if (option_variables.count("version")) { - util::SimpleLogger().Write() << OSRM_VERSION; + util::Log() << OSRM_VERSION; return false; } if (option_variables.count("help")) { - util::SimpleLogger().Write() << visible_options; + util::Log() << visible_options; return false; } @@ -100,7 +100,7 @@ int main(const int argc, const char *argv[]) try storage::StorageConfig config(base_path); if (!config.IsValid()) { - util::SimpleLogger().Write(logWARNING) << "Config contains invalid file paths. Exiting!"; + util::Log(logERROR) << "Config contains invalid file paths. 
Exiting!"; return EXIT_FAILURE; } storage::Storage storage(std::move(config)); @@ -115,8 +115,8 @@ int main(const int argc, const char *argv[]) try { if (retry_counter > 0) { - util::SimpleLogger().Write(logWARNING) << "Try number " << (retry_counter + 1) - << " to load the dataset."; + util::Log(logWARNING) << "Try number " << (retry_counter + 1) + << " to load the dataset."; } code = storage.Run(max_wait); retry_counter++; @@ -131,9 +131,8 @@ int main(const int argc, const char *argv[]) try } catch (const std::bad_alloc &e) { - util::SimpleLogger().Write(logWARNING) << "[exception] " << e.what(); - util::SimpleLogger().Write(logWARNING) - << "Please provide more memory or disable locking the virtual " - "address space (note: this makes OSRM swap, i.e. slow)"; + util::Log(logERROR) << "[exception] " << e.what(); + util::Log(logERROR) << "Please provide more memory or disable locking the virtual " + "address space (note: this makes OSRM swap, i.e. slow)"; return EXIT_FAILURE; -} +} \ No newline at end of file diff --git a/src/tools/unlock_all_mutexes.cpp b/src/tools/unlock_all_mutexes.cpp index 508777d6172..bdda90d3bb5 100644 --- a/src/tools/unlock_all_mutexes.cpp +++ b/src/tools/unlock_all_mutexes.cpp @@ -1,12 +1,12 @@ #include "storage/shared_barriers.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include int main() { osrm::util::LogPolicy::GetInstance().Unmute(); - osrm::util::SimpleLogger().Write() << "Releasing all locks"; + osrm::util::Log() << "Releasing all locks"; osrm::storage::SharedBarriers::resetCurrentRegions(); osrm::storage::SharedBarriers::resetRegions1(); diff --git a/src/util/coordinate.cpp b/src/util/coordinate.cpp index c39803791ab..ac4c2d6e3e7 100644 --- a/src/util/coordinate.cpp +++ b/src/util/coordinate.cpp @@ -1,7 +1,7 @@ #include "util/coordinate_calculation.hpp" #ifndef NDEBUG -#include "util/simple_logger.hpp" +#include "util/log.hpp" #endif #include "osrm/coordinate.hpp" diff --git a/src/util/log.cpp 
b/src/util/log.cpp new file mode 100644 index 00000000000..4b28c4c2f00 --- /dev/null +++ b/src/util/log.cpp @@ -0,0 +1,119 @@ +#include "util/log.hpp" +#include "util/isatty.hpp" +#include +#include +#include +#include + +namespace osrm +{ +namespace util +{ + +namespace +{ +static const char COL_RESET[]{"\x1b[0m"}; +static const char RED[]{"\x1b[31m"}; +static const char YELLOW[]{"\x1b[33m"}; +#ifndef NDEBUG +static const char MAGENTA[]{"\x1b[35m"}; +#endif +// static const char GREEN[] { "\x1b[32m"}; +// static const char BLUE[] { "\x1b[34m"}; +// static const char CYAN[] { "\x1b[36m"}; +} + +void LogPolicy::Unmute() { m_is_mute = false; } + +void LogPolicy::Mute() { m_is_mute = true; } + +bool LogPolicy::IsMute() const { return m_is_mute; } + +LogPolicy &LogPolicy::GetInstance() +{ + static LogPolicy runningInstance; + return runningInstance; +} + +Log::Log(LogLevel level_, std::ostream &ostream) : level(level_), stream(ostream) +{ + const bool is_terminal = IsStdoutATTY(); + std::lock_guard lock(get_mutex()); + switch (level) + { + case logWARNING: + stream << (is_terminal ? YELLOW : "") << "[warn] "; + break; + case logERROR: + stream << (is_terminal ? RED : "") << "[error] "; + break; + case logDEBUG: +#ifndef NDEBUG + stream << (is_terminal ? MAGENTA : "") << "[debug] "; +#endif + break; + default: // logINFO: + stream << "[info] "; + break; + } +} + +Log::Log(LogLevel level_) : Log(level_, buffer) {} + +std::mutex &Log::get_mutex() +{ + static std::mutex mtx; + return mtx; +} + +/** + * Close down this logging instance. 
+ * This destructor is responsible for flushing any buffered data, + * and printing a newline character (each logger object is responsible for only one line) + * Because sub-classes can replace the `stream` object - we need to verify whether + * we're writing to std::cerr/cout, or whether we should write to the stream + */ +Log::~Log() +{ + std::lock_guard lock(get_mutex()); + const bool usestd = (&stream == &buffer); + if (!LogPolicy::GetInstance().IsMute()) + { + const bool is_terminal = IsStdoutATTY(); + if (usestd) + { + switch (level) + { + case logWARNING: + case logERROR: + std::cerr << buffer.str(); + std::cerr << (is_terminal ? COL_RESET : ""); + std::cerr << std::endl; + break; + case logDEBUG: +#ifdef NDEBUG + break; +#endif + case logINFO: + default: + std::cout << buffer.str(); + std::cout << (is_terminal ? COL_RESET : ""); + std::cout << std::endl; + break; + } + } + else + { + stream << (is_terminal ? COL_RESET : ""); + stream << std::endl; + } + } +} + +UnbufferedLog::UnbufferedLog(LogLevel level_) + : Log(level_, (level_ == logWARNING || level_ == logERROR) ? 
std::cerr : std::cout) +{ + stream.flags(std::ios_base::unitbuf); +} +} +} diff --git a/src/util/name_table.cpp b/src/util/name_table.cpp index 42437cc644e..082ac857182 100644 --- a/src/util/name_table.cpp +++ b/src/util/name_table.cpp @@ -1,6 +1,6 @@ #include "util/name_table.hpp" #include "util/exception.hpp" -#include "util/simple_logger.hpp" +#include "util/log.hpp" #include #include @@ -32,9 +32,8 @@ NameTable::NameTable(const std::string &filename) } else { - util::SimpleLogger().Write(logINFO) - << "list of street names is empty in construction of name table from: \"" << filename - << "\""; + util::Log() << "list of street names is empty in construction of name table from: \"" + << filename << "\""; } } diff --git a/src/util/simple_logger.cpp b/src/util/simple_logger.cpp deleted file mode 100644 index f582841ee0f..00000000000 --- a/src/util/simple_logger.cpp +++ /dev/null @@ -1,95 +0,0 @@ -#include "util/simple_logger.hpp" -#include "util/isatty.hpp" -#include -#include -#include -#include - -namespace osrm -{ -namespace util -{ - -namespace -{ -static const char COL_RESET[]{"\x1b[0m"}; -static const char RED[]{"\x1b[31m"}; -#ifndef NDEBUG -static const char YELLOW[]{"\x1b[33m"}; -#endif -// static const char GREEN[] { "\x1b[32m"}; -// static const char BLUE[] { "\x1b[34m"}; -// static const char MAGENTA[] { "\x1b[35m"}; -// static const char CYAN[] { "\x1b[36m"}; -} - -void LogPolicy::Unmute() { m_is_mute = false; } - -void LogPolicy::Mute() { m_is_mute = true; } - -bool LogPolicy::IsMute() const { return m_is_mute; } - -LogPolicy &LogPolicy::GetInstance() -{ - static LogPolicy runningInstance; - return runningInstance; -} - -SimpleLogger::SimpleLogger() : level(logINFO) {} - -std::mutex &SimpleLogger::get_mutex() -{ - static std::mutex mtx; - return mtx; -} - -std::ostringstream &SimpleLogger::Write(LogLevel lvl) noexcept -{ - std::lock_guard lock(get_mutex()); - level = lvl; - os << "["; - switch (level) - { - case logWARNING: - os << "warn"; - break; - 
case logDEBUG: -#ifndef NDEBUG - os << "debug"; -#endif - break; - default: // logINFO: - os << "info"; - break; - } - os << "] "; - return os; -} - -SimpleLogger::~SimpleLogger() -{ - std::lock_guard lock(get_mutex()); - if (!LogPolicy::GetInstance().IsMute()) - { - const bool is_terminal = IsStdoutATTY(); - switch (level) - { - case logWARNING: - std::cerr << (is_terminal ? RED : "") << os.str() << (is_terminal ? COL_RESET : "") - << std::endl; - break; - case logDEBUG: -#ifndef NDEBUG - std::cout << (is_terminal ? YELLOW : "") << os.str() << (is_terminal ? COL_RESET : "") - << std::endl; -#endif - break; - case logINFO: - default: - std::cout << os.str() << (is_terminal ? COL_RESET : "") << std::endl; - break; - } - } -} -} -} diff --git a/unit_tests/util/io.cpp b/unit_tests/util/io.cpp index 3836ee17388..16d6944fa17 100644 --- a/unit_tests/util/io.cpp +++ b/unit_tests/util/io.cpp @@ -43,9 +43,9 @@ BOOST_AUTO_TEST_CASE(io_nonexistent_file) } catch (const osrm::util::exception &e) { - std::cout << e.what() << std::endl; - BOOST_REQUIRE(std::string(e.what()) == - "Error opening non_existent_test_io.tmp"); + const std::string expected("Error opening non_existent_test_io.tmp"); + const std::string got(e.what()); + BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin())); } } @@ -71,9 +71,10 @@ BOOST_AUTO_TEST_CASE(file_too_small) } catch (const osrm::util::exception &e) { - std::cout << e.what() << std::endl; - BOOST_REQUIRE(std::string(e.what()) == - "Error reading from file_too_small_test_io.tmp: Unexpected end of file"); + const std::string expected( + "Error reading from file_too_small_test_io.tmp: Unexpected end of file"); + const std::string got(e.what()); + BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin())); } } @@ -98,9 +99,9 @@ BOOST_AUTO_TEST_CASE(io_corrupt_fingerprint) } catch (const osrm::util::exception &e) { - std::cout << e.what() << std::endl; - BOOST_REQUIRE(std::string(e.what()) == - "Fingerprint mismatch in 
corrupt_fingerprint_file_test_io.tmp"); + const std::string expected("Fingerprint mismatch in corrupt_fingerprint_file_test_io.tmp"); + const std::string got(e.what()); + BOOST_REQUIRE(std::equal(expected.begin(), expected.end(), got.begin())); } }