Commit

simplifications
drexlerd committed Aug 16, 2024
1 parent c033e75 commit 7eeeadd
Showing 5 changed files with 105 additions and 120 deletions.
87 changes: 45 additions & 42 deletions include/flatmemory/details/algorithms/hash.hpp
@@ -26,63 +26,66 @@
namespace flatmemory
{

-// --------------
-// Hash functions
-// --------------
+/**
+ * Forward declarations
+ */

template<typename T>
-inline void hash_combine(size_t& seed, const T& val)
-{
-    seed ^= std::hash<T>()(val) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
-}
+inline void hash_combine(size_t& seed, const T& value);

-template<>
-inline void hash_combine(size_t& seed, const std::size_t& val)
-{
-    seed ^= val + 0x9e3779b9 + (seed << 6) + (seed >> 2);
-}
+template<typename T, typename... Rest>
+inline void hash_combine(size_t& seed, const Rest&... rest);

-template<typename... Types>
-inline size_t hash_combine(const Types&... args)
-{
-    size_t seed = 0;
-    (hash_combine(seed, args), ...);
-    return seed;
+template<typename... Ts>
+inline size_t hash_combine(const Ts&... rest);
}

-template<class Container>
-inline std::size_t hash_container(const Container& container)
+/**
+ * std::hash specializations
+ */

+/// @brief std::hash specialization for a forward range.
+/// @tparam ForwardRange
+template<std::ranges::input_range R>
+struct std::hash<R>
{
-    using T = typename Container::value_type;
-    const auto hash_function = std::hash<T>();
-    std::size_t aggregated_hash = 0;
-    for (const auto& item : container)
+    size_t operator()(const R& range) const
    {
-        const auto item_hash = hash_function(item);
-        hash_combine(aggregated_hash, item_hash);
+        std::size_t aggregated_hash = 0;
+        for (const auto& item : range)
+        {
+            flatmemory::hash_combine(aggregated_hash, item);
+        }
+        return aggregated_hash;
    }
-    return aggregated_hash;
+};

+/**
+ * Definitions
+ */

+namespace flatmemory
+{

+template<typename T>
+inline void hash_combine(size_t& seed, const T& value)
+{
+    seed ^= std::hash<T>()(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

-template<class Iterator>
-inline std::size_t hash_iteration(Iterator begin, Iterator end)
+template<typename T, typename... Rest>
+inline void hash_combine(size_t& seed, const Rest&... rest)
{
-    using T = typename std::iterator_traits<Iterator>::value_type;
-    const std::hash<T> hash_function;
-    std::size_t aggregated_hash = 0;
-    for (Iterator iter = begin; iter != end; ++iter)
-    {
-        const auto item_hash = hash_function(*iter);
-        hash_combine(aggregated_hash, item_hash);
-    }
-    return aggregated_hash;
+    (flatmemory::hash_combine(seed, rest), ...);
}

-template<typename Container>
-struct hash_container_type
+template<typename... Ts>
+inline size_t hash_combine(const Ts&... rest)
{
-    size_t operator()(const Container& container) const { return hash_container(container); }
-};
+    size_t seed = 0;
+    (flatmemory::hash_combine(seed, rest), ...);
+    return seed;
+}

}
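The refactor above funnels all hashing through two entry points: the two-argument flatmemory::hash_combine that folds one value into a seed, and the variadic overload that starts from a fresh seed, with the std::hash specialization covering whole ranges. A minimal usage sketch, assuming the header is available on the include path; the includes and example values are illustrative, not part of the commit:

#include <cstddef>
#include <string>
#include <vector>
#include "flatmemory/details/algorithms/hash.hpp"  // assumed include path

int main()
{
    // Two-argument form: fold a single value into an existing seed.
    std::size_t seed = 0;
    flatmemory::hash_combine(seed, std::string("abc"));
    flatmemory::hash_combine(seed, 42);

    // Variadic form: fresh seed; the std::vector goes through the
    // std::hash specialization for input ranges declared above.
    std::vector<int> v = {1, 2, 3};
    std::size_t h = flatmemory::hash_combine(v);

    return static_cast<int>((seed ^ h) & 1);  // keep both results used
}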

4 changes: 4 additions & 0 deletions include/flatmemory/details/layout_utils.hpp
@@ -31,10 +31,14 @@ namespace flatmemory
/**
 * Common data types
 */

using offset_type = uint32_t;

using buffer_size_type = uint32_t;

+// It does not make sense the other way around, since potentially many offsets are stored within a single buffer.
+static_assert(sizeof(buffer_size_type) >= sizeof(offset_type));

/**
 * Compute padding needed to store an object with given alignment factor from the given position.
 */
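The helper announced by this comment is cut off by the diff view. The usual way to compute such padding is the remainder to the next multiple of the alignment; a minimal sketch, which may differ from the actual calculate_amount_padding in this header:

#include <cstddef>

// Sketch: padding needed so that (pos + padding) is a multiple of alignment.
// The real calculate_amount_padding in layout_utils.hpp may differ in details.
constexpr std::size_t calculate_amount_padding(std::size_t pos, std::size_t alignment)
{
    return (alignment - (pos % alignment)) % alignment;
}

static_assert(calculate_amount_padding(5, 4) == 3);  // 5 + 3 == 8, a multiple of 4
static_assert(calculate_amount_padding(8, 4) == 0);  // already aligned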
23 changes: 11 additions & 12 deletions include/flatmemory/details/types/bitset.hpp
@@ -1367,24 +1367,23 @@ void Builder<Bitset<Block, Tag>>::finish_impl()
template<IsUnsignedIntegral Block, typename Tag>
size_t Builder<Bitset<Block, Tag>>::finish_impl(ByteBuffer& out, size_t pos)
{
-    /* Write header info */
-    // Write default_bit_value
+    /* Write the default_bit_value. */
    out.write(pos + BitsetLayout::default_bit_value_position, m_default_bit_value);
    out.write_padding(pos + BitsetLayout::default_bit_value_end, BitsetLayout::default_bit_value_padding);

-    /* Write dynamic info */
-    buffer_size_type buffer_size = BitsetLayout::blocks_position;
+    size_t data_pos = BitsetLayout::blocks_position;

-    // Write blocks
-    buffer_size += m_blocks.finish(out, pos + buffer_size);
-    // Write final padding
-    buffer_size += m_buffer.write_padding(pos + buffer_size, calculate_amount_padding(buffer_size, BitsetLayout::final_alignment));
+    /* Write the blocks inline because there is no other data. */
+    data_pos += m_blocks.finish(out, pos + data_pos);

-    /* Write buffer size */
-    out.write(pos + BitsetLayout::buffer_size_position, buffer_size);
-    out.set_size(buffer_size);
+    /* Write the final padding. */
+    data_pos += m_buffer.write_padding(pos + data_pos, calculate_amount_padding(data_pos, BitsetLayout::final_alignment));

-    return buffer_size;
+    /* Write the size of the buffer to the beginning. */
+    out.write(pos + BitsetLayout::buffer_size_position, static_cast<buffer_size_type>(data_pos));
+    out.set_size(data_pos);

+    return data_pos;
}

template<IsUnsignedIntegral Block, typename Tag>
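The rewritten finish_impl reserves the size field up front, serializes the payload and padding, and only then patches the total size back in at buffer_size_position. A self-contained sketch of that write-then-patch pattern over a plain std::vector<uint8_t> (hypothetical helper and alignment, not the library's ByteBuffer API):

#include <cstdint>
#include <cstring>
#include <vector>

using buffer_size_type = uint32_t;

// Sketch: append the payload after a reserved size field, then patch the size in.
std::vector<uint8_t> serialize(const std::vector<uint8_t>& payload)
{
    std::vector<uint8_t> out(sizeof(buffer_size_type), 0);  // reserved size field
    out.insert(out.end(), payload.begin(), payload.end());  // payload
    while (out.size() % alignof(uint64_t) != 0)              // final padding (placeholder alignment)
    {
        out.push_back(0);
    }

    const auto size = static_cast<buffer_size_type>(out.size());
    std::memcpy(out.data(), &size, sizeof(size));            // patch the size at the beginning
    return out;
}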
47 changes: 19 additions & 28 deletions include/flatmemory/details/types/tuple.hpp
@@ -445,38 +445,42 @@ template<IsTriviallyCopyableOrNonTrivialType... Ts>
template<size_t... Is>
size_t Builder<Tuple<Ts...>>::finish_iterative_impl(std::index_sequence<Is...>, ByteBuffer& out, size_t pos)
{
-    offset_type buffer_size = Layout<Tuple<Ts...>>::layout_data.element_datas_position;
+    size_t data_pos = Layout<Tuple<Ts...>>::layout_data.element_datas_position;
    (
        [&]
        {
            using T = element_type<Is>;
            constexpr auto& element_data = Layout<Tuple<Ts...>>::layout_data.element_datas[Is];
            constexpr bool is_trivial = IsTriviallyCopyable<T>;

            if constexpr (is_trivial)
            {
+                /* Write the data inline. */
                auto& value = std::get<Is>(m_data);
                out.write(pos + element_data.position, value);
                out.write_padding(pos + element_data.end, element_data.padding);
            }
            else
            {
-                // write offset
-                out.write(pos + element_data.position, buffer_size);
+                /* Write the data pos at the offset pos. */
+                out.write(pos + element_data.position, data_pos);
                out.write_padding(pos + element_data.end, element_data.padding);

-                // write data
+                /* Write the data at offset */
                auto& nested_builder = std::get<Is>(m_data);
-                buffer_size += nested_builder.finish(out, pos + buffer_size);
-                buffer_size += out.write_padding(pos + buffer_size, calculate_amount_padding(buffer_size, element_data.next_data_alignment));
+                data_pos += nested_builder.finish(out, pos + data_pos);
+                data_pos += out.write_padding(pos + data_pos, calculate_amount_padding(data_pos, element_data.next_data_alignment));
            }
        }(),
        ...);
-    // No need to write padding because if size=0 then no padding is needed and otherwise, if size>0 then the loop adds final padding.
-    /* Write buffer size */
-    out.write(pos + Layout<Tuple<Ts...>>::layout_data.buffer_size_position, static_cast<buffer_size_type>(buffer_size));
-    out.set_size(buffer_size);

-    return buffer_size;
+    // There is no need to write padding here because if size=0 then no padding is needed and otherwise, if size>0 then the loop adds final padding.

+    /* Write size of the buffer to the beginning. */
+    out.write(pos + Layout<Tuple<Ts...>>::layout_data.buffer_size_position, static_cast<buffer_size_type>(data_pos));
+    out.set_size(data_pos);

+    return data_pos;
}

template<IsTriviallyCopyableOrNonTrivialType... Ts>
@@ -576,20 +580,7 @@ template<size_t... Is>
size_t Builder<Tuple<Ts...>>::hash_helper(std::index_sequence<Is...>) const
{
    size_t seed = Layout<Tuple<Ts...>>::size;
-    (
-        [&]
-        {
-            constexpr bool is_trivial = IsTriviallyCopyable<element_type<Is>>;
-            if constexpr (is_trivial)
-            {
-                hash_combine(seed, std::hash<element_type<Is>>()(get<Is>()));
-            }
-            else
-            {
-                hash_combine(seed, get<Is>().hash());
-            }
-        }(),
-        ...);
+    ([&] { flatmemory::hash_combine(seed, get<Is>()); }(), ...);
    return seed;
}
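The simplified hash_helper relies on an immediately invoked lambda folded over a std::index_sequence to run one statement per tuple element. A self-contained sketch of the same idiom with generic names, independent of the library's types:

#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>

// Sketch: run one statement per tuple index via a fold over std::index_sequence.
template<typename Tuple, std::size_t... Is>
void print_all(const Tuple& t, std::index_sequence<Is...>)
{
    ([&] { std::cout << std::get<Is>(t) << '\n'; }(), ...);
}

int main()
{
    auto t = std::make_tuple(1, 2.5, "three");
    print_all(t, std::make_index_sequence<std::tuple_size_v<decltype(t)>>{});
}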

@@ -670,8 +661,8 @@ template<IsTriviallyCopyableOrNonTrivialType... Ts>
template<std::size_t I>
decltype(auto) View<Tuple<Ts...>>::get()
{
+    static_assert(I < Layout<Tuple<Ts...>>::size);
    assert(m_buf);
-    assert(I < Layout<Tuple<Ts...>>::size);
    constexpr bool is_trivial = IsTriviallyCopyable<element_type<I>>;
    if constexpr (is_trivial)
    {
@@ -688,8 +679,8 @@ template<IsTriviallyCopyableOrNonTrivialType... Ts>
template<std::size_t I>
decltype(auto) View<Tuple<Ts...>>::get() const
{
+    static_assert(I < Layout<Tuple<Ts...>>::size);
    assert(m_buf);
-    assert(I < Layout<Tuple<Ts...>>::size);
    constexpr bool is_trivial = IsTriviallyCopyable<element_type<I>>;
    if constexpr (is_trivial)
    {
@@ -812,8 +803,8 @@ template<IsTriviallyCopyableOrNonTrivialType... Ts>
template<std::size_t I>
decltype(auto) ConstView<Tuple<Ts...>>::get() const
{
+    static_assert(I < Layout<Tuple<Ts...>>::size);
    assert(m_buf);
-    assert(I < Layout<Tuple<Ts...>>::size);
    constexpr bool is_trivial = IsTriviallyCopyable<element_type<I>>;
    if constexpr (is_trivial)
    {
64 changes: 26 additions & 38 deletions include/flatmemory/details/types/vector.hpp
@@ -416,50 +416,51 @@ void Builder<Vector<T>>::finish_impl()
template<IsTriviallyCopyableOrNonTrivialType T>
size_t Builder<Vector<T>>::finish_impl(ByteBuffer& out, size_t pos)
{
-    /* Write header info */
-    // Write vector size
+    /* Write the vector size */
    out.write(pos + Layout<Vector<T>>::vector_size_position, m_data.size());
    out.write_padding(pos + Layout<Vector<T>>::vector_size_end, Layout<Vector<T>>::vector_size_padding);

-    /* Write dynamic info */
-    offset_type buffer_size = Layout<Vector<T>>::vector_data_position;
-    // Write vector data
+    size_t data_pos = Layout<Vector<T>>::vector_data_position;

+    /* Write vector data */
    constexpr bool is_trivial = IsTriviallyCopyable<T>;
    if constexpr (is_trivial)
    {
-        /* For trivial type we can write the data without additional padding. */
-        buffer_size += out.write(pos + buffer_size, reinterpret_cast<const uint8_t*>(m_data.data()), sizeof(T_) * m_data.size());
+        /* Write the data of the trivial type inline. */
+        data_pos += out.write(pos + data_pos, reinterpret_cast<const uint8_t*>(m_data.data()), sizeof(T_) * m_data.size());
    }
    else
    {
-        /* For non-trivial type T, we store the offsets first */
-        // position of offset
-        offset_type offset_pos = Layout<Vector<T>>::vector_data_position;
+        /* Write the offset inline and data at offset. */
+        size_t offset_pos = Layout<Vector<T>>::vector_data_position;
+        /* Write sufficiently much padding before the data. */
        size_t offset_end = offset_pos + m_data.size() * sizeof(offset_type);
        size_t offset_padding = calculate_amount_padding(offset_end, Layout<T>::final_alignment);
        out.write_padding(pos + offset_end, offset_padding);
-        // We have to add padding to ensure that the data is correctly aligned
-        buffer_size = offset_end + offset_padding;

+        /* Set data pos after the offset locations. */
+        data_pos = offset_end + offset_padding;
        for (size_t i = 0; i < m_data.size(); ++i)
        {
-            // write distance between written data position and offset position
-            offset_type distance = buffer_size - offset_pos;
+            /* Write the distance between written data pos and offset pos at the offset pos.
+               This allows for more efficient iterator logic.
+            */
+            offset_type distance = data_pos - offset_pos;
            offset_pos += out.write(pos + offset_pos, distance);

-            // write data
-            auto& nested_builder = m_data[i];
-            buffer_size += nested_builder.finish(out, pos + buffer_size);
-            buffer_size += out.write_padding(pos + buffer_size, calculate_amount_padding(buffer_size, Layout<Vector<T>>::final_alignment));
+            /* Write the data at offset. */
+            data_pos += m_data[i].finish(out, pos + data_pos);
+            data_pos += out.write_padding(pos + data_pos, calculate_amount_padding(data_pos, Layout<Vector<T>>::final_alignment));
        }
    }
-    // Write final padding to satisfy alignment requirements
-    buffer_size += out.write_padding(pos + buffer_size, calculate_amount_padding(buffer_size, Layout<Vector<T>>::final_alignment));
+    /* Write the final padding. */
+    data_pos += out.write_padding(pos + data_pos, calculate_amount_padding(data_pos, Layout<Vector<T>>::final_alignment));

-    /* Write buffer size */
-    out.write(pos + Layout<Vector<T>>::buffer_size_position, static_cast<buffer_size_type>(buffer_size));
-    out.set_size(buffer_size);
+    /* Write the size of the buffer to the beginning. */
+    out.write(pos + Layout<Vector<T>>::buffer_size_position, static_cast<buffer_size_type>(data_pos));
+    out.set_size(data_pos);

-    return buffer_size;
+    return data_pos;
}
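For non-trivial element types, each offset slot stores the distance from the slot itself to the element's data, so a reader can resolve an element by adding the stored distance to the slot address; this is the "more efficient iterator logic" the comment refers to. A sketch of that lookup over a raw byte buffer (hypothetical helper, not the library's view or iterator API):

#include <cstddef>
#include <cstdint>
#include <cstring>

using offset_type = uint32_t;

// Sketch: resolve the i-th element's data pointer from its offset slot.
// vector_data_position marks where the offset slots start; each slot stores
// the distance from the slot itself to the element's data.
inline const uint8_t* element_data(const uint8_t* buffer, std::size_t vector_data_position, std::size_t i)
{
    const uint8_t* slot = buffer + vector_data_position + i * sizeof(offset_type);
    offset_type distance = 0;
    std::memcpy(&distance, slot, sizeof(distance));
    return slot + distance;
}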

template<IsTriviallyCopyableOrNonTrivialType T>
@@ -574,20 +575,7 @@ const Builder<Vector<T>>::T_& Builder<Vector<T>>::at(size_t pos) const
template<IsTriviallyCopyableOrNonTrivialType T>
size_t Builder<Vector<T>>::hash() const
{
-    constexpr bool is_trivial = IsTriviallyCopyable<T>;
-    if constexpr (is_trivial)
-    {
-        return hash_combine(hash_iteration(m_data.begin(), m_data.end()));
-    }
-    else
-    {
-        size_t seed = size();
-        for (auto iter = m_data.begin(); iter < m_data.end(); ++iter)
-        {
-            seed = hash_combine(seed, iter->hash());
-        }
-        return seed;
-    }
+    return flatmemory::hash_combine(m_data);
}

template<IsTriviallyCopyableOrNonTrivialType T>
