diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
index 124026cb676a4..01d2743444ec6 100644
--- a/drivers/md/dm-vdo/murmurhash3.c
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -8,7 +8,7 @@
 
 #include "murmurhash3.h"
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 static inline u64 rotl64(u64 x, s8 r)
 {
@@ -16,15 +16,6 @@ static inline u64 rotl64(u64 x, s8 r)
 }
 
 #define ROTL64(x, y) rotl64(x, y)
-static __always_inline u64 getblock64(const u64 *p, int i)
-{
-	return le64_to_cpup(&p[i]);
-}
-
-static __always_inline void putblock64(u64 *p, int i, u64 value)
-{
-	p[i] = cpu_to_le64(value);
-}
 
 /* Finalization mix - force all bits of a hash block to avalanche */
 
@@ -50,6 +41,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	const u64 c1 = 0x87c37b91114253d5LLU;
 	const u64 c2 = 0x4cf5ad432745937fLLU;
 
+	u64 *hash_out = out;
+
 	/* body */
 
 	const u64 *blocks = (const u64 *)(data);
@@ -57,8 +50,8 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	int i;
 
 	for (i = 0; i < nblocks; i++) {
-		u64 k1 = getblock64(blocks, i * 2 + 0);
-		u64 k2 = getblock64(blocks, i * 2 + 1);
+		u64 k1 = get_unaligned_le64(&blocks[i * 2]);
+		u64 k2 = get_unaligned_le64(&blocks[i * 2 + 1]);
 
 		k1 *= c1;
 		k1 = ROTL64(k1, 31);
@@ -160,6 +153,6 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
 	h1 += h2;
 	h2 += h1;
 
-	putblock64((u64 *)out, 0, h1);
-	putblock64((u64 *)out, 1, h2);
+	put_unaligned_le64(h1, &hash_out[0]);
+	put_unaligned_le64(h2, &hash_out[1]);
 }
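
For context: le64_to_cpup() and cpu_to_le64() operate on a dereferenced u64 *, so the old getblock64()/putblock64() helpers required the key and out buffers to be 8-byte aligned. get_unaligned_le64() and put_unaligned_le64() instead perform little-endian 64-bit loads and stores at any address, which is why the cast of out to u64 * can move into the hash_out local and the alignment requirement disappears. Below is a minimal userspace sketch of the helpers' semantics, assuming only standard C; the names load_le64_unaligned and store_le64_unaligned are illustrative, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Like get_unaligned_le64(): read 8 little-endian bytes from any address. */
static uint64_t load_le64_unaligned(const void *p)
{
	uint8_t b[8];
	uint64_t v = 0;
	int i;

	memcpy(b, p, sizeof(b));	/* no alignment requirement on p */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];	/* b[0] is the least significant byte */
	return v;
}

/* Like put_unaligned_le64(): write v as 8 little-endian bytes to any address. */
static void store_le64_unaligned(uint64_t v, void *p)
{
	uint8_t b[8];
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (uint8_t)(v >> (8 * i));
	memcpy(p, b, sizeof(b));
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	/* buf + 1 is deliberately misaligned for a 64-bit access. */
	store_le64_unaligned(0x0123456789abcdefULL, buf + 1);
	printf("%llx\n", (unsigned long long)load_le64_unaligned(buf + 1));
	return 0;
}

The memcpy() form is the conventional portable way to express an unaligned access: compilers lower it to a single load or store on architectures that permit unaligned accesses, which mirrors what the kernel helpers do.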