diff --git a/.gitmodules b/.gitmodules
index 44d6d26f4..c4c09f170 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,6 @@
   path = tests/googletest
   url = https://github.com/google/googletest.git
   ignore = dirty
+[submodule "third_party/rocksdb"]
+  path = third_party/rocksdb
+  url = https://github.com/facebook/rocksdb.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f7ad51300..a8e47170d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -73,13 +73,35 @@ endforeach()
 
 message(CMAKE_CUDA_FLAGS="${CMAKE_CUDA_FLAGS}")
 
+# Sub projects.
+add_subdirectory(tests/googletest)
+
+function(add_subdirectory_rocksdb)
+  set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+  set(WITH_SNAPPY OFF)
+  set(WITH_LZ4 OFF)
+  set(WITH_ZLIB OFF)
+  set(WITH_ZSTD OFF)
+  set(WITH_GFLAGS OFF)
+  set(ROCKSDB_BUILD_SHARED OFF)
+  set(WITH_JNI OFF)
+  set(WITH_TESTS OFF)
+  set(WITH_BENCHMARK_TOOLS OFF)
+  set(WITH_CORE_TOOLS OFF)
+  set(WITH_TOOLS OFF)
+  set(WITH_ALL_TESTS OFF)
+  set(WITH_EXAMPLES OFF)
+  set(WITH_BENCHMARK OFF)
+  add_subdirectory(third_party/rocksdb)
+endfunction()
+add_subdirectory_rocksdb()
+
 include_directories(
   ${PROJECT_SOURCE_DIR}/include
+  ${PROJECT_SOURCE_DIR}/third_party/rocksdb/include
   ${PROJECT_SOURCE_DIR}/tests/googletest/googletest/include
 )
 
-ADD_SUBDIRECTORY(tests/googletest)
-
 link_directories(
 )
 
@@ -134,4 +156,9 @@ TARGET_LINK_LIBRARIES(group_lock_test gtest_main)
 add_executable(find_or_insert_ptr_test tests/find_or_insert_ptr_test.cc.cu)
 target_compile_features(find_or_insert_ptr_test PUBLIC cxx_std_14)
 set_target_properties(find_or_insert_ptr_test PROPERTIES CUDA_ARCHITECTURES OFF)
-TARGET_LINK_LIBRARIES(find_or_insert_ptr_test gtest_main)
\ No newline at end of file
+TARGET_LINK_LIBRARIES(find_or_insert_ptr_test gtest_main)
+
+add_executable(ext_storage_rocksdb_test tests/ext_storage_rocksdb_test.cc.cu)
+target_compile_features(ext_storage_rocksdb_test PUBLIC cxx_std_14)
+set_target_properties(ext_storage_rocksdb_test PROPERTIES CUDA_ARCHITECTURES OFF)
+TARGET_LINK_LIBRARIES(ext_storage_rocksdb_test gtest_main rocksdb)
\ No newline at end of file
diff --git a/include/merlin/external_storage.hpp b/include/merlin/external_storage.hpp
new file mode 100644
index 000000000..957bce788
--- /dev/null
+++ b/include/merlin/external_storage.hpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <cuda_runtime_api.h>
+
+#include <cstddef>
+
+namespace nv {
+namespace merlin {
+
+template <class Key, class Value>
+class ExternalStorage {
+ public:
+  using size_type = size_t;
+  using key_type = Key;
+  using value_type = Value;
+
+  /**
+   * @brief Inserts key/value pairs into the external storage. If a key/value
+   * pair already exists, overwrites the current value.
+   *
+   * @param n Number of key/value slots provided in other arguments.
+   * @param d_masked_keys Device pointer to an (n)-sized array of keys.
+   * Key/value slots that should be ignored have their key set to `EMPTY_KEY`.
+   * @param d_values Device pointer to an (n * value_dims)-sized array holding
+   * the values to store, laid out contiguously as one `value_dims`-long
+   * vector per key. Values belonging to slots whose key equals `EMPTY_KEY`
+   * may be uninitialized and must be ignored by the implementation.
+   * @param value_dims Number of value elements per key.
+   * @param stream Stream that MUST be used for queuing asynchronous CUDA
+   * operations. If only the input arguments or resources obtained from the
+   * implementation's own memory pools are used for such operations, it is not
+   * necessary to synchronize the stream prior to returning from the function.
+   */
+  virtual void insert_or_assign(size_type n,
+                                const key_type* d_masked_keys,  // (n)
+                                const value_type* d_values,  // (n * value_dims)
+                                size_type value_dims,
+                                cudaStream_t stream) = 0;
+
+  /**
+   * @brief Attempts to find the supplied `d_keys` if the corresponding
+   * `d_founds`-flag is `false` and fills the stored values into the supplied
+   * memory locations (i.e., in `d_values`).
+   *
+   * @param n Number of key/value slots provided in other arguments.
+   * @param d_keys Device pointer to an (n)-sized array of keys.
+   * @param d_values Device pointer to an (n * value_dims)-sized array to store
+   * the retrieved values. For slots where the corresponding `d_founds`-flag is
+   * not `false`, the value may already have been assigned and, thus, MUST not
+   * be altered.
+   * @param value_dims Number of value elements per key.
+   * @param d_founds Device pointer to an (n)-sized array which indicates
+   * whether the corresponding `d_values` slot is already filled or not. If and
+   * only if `d_founds` is still `false`, the implementation shall attempt to
+   * retrieve and fill in the value for the corresponding key. If a key/value
+   * pair was retrieved successfully from external storage, the implementation
+   * MUST also set `d_founds` to `true`.
+   * @param stream Stream that MUST be used for queuing asynchronous CUDA
+   * operations. If only the input arguments or resources obtained from the
+   * implementation's own memory pools are used for such operations, it is not
+   * necessary to synchronize the stream prior to returning from the function.
+   *
+   * @return Number of keys that were found.
+   */
+  virtual size_type find(size_type n,
+                         const key_type* d_keys,  // (n)
+                         value_type* d_values,    // (n * value_dims)
+                         size_type value_dims,
+                         bool* d_founds,  // (n)
+                         cudaStream_t stream) const = 0;
+
+  /**
+   * @brief Attempts to erase the entries associated with the supplied
+   * `d_keys`. Keys that do not exist are ignored. It is permissible for this
+   * function to be implemented asynchronously (i.e., to return before the
+   * actual deletion has happened).
+   *
+   * @param n Number of keys provided in the `d_keys` argument.
+   * @param d_keys Device pointer to an (n)-sized array of keys. This pointer
+   * is only guaranteed to be valid for the duration of the call. If erasure is
+   * implemented asynchronously, you must make a copy and manage its lifetime
+   * yourself.
+   * @param stream Stream that MUST be used for queuing asynchronous CUDA
+   * operations.
+   */
+  virtual void erase(size_type n, const key_type* d_keys,
+                     cudaStream_t stream) = 0;
+};
+
+}  // namespace merlin
+}  // namespace nv
diff --git a/include/merlin/rocksdb_storage.hpp b/include/merlin/rocksdb_storage.hpp
new file mode 100644
index 000000000..f2db84b92
--- /dev/null
+++ b/include/merlin/rocksdb_storage.hpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <rocksdb/db.h>
+
+#include <iomanip>
+#include <iostream>
+#include <memory>
+
+#include <merlin/external_storage.hpp>
+#include <merlin/memory_pool.cuh>
+#include <merlin/utils.cuh>
+
+#ifdef ROCKSDB_CHECK
+#error Unexpected redefinition of ROCKSDB_CHECK! Something is wrong.
+#endif
+
+#define ROCKSDB_CHECK(status)                                        \
+  do {                                                               \
+    if (!status.ok()) {                                              \
+      std::cerr << __FILE__ << ':' << __LINE__ << ": RocksDB error " \
+                << status.ToString() << '\n';                        \
+      std::abort();                                                  \
+    }                                                                \
+  } while (0)
+
+namespace nv {
+namespace merlin {
+
+struct RocksDBStorageOptions {
+  std::string path;
+  std::string column_name{rocksdb::kDefaultColumnFamilyName};
+  bool read_only{};
+  MemoryPoolOptions host_mem_pool;
+};
+
+inline std::ostream& operator<<(std::ostream& os,
+                                const RocksDBStorageOptions& opts) {
+  return os << std::setw(15) << "path"
+            << ": " << opts.path << '\n'
+            << std::setw(15) << "column_name"
+            << ": " << opts.column_name << '\n'
+            << std::setw(15) << "read_only"
+            << ": " << opts.read_only;
+}
+
+template <class Key, class Value>
+class RocksDBStorage : public ExternalStorage<Key, Value> {
+ public:
+  using base_type = ExternalStorage<Key, Value>;
+
+  using size_type = typename base_type::size_type;
+  using key_type = typename base_type::key_type;
+  using value_type = typename base_type::value_type;
+
+  using host_mem_pool_type = MemoryPool<HostAllocator<char>>;
+
+  RocksDBStorage(const RocksDBStorageOptions& opts) {
+    MERLIN_CHECK(
+        !opts.path.empty(),
+        "Must provide where the database files are / should be stored!");
+    MERLIN_CHECK(!opts.column_name.empty(),
+                 "Must specify a RocksDB column family!");
+
+    // Basic behavior.
+    rocksdb::Options rdb_opts;
+    rdb_opts.create_if_missing = true;
+    rdb_opts.manual_wal_flush = true;
+    rdb_opts.OptimizeForPointLookup(8);
+    rdb_opts.OptimizeLevelStyleCompaction();
+    rdb_opts.IncreaseParallelism(32);
+
+    // Configure various behaviors and options used in later operations.
+    rocksdb::ColumnFamilyOptions col_fam_opts;
+    col_fam_opts.OptimizeForPointLookup(8);
+    col_fam_opts.OptimizeLevelStyleCompaction();
+
+    read_opts_.readahead_size = 2 * 1024 * 1024;
+    read_opts_.verify_checksums = false;
+
+    write_opts_.sync = false;
+    write_opts_.disableWAL = false;
+    write_opts_.no_slowdown = false;
+
+    // Connect to DB with all column families.
+    {
+      // Enumerate column families and link to our preferred options.
+      std::vector<rocksdb::ColumnFamilyDescriptor> col_descs;
+      {
+        std::vector<std::string> col_names;
+        if (!rocksdb::DB::ListColumnFamilies(rdb_opts, opts.path, &col_names)
+                 .ok()) {
+          col_names.clear();
+        }
+
+        bool has_default{};
+        for (const std::string& cn : col_names) {
+          has_default |= cn == rocksdb::kDefaultColumnFamilyName;
+        }
+        if (!has_default) {
+          col_names.push_back(rocksdb::kDefaultColumnFamilyName);
+        }
+
+        for (const std::string& cn : col_names) {
+          col_descs.emplace_back(cn, col_fam_opts);
+        }
+      }
+
+      // Connect to database.
+      rocksdb::DB* db;
+      if (opts.read_only) {
+        ROCKSDB_CHECK(rocksdb::DB::OpenForReadOnly(
+            rdb_opts, opts.path, col_descs, &col_handles_, &db));
+      } else {
+        ROCKSDB_CHECK(rocksdb::DB::Open(rdb_opts, opts.path, col_descs,
+                                        &col_handles_, &db));
+      }
+      db_.reset(db);
+    }
+
+    // Create column family for this storage, if it doesn't exist yet.
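+    // Reuse the matching handle if the requested column family is already
+    // present; otherwise it is created below and appended to the handle list.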
+    for (rocksdb::ColumnFamilyHandle* const ch : col_handles_) {
+      if (ch->GetName() == opts.column_name) {
+        col_handle_ = ch;
+      }
+    }
+    if (!col_handle_) {
+      ROCKSDB_CHECK(db_->CreateColumnFamily(col_fam_opts, opts.column_name,
+                                            &col_handle_));
+      col_handles_.emplace_back(col_handle_);
+    }
+
+    // Create memory pools.
+    host_mem_pool_ = std::make_unique<host_mem_pool_type>(opts.host_mem_pool);
+  }
+
+  virtual ~RocksDBStorage() {
+    // Destroy memory pool.
+    host_mem_pool_.reset();
+
+    // Synchronize and close database.
+    ROCKSDB_CHECK(db_->SyncWAL());
+    for (auto& ch : col_handles_) {
+      ROCKSDB_CHECK(db_->DestroyColumnFamilyHandle(ch));
+    }
+    col_handles_.clear();
+
+    ROCKSDB_CHECK(db_->Close());
+    db_.reset();
+  }
+
+  virtual void insert_or_assign(
+      const size_type n,
+      const key_type* const d_keys,      // (n)
+      const value_type* const d_values,  // (n * value_dims)
+      const size_type value_dims, cudaStream_t stream) override {
+    const size_t ws_size{(sizeof(key_type) + sizeof(value_type) * value_dims) *
+                         n};
+    auto ws{host_mem_pool_->get_workspace<1>(ws_size, stream)};
+
+    // Copy keys & values to host.
+    auto h_keys{ws.get<key_type*>(0)};
+    auto h_values{reinterpret_cast<value_type*>(h_keys + n)};
+
+    CUDA_CHECK(cudaMemcpyAsync(h_keys, d_keys, sizeof(key_type) * n,
+                               cudaMemcpyDeviceToHost, stream));
+    const size_type value_size{sizeof(value_type) * value_dims};
+    CUDA_CHECK(cudaMemcpyAsync(h_values, d_values, value_size * n,
+                               cudaMemcpyDeviceToHost, stream));
+
+    // Create some structures that we will need.
+    rocksdb::WriteBatch batch(
+        12 +  // rocksdb::WriteBatchInternal::kHeader
+        n * (sizeof(char) + sizeof(uint32_t) +      // column_id
+             sizeof(uint32_t) + sizeof(key_type) +  // key
+             sizeof(uint32_t) + value_size));       // value
+
+    rocksdb::ColumnFamilyHandle* const col_handle{col_handle_};
+    rocksdb::Slice k_view{nullptr, sizeof(key_type)};
+    rocksdb::Slice v_view{nullptr, value_size};
+
+    // Ensure copy operation is complete.
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+
+    for (size_type i{}; i != n; ++i) {
+      k_view.data_ = reinterpret_cast<const char*>(&h_keys[i]);
+      v_view.data_ = reinterpret_cast<const char*>(&h_values[i * value_dims]);
+      ROCKSDB_CHECK(batch.Put(col_handle, k_view, v_view));
+    }
+    ROCKSDB_CHECK(db_->Write(write_opts_, &batch));
+    ROCKSDB_CHECK(db_->FlushWAL(true));
+  }
+
+  virtual size_type find(const size_type n,
+                         const key_type* const d_keys,  // (n)
+                         value_type* const d_values,    // (n * value_dims)
+                         const size_type value_dims, bool* const d_founds,
+                         cudaStream_t stream) const override {
+    const size_t ws_size{(sizeof(key_type) + sizeof(bool)) * n};
+    auto ws{host_mem_pool_->get_workspace<1>(ws_size, stream)};
+
+    auto h_keys{ws.get<key_type*>(0)};
+    auto h_founds{reinterpret_cast<bool*>(h_keys + n)};
+
+    // Copy keys and founds to host.
+    CUDA_CHECK(cudaMemcpyAsync(h_keys, d_keys, sizeof(key_type) * n,
+                               cudaMemcpyDeviceToHost, stream));
+    CUDA_CHECK(cudaMemcpyAsync(h_founds, d_founds, sizeof(bool) * n,
+                               cudaMemcpyDeviceToHost, stream));
+    const size_type value_size{sizeof(value_type) * value_dims};
+
+    std::vector<rocksdb::ColumnFamilyHandle*> col_handles(n, col_handle_);
+    std::vector<rocksdb::Slice> k_views;
+    k_views.reserve(n);
+    std::vector<std::string> v_views;
+    v_views.reserve(n);
+
+    // Ensure copy operation is complete.
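+    // (The keys/founds copies above are asynchronous; the host buffers must
+    // be valid before the RocksDB key views are built below.)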
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+
+    for (size_type i{}; i != n; ++i) {
+      k_views.emplace_back(reinterpret_cast<const char*>(&h_keys[i]),
+                           sizeof(key_type));
+    }
+
+    const std::vector<rocksdb::Status> statuses{
+        db_->MultiGet(read_opts_, col_handles, k_views, &v_views)};
+
+    size_type miss_count{};
+    for (size_type i{}; i != n; ++i) {
+      const rocksdb::Status& s{statuses[i]};
+      if (s.ok()) {
+        auto& v_view{v_views[i]};
+        MERLIN_CHECK(v_view.size() == value_size, "Value size mismatch!");
+        CUDA_CHECK(cudaMemcpyAsync(&d_values[i * value_dims], v_view.data(),
+                                   value_size, cudaMemcpyHostToDevice,
+                                   stream));
+        h_founds[i] = true;
+      } else if (s.IsNotFound()) {
+        ++miss_count;
+      } else {
+        ROCKSDB_CHECK(s);
+      }
+    }
+
+    // Copy founds back and ensure we finished copying.
+    CUDA_CHECK(cudaMemcpyAsync(d_founds, h_founds, sizeof(bool) * n,
+                               cudaMemcpyHostToDevice, stream));
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+
+    return n - miss_count;
+  }
+
+  virtual void erase(size_type n, const key_type* d_keys,
+                     cudaStream_t stream) override {
+    const size_t ws_size{sizeof(key_type) * n};
+    auto ws{host_mem_pool_->get_workspace<1>(ws_size, stream)};
+
+    auto h_keys{ws.get<key_type*>(0)};
+
+    // Copy keys to host.
+    CUDA_CHECK(cudaMemcpyAsync(h_keys, d_keys, sizeof(key_type) * n,
+                               cudaMemcpyDeviceToHost, stream));
+
+    // Create some structures that we will need.
+    rocksdb::WriteBatch batch(
+        12 +  // rocksdb::WriteBatchInternal::kHeader
+        n * (sizeof(char) + sizeof(uint32_t) +     // column_id
+             sizeof(uint32_t) + sizeof(key_type)));  // key
+
+    rocksdb::ColumnFamilyHandle* const col_handle{col_handle_};
+    rocksdb::Slice k_view{nullptr, sizeof(key_type)};
+
+    // Ensure copy operation is complete.
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+
+    for (size_type i{}; i != n; ++i) {
+      k_view.data_ = reinterpret_cast<const char*>(&h_keys[i]);
+      ROCKSDB_CHECK(batch.Delete(col_handle, k_view));
+    }
+    ROCKSDB_CHECK(db_->Write(write_opts_, &batch));
+    ROCKSDB_CHECK(db_->FlushWAL(true));
+  }
+
+ private:
+  rocksdb::ReadOptions read_opts_;
+  rocksdb::WriteOptions write_opts_;
+
+  std::unique_ptr<rocksdb::DB> db_;
+  std::vector<rocksdb::ColumnFamilyHandle*> col_handles_;
+  rocksdb::ColumnFamilyHandle* col_handle_{};
+
+  std::unique_ptr<host_mem_pool_type> host_mem_pool_;
+};
+
+}  // namespace merlin
+}  // namespace nv
\ No newline at end of file
diff --git a/tests/ext_storage_rocksdb_test.cc.cu b/tests/ext_storage_rocksdb_test.cc.cu
new file mode 100644
index 000000000..a8e13a1f7
--- /dev/null
+++ b/tests/ext_storage_rocksdb_test.cc.cu
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <sys/stat.h>
+
+#include <cstdlib>
+#include <fstream>
+#include <functional>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include "merlin/debug.hpp"
+#include "merlin/rocksdb_storage.hpp"
+
+using namespace nv::merlin;
+
+using rocks_db_storage = RocksDBStorage<uint64_t, float>;
+
+const size_t value_dims{3};
+
+template <class T>
+using device_ptr = std::unique_ptr<T, std::function<void(T*)>>;
+
+template <class T>
+device_ptr<T> device_alloc(const size_t n, cudaStream_t stream) {
+  T* ptr;
+  CUDA_CHECK(cudaMallocAsync(&ptr, sizeof(T) * n, stream));
+  return {ptr,
+          [stream](T* const ptr) { CUDA_CHECK(cudaFreeAsync(ptr, stream)); }};
+}
+
+template <class T>
+device_ptr<T> to_device(const std::vector<T>& vec, cudaStream_t stream) {
+  auto ptr{device_alloc<T>(vec.size(), stream)};
+  CUDA_CHECK(cudaMemcpyAsync(ptr.get(), vec.data(), sizeof(T) * vec.size(),
+                             cudaMemcpyHostToDevice, stream));
+  return ptr;
+}
+
+template <class T>
+void zero_fill(device_ptr<T>& ptr, const size_t n, cudaStream_t stream) {
+  CUDA_CHECK(cudaMemsetAsync(ptr.get(), 0, sizeof(T) * n, stream));
+}
+
+template <class T, class U = std::conditional_t<std::is_same<T, bool>::value,
+                                                char, T>>
+std::vector<U> to_host(const device_ptr<T>& ptr, const size_t n,
+                       cudaStream_t stream) {
+  static_assert(sizeof(bool) == sizeof(char),
+                "bool and char must have the same size");
+
+  std::vector<U> vec(n);
+  CUDA_CHECK(cudaMemcpyAsync(vec.data(), ptr.get(), sizeof(T) * n,
+                             cudaMemcpyDeviceToHost, stream));
+  CUDA_CHECK(cudaStreamSynchronize(stream));
+  return vec;
+}
+
+void test_rocksdb_create() {
+  RocksDBStorageOptions opts;
+  opts.path = "/tmp/rocksdb_fantasy_path_that_does_not_exist";
+
+  // Ensure rocksdb database doesn't exist.
+  std::system(("rm -rf " + opts.path).c_str());
+  {
+    struct stat st;
+    ASSERT_NE(stat(opts.path.c_str(), &st), 0);
+  }
+
+  // Create rocksdb database.
+  rocks_db_storage store(opts);
+
+  std::ifstream log(opts.path + "/LOG");
+  ASSERT_TRUE(log.is_open());
+}
+
+void test_rocksdb_open_write_and_read() {
+  using key_type = typename rocks_db_storage::key_type;
+  using value_type = typename rocks_db_storage::value_type;
+
+  CUDA_CHECK(cudaSetDevice(0));
+
+  // Create rocksdb database.
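+  // The path matches the one used by `test_rocksdb_create`, so this reopens
+  // the same on-disk store.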
+  RocksDBStorageOptions opts;
+  opts.path = "/tmp/rocksdb_fantasy_path_that_does_not_exist";
+  rocks_db_storage store(opts);
+
+  cudaStream_t stream;
+  CUDA_CHECK(cudaStreamCreate(&stream));
+
+  {
+    std::vector<key_type> keys{1, 2, 3, 4};
+    auto d_keys{to_device(keys, stream)};
+
+    std::vector<value_type> values;
+    values.resize(value_dims * keys.size());
+    for (size_t i{}; i != values.size(); ++i)
+      values[i] = static_cast<value_type>(i + 1);
+    auto d_values{to_device(values, stream)};
+
+    store.insert_or_assign(keys.size(), d_keys.get(), d_values.get(),
+                           value_dims, stream);
+  }
+
+  {
+    std::vector<key_type> keys{2, 3, 5, 1, 99, 1};
+    auto d_keys{to_device(keys, stream)};
+    const size_t n{keys.size()};
+
+    device_ptr<value_type> d_values{
+        device_alloc<value_type>(n * value_dims, stream)};
+    zero_fill(d_values, n * value_dims, stream);
+
+    device_ptr<bool> d_founds{device_alloc<bool>(n, stream)};
+    zero_fill(d_founds, n, stream);
+
+    const size_t hit_count{store.find(n, d_keys.get(), d_values.get(),
+                                      value_dims, d_founds.get(), stream)};
+    ASSERT_EQ(hit_count, 4);
+
+    const std::vector<char> founds{to_host(d_founds, n, stream)};
+    ASSERT_EQ(founds, (std::vector<char>{1, 1, 0, 1, 0, 1}));
+
+    const std::vector<value_type> values{
+        to_host(d_values, n * value_dims, stream)};
+    ASSERT_EQ(values, (std::vector<value_type>{4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2,
+                                               3, 0, 0, 0, 1, 2, 3}));
+  }
+
+  CUDA_CHECK(cudaStreamSynchronize(stream));
+  CUDA_CHECK(cudaStreamDestroy(stream));
+}
+
+void test_rocksdb_open_erase_and_read() {
+  using key_type = typename rocks_db_storage::key_type;
+  using value_type = typename rocks_db_storage::value_type;
+
+  CUDA_CHECK(cudaSetDevice(0));
+
+  // Create rocksdb database.
+  RocksDBStorageOptions opts;
+  opts.path = "/tmp/rocksdb_fantasy_path_that_does_not_exist";
+  rocks_db_storage store(opts);
+
+  cudaStream_t stream;
+  CUDA_CHECK(cudaStreamCreate(&stream));
+
+  {
+    std::vector<key_type> keys{2, 3};
+    auto d_keys{to_device(keys, stream)};
+
+    store.erase(keys.size(), d_keys.get(), stream);
+  }
+
+  {
+    std::vector<key_type> keys{1, 2, 3, 4, 5};
+    auto d_keys{to_device(keys, stream)};
+    const size_t n{keys.size()};
+
+    device_ptr<value_type> d_values{
+        device_alloc<value_type>(n * value_dims, stream)};
+    zero_fill(d_values, n * value_dims, stream);
+
+    device_ptr<bool> d_founds{device_alloc<bool>(n, stream)};
+    zero_fill(d_founds, n, stream);
+
+    const size_t hit_count{store.find(n, d_keys.get(), d_values.get(),
+                                      value_dims, d_founds.get(), stream)};
+    ASSERT_EQ(hit_count, 2);
+
+    const std::vector<char> founds{to_host(d_founds, n, stream)};
+    ASSERT_EQ(founds, (std::vector<char>{1, 0, 0, 1, 0}));
+
+    const std::vector<value_type> values{
+        to_host(d_values, n * value_dims, stream)};
+    ASSERT_EQ(values, (std::vector<value_type>{1, 2, 3, 0, 0, 0, 0, 0, 0, 10,
+                                               11, 12, 0, 0, 0}));
+  }
+
+  CUDA_CHECK(cudaStreamSynchronize(stream));
+  CUDA_CHECK(cudaStreamDestroy(stream));
+}
+
+TEST(RocksDBTest, create) { test_rocksdb_create(); }
+TEST(RocksDBTest, open_write_and_read) { test_rocksdb_open_write_and_read(); }
+TEST(RocksDBTest, open_erase_and_read) { test_rocksdb_open_erase_and_read(); }
diff --git a/third_party/rocksdb b/third_party/rocksdb
new file mode 160000
index 000000000..da11a5903
--- /dev/null
+++ b/third_party/rocksdb
@@ -0,0 +1 @@
+Subproject commit da11a59034584ea2d0911268b8136e5249d6b692
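
A minimal usage sketch of the new RocksDBStorage backend, mirroring the flow exercised in tests/ext_storage_rocksdb_test.cc.cu. The uint64_t/float key/value types, the /tmp demo path, and the plain cudaMalloc/cudaMemcpy staging are illustrative assumptions, not requirements of the interface.

#include <cuda_runtime.h>

#include <cstdint>
#include <cstdio>
#include <vector>

#include "merlin/rocksdb_storage.hpp"

int main() {
  // Assumed key/value types; the patch leaves them as template parameters.
  using storage_type = nv::merlin::RocksDBStorage<uint64_t, float>;
  const size_t n = 2;
  const size_t value_dims = 3;

  nv::merlin::RocksDBStorageOptions opts;
  opts.path = "/tmp/hkv_rocksdb_demo";  // illustrative path

  storage_type store(opts);

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Stage two keys and their flattened (n * value_dims) values on the device.
  std::vector<uint64_t> h_keys{1, 2};
  std::vector<float> h_values{1, 2, 3, 4, 5, 6};
  uint64_t* d_keys;
  float* d_values;
  bool* d_founds;
  cudaMalloc(&d_keys, sizeof(uint64_t) * n);
  cudaMalloc(&d_values, sizeof(float) * n * value_dims);
  cudaMalloc(&d_founds, sizeof(bool) * n);
  cudaMemcpy(d_keys, h_keys.data(), sizeof(uint64_t) * n,
             cudaMemcpyHostToDevice);
  cudaMemcpy(d_values, h_values.data(), sizeof(float) * n * value_dims,
             cudaMemcpyHostToDevice);
  cudaMemset(d_founds, 0, sizeof(bool) * n);

  // Persist the pairs, then read them back through the same interface.
  store.insert_or_assign(n, d_keys, d_values, value_dims, stream);
  const size_t hits = store.find(n, d_keys, d_values, value_dims, d_founds,
                                 stream);
  std::printf("hits = %zu\n", hits);  // expect 2 once both keys are stored

  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
  cudaFree(d_keys);
  cudaFree(d_values);
  cudaFree(d_founds);
  return 0;
}

Callers only touch the three ExternalStorage entry points (insert_or_assign, find, erase); everything RocksDB-specific stays behind the interface.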