Commit

Misc cleanups
dagardner-nv committed Jun 18, 2024
1 parent caccd39 commit c099255
Showing 1 changed file with 20 additions and 20 deletions.
40 changes: 20 additions & 20 deletions morpheus/_lib/src/stages/preprocess_fil.cpp
@@ -38,13 +38,14 @@
 #include <cudf/column/column_view.hpp>  // for column_view
 #include <cudf/types.hpp>               // for type_id, data_type
 #include <cudf/unary.hpp>               // for cast
-#include <mrc/cuda/common.hpp>       // for __check_cuda_errors, MRC_CHECK_CUDA
-#include <mrc/segment/builder.hpp>   // for Builder
-#include <pybind11/gil.h>            // for gil_scoped_acquire
-#include <pybind11/pybind11.h>       // for object_api::operator(), operator""_a, arg
-#include <pybind11/pytypes.h>        // for object, str, object_api, generic_item, literals
-#include <rmm/cuda_stream_view.hpp>  // for cuda_stream_per_thread
-#include <rmm/device_buffer.hpp>     // for device_buffer
+#include <glog/logging.h>
+#include <mrc/cuda/common.hpp>       // for __check_cuda_errors, MRC_CHECK_CUDA
+#include <mrc/segment/builder.hpp>   // for Builder
+#include <pybind11/gil.h>            // for gil_scoped_acquire
+#include <pybind11/pybind11.h>       // for object_api::operator(), operator""_a, arg
+#include <pybind11/pytypes.h>        // for object, str, object_api, generic_item, literals
+#include <rmm/cuda_stream_view.hpp>  // for cuda_stream_per_thread
+#include <rmm/device_buffer.hpp>     // for device_buffer
 
 #include <algorithm>  // for find
 #include <cstddef>    // for size_t
@@ -229,32 +230,32 @@ std::shared_ptr<MultiInferenceMessage> PreprocessFILStage<MultiMessage, MultiInf
 template <>
 std::shared_ptr<ControlMessage> PreprocessFILStage<ControlMessage, ControlMessage>::on_control_message(
     std::shared_ptr<ControlMessage> x)
-
 {
-    auto num_rows = x->payload()->get_info().num_rows();
+    auto df_meta = this->fix_bad_columns(x);
+    const auto num_rows = df_meta.num_rows();
 
     auto packed_data =
         std::make_shared<rmm::device_buffer>(m_fea_cols.size() * num_rows * sizeof(float), rmm::cuda_stream_per_thread);
-    auto df_meta = this->fix_bad_columns(x);
 
     for (size_t i = 0; i < df_meta.num_columns(); ++i)
     {
         auto curr_col = df_meta.get_column(i);
 
-        auto curr_ptr = static_cast<float*>(packed_data->data()) + i * df_meta.num_rows();
+        auto curr_ptr = static_cast<float*>(packed_data->data()) + i * num_rows;
 
         // Check if we are something other than float
         if (curr_col.type().id() != cudf::type_id::FLOAT32)
         {
             auto float_data = cudf::cast(curr_col, cudf::data_type(cudf::type_id::FLOAT32))->release();
 
             // Do the copy here before it goes out of scope
-            MRC_CHECK_CUDA(cudaMemcpy(
-                curr_ptr, float_data.data->data(), df_meta.num_rows() * sizeof(float), cudaMemcpyDeviceToDevice));
+            MRC_CHECK_CUDA(
+                cudaMemcpy(curr_ptr, float_data.data->data(), num_rows * sizeof(float), cudaMemcpyDeviceToDevice));
         }
         else
         {
-            MRC_CHECK_CUDA(cudaMemcpy(curr_ptr,
-                                      curr_col.template data<float>(),
-                                      df_meta.num_rows() * sizeof(float),
-                                      cudaMemcpyDeviceToDevice));
+            MRC_CHECK_CUDA(cudaMemcpy(
+                curr_ptr, curr_col.template data<float>(), num_rows * sizeof(float), cudaMemcpyDeviceToDevice));
         }
     }

@@ -279,10 +280,9 @@ std::shared_ptr<ControlMessage> PreprocessFILStage<ControlMessage, ControlMessag
     auto memory = std::make_shared<TensorMemory>(num_rows);
     memory->set_tensor("input__0", std::move(input__0));
     memory->set_tensor("seq_ids", std::move(seq_ids));
-    auto next = x;
-    next->tensors(memory);
+    x->tensors(memory);
 
-    return next;
+    return x;
 }
 
 template class PreprocessFILStage<MultiMessage, MultiInferenceMessage>;
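
Note on the middle hunk: the loop packs each cudf column into one contiguous device buffer, num_rows floats per column, and the cleanup computes that row count once from the repaired DataFrame instead of re-reading df_meta.num_rows() at every use. Below is a minimal, self-contained sketch of the same packing pattern; it assumes plain device pointers rather than cudf columns, and CHECK_CUDA is a hypothetical stand-in for MRC_CHECK_CUDA, not the Morpheus macro.

// Illustrative sketch only -- not the Morpheus stage. Size one device buffer
// from a row count computed once, then reuse that count for every copy.
#include <cuda_runtime.h>  // for cudaMemcpy, cudaError_t, cudaGetErrorString

#include <cstddef>  // for size_t
#include <cstdio>   // for fprintf
#include <cstdlib>  // for abort
#include <vector>   // for vector

// Hypothetical stand-in for MRC_CHECK_CUDA: abort on any CUDA error.
#define CHECK_CUDA(call)                                                               \
    do                                                                                 \
    {                                                                                  \
        cudaError_t status_ = (call);                                                  \
        if (status_ != cudaSuccess)                                                    \
        {                                                                              \
            std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status_));     \
            std::abort();                                                              \
        }                                                                              \
    } while (0)

// Pack per-column device arrays into one column-major buffer of
// device_cols.size() * num_rows floats. `packed` must already be that large.
void pack_columns(const std::vector<const float*>& device_cols, std::size_t num_rows, float* packed)
{
    for (std::size_t i = 0; i < device_cols.size(); ++i)
    {
        // The hoisted num_rows drives both the destination offset and the
        // copy size, mirroring `i * num_rows` in the stage.
        CHECK_CUDA(cudaMemcpy(packed + i * num_rows,
                              device_cols[i],
                              num_rows * sizeof(float),
                              cudaMemcpyDeviceToDevice));
    }
}

Hoisting the count also makes it explicit that every column is assumed to have the same length, which the offset arithmetic `i * num_rows` relies on.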
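The `// Do the copy here before it goes out of scope` comment in that hunk is load-bearing: cudf::cast allocates a temporary FLOAT32 column, release() moves its device allocation into the local float_data, and that allocation is freed when float_data leaves the if block, so the cudaMemcpy must happen first. A sketch of the same RAII hazard using std::unique_ptr in place of the cudf/rmm types:

// Illustrative sketch only: the owner of a temporary buffer frees it at end
// of scope, so any copy out of it must happen while the owner is alive.
#include <cstddef>  // for size_t
#include <cstring>  // for memcpy
#include <memory>   // for make_unique, unique_ptr

void copy_before_scope_exit(float* dst, std::size_t n)
{
    // Owns a temporary buffer, freed when `tmp` goes out of scope.
    auto tmp = std::make_unique<float[]>(n);

    // Copy while the owner is still alive; holding a raw pointer from
    // tmp.get() past this function would be a use-after-free.
    std::memcpy(dst, tmp.get(), n * sizeof(float));
}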

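The final hunk is behavior-preserving: next was a second shared_ptr to the same ControlMessage, so setting tensors through it and returning it was equivalent to using x directly. A tiny sketch of why the alias is a no-op, with a hypothetical Widget type:

// Illustrative sketch only: copying a shared_ptr copies the handle, not the
// object, so an alias adds a refcount bump and nothing else.
#include <cassert>  // for assert
#include <memory>   // for make_shared, shared_ptr

struct Widget
{
    int value{0};
};

int main()
{
    auto x    = std::make_shared<Widget>();
    auto next = x;  // same Widget, use_count goes 1 -> 2

    next->value = 42;  // mutates the one shared object
    assert(x->value == 42);
    assert(x.get() == next.get());

    return 0;
}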