Skip to content

Commit

Permalink
Add PR suggestions
Browse files Browse the repository at this point in the history
  • Loading branch information
Alpus committed Feb 18, 2019
1 parent 32e246e commit fa1e514
Show file tree
Hide file tree
Showing 4 changed files with 38 additions and 54 deletions.
18 changes: 8 additions & 10 deletions examples/fit-model-ceres.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -303,10 +303,8 @@ int main(int argc, char* argv[])
// These will be the 2D image points and their corresponding 3D vertex id's used for the fitting
auto& landmarks = fitting_data.landmarks;

auto landmarks_definitions = morphable_model.get_landmark_definitions();
auto* landmarks_definitions_ptr =
landmarks_definitions.has_value() ? &landmarks_definitions.value() : nullptr;
auto indexed_landmarks = landmark_mapper.get_indexed_landmarks(landmarks, landmarks_definitions_ptr);
auto indexed_landmarks =
landmark_mapper.get_indexed_landmarks(landmarks, morphable_model.get_landmark_definitions());

google::InitGoogleLogging(argv[0]); // Ceres logging initialisation
std::stringstream fitting_log;
Expand All @@ -319,9 +317,9 @@ int main(int argc, char* argv[])
EOS_CERES_EXAMPLE_COLOR_COEFFS_NUM>(&morphable_model, &blendshapes);
auto camera = fitting::PerspectiveCameraParameters(image.width(), image.height());
model_fitter.add_camera_cost_function(camera, indexed_landmarks);
model_fitter.block_shapes_fitting();
model_fitter.block_blendshapes_fitting();
model_fitter.block_fov_fitting(camera, 60.0);
model_fitter.set_shape_coefficients_constant();
model_fitter.set_blendshape_coefficients_constant();
model_fitter.set_fov_constant(camera, 60.0);

auto solver_summary = model_fitter.solve();

Expand All @@ -339,7 +337,7 @@ int main(int argc, char* argv[])
camera.calculate_projection_matrix(), camera.get_viewport());
ceres_example::draw_landmarks(outimg, indexed_landmarks);

auto camera_euler_rotation = camera.get_euler_rotation();
const auto camera_euler_rotation = camera.get_euler_rotation();
fitting_log << "Pose fit with mean shape:\tYaw " << glm::degrees(camera_euler_rotation[1]) << ", Pitch "
<< glm::degrees(camera_euler_rotation[0]) << ", Roll "
<< glm::degrees(camera_euler_rotation[2]) << "; t & f: " << camera.translation_and_intrinsics
Expand Down Expand Up @@ -368,7 +366,7 @@ int main(int argc, char* argv[])

model_fitter.reset_problem();
model_fitter.add_camera_cost_function(camera, indexed_landmarks);
model_fitter.block_fov_fitting(camera, 60.0);
model_fitter.set_fov_constant(camera, 60.0);

model_fitter.add_shape_prior_cost_function();
model_fitter.add_blendshape_prior_cost_function();
Expand Down Expand Up @@ -398,7 +396,7 @@ int main(int argc, char* argv[])
// Draw the landmarks projected using all estimated parameters:
// Construct the rotation & translation (model-view) matrices, projection matrix, and viewport:

auto points = model_fitter.calculate_estimated_points_positions();
const auto points = model_fitter.calculate_estimated_points_positions();

core::Mesh mesh = morphablemodel::sample_to_mesh(
points, color_instance, morphable_model.get_shape_model().get_triangle_list(),
Expand Down
7 changes: 3 additions & 4 deletions include/eos/core/LandmarkMapper.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,7 @@ class LandmarkMapper
*
* @param[in] mappings A set of landmark mappings.
*/
LandmarkMapper(std::unordered_map<std::string, std::string> mappings)
: landmark_mappings(std::move(mappings)){};
LandmarkMapper(std::unordered_map<std::string, std::string>& mappings) : landmark_mappings(mappings){};

/**
* @brief Converts the given landmark name to the mapped name.
Expand Down Expand Up @@ -141,7 +140,7 @@ class LandmarkMapper
*/
template <typename LandmarkType>
auto get_indexed_landmarks(const LandmarkCollection<LandmarkType>& landmarks,
const LandmarkDefinitions* const landmark_definitions = nullptr) const
const cpp17::optional<LandmarkDefinitions> landmark_definitions = cpp17::nullopt) const
{
IndexedLandmarkCollection<LandmarkType> indexed_landmarks;
for (auto& landmark : landmarks)
Expand All @@ -153,7 +152,7 @@ class LandmarkMapper
}

int vertex_idx;
if (landmark_definitions != nullptr)
if (landmark_definitions)
{
const auto found_vertex_idx = landmark_definitions->find(converted_name.value());

Expand Down
61 changes: 24 additions & 37 deletions include/eos/fitting/ceres_nonlinear.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,19 +161,18 @@ struct LandmarkCost
blendshape_coeffs, get_num_cam_params(use_perspective));

// Project the point to 2D:
const auto point_3d = tvec3<T>(point_arr[0], point_arr[1], point_arr[2]);
const tvec3<T> point_3d(point_arr[0], point_arr[1], point_arr[2]);
// I think the quaternion is always normalised because we run Ceres with QuaternionParameterization
const auto rot_quat =
tquat<T>(camera_rotation[0], camera_rotation[1], camera_rotation[2], camera_rotation[3]);
const tquat<T> rot_quat(camera_rotation[0], camera_rotation[1], camera_rotation[2], camera_rotation[3]);
// We rotate ZXY*p, which is RPY*p. I'm not sure this matrix still corresponds to RPY - probably if we
// use glm::eulerAngles(), these are not RPY anymore and we'd have to adjust if we were to use
// rotation matrices.
const auto rot_mtx = glm::mat4_cast(rot_quat);

// Todo: use get_opencv_viewport() from nonlin_cam_esti.hpp.
const auto viewport = tvec4<T>(0, image_height, image_width, -image_height); // OpenCV convention
const tvec4<T> viewport(0, image_height, image_width, -image_height); // OpenCV convention

auto projected_point = tvec3<T>(); // Note: could avoid default construction by using a lambda and
tvec3<T> projected_point(); // Note: could avoid default construction by using a lambda and
// immediate invocation
if (use_perspective)
{
Expand Down Expand Up @@ -464,7 +463,6 @@ ceres::Solver::Options get_default_solver_options()
ceres::Solver::Options solver_options;
solver_options.linear_solver_type = ceres::ITERATIVE_SCHUR;
solver_options.num_threads = 8;
solver_options.num_linear_solver_threads = 8; // only SPARSE_SCHUR can use this
solver_options.minimizer_progress_to_stdout = true;
solver_options.max_num_iterations = 50;

Expand Down Expand Up @@ -612,9 +610,24 @@ class ModelFitter
*/
explicit ModelFitter(const morphablemodel::MorphableModel* const morphable_model,
const morphablemodel::Blendshapes* const blendshapes = nullptr)
: problem(std::make_unique<ceres::Problem>()), morphable_model(morphable_model),
blendshapes(find_blendshapes(blendshapes))
: problem(std::make_unique<ceres::Problem>()), morphable_model(morphable_model)
{
if (blendshapes == nullptr)
{
if (!morphable_model->has_separate_expression_model())
{
throw std::runtime_error(
"Blendshapes was not passed and morphable model does not contain them too."
"Try to pass blendshapes explicitly.");
} else
{
this->blendshapes =
&cpp17::get<morphablemodel::Blendshapes>(morphable_model->get_expression_model().value());
}
} else
{
this->blendshapes = blendshapes;
}
}

/*
Expand Down Expand Up @@ -820,15 +833,15 @@ class ModelFitter
/**
* Block shapes fitting
*/
void block_shapes_fitting()
void set_shape_coefficients_constant()
{
// Mark the shape-coefficient parameter block as constant in the Ceres problem,
// i.e. exclude the PCA shape coefficients from the optimisation.
// NOTE(review): assumes add_camera_cost_function() (or similar) has already
// registered this parameter block with the problem — confirm call order.
problem->SetParameterBlockConstant(&shape_coefficients[0]);
}

/**
* Block blendshapes fitting
*/
void block_blendshapes_fitting()
void set_blendshape_coefficients_constant()
{
// Mark the blendshape-coefficient parameter block as constant in the Ceres
// problem, i.e. exclude the expression blendshape coefficients from fitting.
// NOTE(review): assumes this parameter block was already added to the problem
// by a cost-function registration — confirm call order.
problem->SetParameterBlockConstant(&blendshape_coefficients[0]);
}
Expand All @@ -841,7 +854,7 @@ class ModelFitter
* @param[in,out] camera perspective camera to fit
* @param[in] fov_in_grad field of view to fix
*/
void block_fov_fitting(PerspectiveCameraParameters& camera, double fov_in_grad)
void set_fov_constant(PerspectiveCameraParameters &camera, double fov_in_grad)
{
auto& camera_translation_and_intrinsics = camera.translation_and_intrinsics;
camera_translation_and_intrinsics[3] = glm::radians(fov_in_grad);
Expand All @@ -859,32 +872,6 @@ class ModelFitter
std::unique_ptr<ceres::Problem> problem;

private:
/*
 * Returns the given blendshapes pointer if it is non-null; otherwise tries to
 * obtain blendshapes from the morphable model's expression model.
 *
 * @param[in] blendshapes Optional pointer to a set of blendshapes; may be nullptr.
 * @return Pointer to the blendshapes to use (the argument itself, or the ones
 *         held inside the morphable model). Never nullptr.
 * @throws std::runtime_error if \p blendshapes is nullptr and the morphable
 *         model has no expression model. Note: cpp17::get may also throw if the
 *         expression model holds a variant alternative other than Blendshapes —
 *         TODO confirm the expression-model variant's possible alternatives.
 */
auto find_blendshapes(const morphablemodel::Blendshapes* const blendshapes = nullptr) const
{
if (blendshapes == nullptr)
{
// No blendshapes passed in - fall back to the morphable model's expression model.
const auto& blendshapes_optional = morphable_model->get_expression_model();

if (blendshapes_optional.has_value())
{
return &cpp17::get<morphablemodel::Blendshapes>(*blendshapes_optional);
} else
{
throw std::runtime_error(
"Blendshapes was not passed and morphable model does not contain them too."
"Try to pass blendshapes explicitly.");
}
} else
{
return blendshapes;
}
}

const morphablemodel::MorphableModel* const morphable_model;
const morphablemodel::Blendshapes* const blendshapes;
};
Expand Down
6 changes: 3 additions & 3 deletions include/eos/fitting/contour_correspondence.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ struct ModelContour

// We store r/l separately because we currently only fit to the contour facing the camera.
// Also if we were to fit to the whole contour: Be careful not to just fit to the closest. The
// "invisible" ones behind might be closer on e.g. a 90° angle. Store CNT for left/right side separately?
// "invisible" ones behind might be closer on e.g. a 90° angle. Store CNT for left/right side separately?

/**
* Helper method to load a ModelContour from
Expand Down Expand Up @@ -265,7 +265,7 @@ get_contour_correspondences(const core::LandmarkCollection<Eigen::Vector2f>& lan
* have different size.
* Correspondence can be established using get_nearest_contour_correspondences().
*
* If the yaw angle is between +-7.5°, both contours will be selected.
* If the yaw angle is between +-7.5°, both contours will be selected.
*
* @param[in] yaw_angle Yaw angle in degrees.
* @param[in] contour_landmarks 2D image contour ids of left or right side (for example for ibug landmarks).
Expand Down Expand Up @@ -298,7 +298,7 @@ select_contour(float yaw_angle, const ContourLandmarks& contour_landmarks, const
begin(contour_landmarks.left_contour),
end(contour_landmarks.left_contour));
}
// Note there's an overlap between the angles - if a subject is between +- 7.5°, both contours get added.
// Note there's an overlap between the angles - if a subject is between +- 7.5°, both contours get added.
return std::make_pair(contour_landmark_identifiers, model_contour_indices);
};

Expand Down

0 comments on commit fa1e514

Please sign in to comment.