Added const to Pre/PostRun #462

Merged · 1 commit · Aug 3, 2023
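
The change is mechanical but consistent: every operator's lifecycle hooks (PreRun, Exec, PostRun) gain a const qualifier so that a fully built expression can be held and executed as a const object, while members that only serve as scratch storage (tmp_out_, and the cached operands in a few operators) become mutable so the const hooks can still write to them. A minimal standalone sketch of the idea (illustrative only, not code from this PR; ScratchOp and run_expression are made-up names):

#include <cstdio>
#include <vector>

// Standalone illustration of the pattern in this PR (not MatX code; ScratchOp
// and run_expression are made-up names). The lifecycle hooks are const so the
// whole expression can be handled as a const object, while the scratch buffer
// is mutable so the const hooks may still allocate into and release it.
struct ScratchOp {
  int n;                               // logical size, fixed at construction
  mutable std::vector<float> scratch;  // temporary output storage

  void PreRun() const { scratch.assign(n, 0.0f); }      // allocate scratch
  void Exec() const {
    for (int i = 0; i < n; i++) scratch[i] = float(i);  // write results
  }
  void PostRun() const { scratch.clear(); }             // release scratch
};

// The executor side only needs const access, which is what the added
// const qualifiers on PreRun/Exec/PostRun make possible.
void run_expression(const ScratchOp &op) {
  op.PreRun();
  op.Exec();
  std::printf("scratch[1] = %f\n", double(op.scratch[1]));
  op.PostRun();
}

int main() {
  const ScratchOp op{4, {}};
  run_expression(op);
  return 0;
}
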
include/matx/core/tensor_impl.h (4 changes: 2 additions & 2 deletions)

@@ -841,12 +841,12 @@ class tensor_impl_t {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept
{
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PostRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) noexcept
+ __MATX_INLINE__ void PostRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept
{
}

include/matx/core/tensor_utils.h (4 changes: 2 additions & 2 deletions)

@@ -315,7 +315,7 @@ namespace detail {
* @return Value after broadcasting
*/
template <class T, typename... Is>
- __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto get_matx_value(T &i, Is... indices)
+ __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto get_matx_value(const T &i, Is... indices)
{
if constexpr (T::Rank() == int(sizeof...(Is)) || T::Rank() == matxNoRank) {
return i(indices...);
@@ -348,7 +348,7 @@ namespace detail {


template <class T, typename... Is>
- __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto get_value(T &i, Is... indices)
+ __MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto get_value(const T &i, Is... indices)
{
if constexpr (is_matx_op<T>())
{
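
get_matx_value and get_value are the helpers that evaluate either a nested operator or a plain value at a set of indices; taking their argument by const reference lets them be called on the now-const expression trees. A simplified, self-contained version of that dispatch (not the MatX implementation; has_call_op, get_value_sketch and Ramp are made-up names):

#include <cstdio>
#include <type_traits>
#include <utility>

// Simplified stand-ins for illustration only (not the MatX implementation).
template <typename T, typename = void>
struct has_call_op : std::false_type {};
template <typename T>
struct has_call_op<T, std::void_t<decltype(std::declval<const T &>()(0))>>
    : std::true_type {};

// get_value-style dispatch: evaluate operator-like arguments at the given
// index, pass plain scalars through unchanged. Taking `const T &` (as this
// PR does) lets the helper be called on const expression trees.
template <typename T>
auto get_value_sketch(const T &v, int idx) {
  if constexpr (has_call_op<T>::value) {
    return v(idx);   // operator-like: evaluate lazily at the index
  } else {
    return v;        // scalar: same value for every index
  }
}

struct Ramp {                         // toy operator: value equals its index
  float operator()(int i) const { return float(i); }
};

int main() {
  const Ramp r{};
  std::printf("%f %f\n", double(get_value_sketch(r, 3)),
                         double(get_value_sketch(2.5f, 3)));
  return 0;
}
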
include/matx/operators/all.h (6 changes: 3 additions & 3 deletions)

@@ -49,7 +49,7 @@ namespace detail {
private:
OpA a_;
std::array<index_t, ORank> out_dims_;
- matx::tensor_t<typename remove_cvref_t<OpA>::scalar_type, ORank> tmp_out_;
+ mutable matx::tensor_t<typename remove_cvref_t<OpA>::scalar_type, ORank> tmp_out_;

public:
using matxop = bool;
@@ -70,7 +70,7 @@ namespace detail {
};

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
all_impl(std::get<0>(out), a_, ex);
}

@@ -80,7 +80,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/ambgfun.h (10 changes: 5 additions & 5 deletions)

@@ -44,13 +44,13 @@ namespace matx
class AmbgFunOp : public BaseOp<AmbgFunOp<OpX, OpY>>
{
private:
- OpX x_;
- OpY y_;
+ mutable OpX x_;
+ mutable OpY y_;
double fs_;
AMBGFunCutType_t cut_;
float cut_val_;
std::array<index_t, 2> out_dims_;
- matx::tensor_t<typename OpX::scalar_type, 2> tmp_out_;
+ mutable matx::tensor_t<typename OpX::scalar_type, 2> tmp_out_;

public:
using matxop = bool;
@@ -104,14 +104,14 @@
}

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "ambgfun() only supports the CUDA executor currently");
static_assert(std::tuple_element_t<0, remove_cvref_t<Out>>::Rank() == 2, "Output tensor of ambgfun must be 2D");
ambgfun_impl(std::get<0>(out), x_, y_, fs_, cut_, cut_val_, ex.getStream());
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpX>()) {
x_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/any.h (6 changes: 3 additions & 3 deletions)

@@ -49,7 +49,7 @@ namespace detail {
private:
OpA a_;
std::array<index_t, ORank> out_dims_;
- matx::tensor_t<typename remove_cvref_t<OpA>::scalar_type, ORank> tmp_out_;
+ mutable matx::tensor_t<typename remove_cvref_t<OpA>::scalar_type, ORank> tmp_out_;

public:
using matxop = bool;
@@ -70,7 +70,7 @@ namespace detail {
};

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
any_impl(std::get<0>(out), a_, ex);
}

@@ -80,7 +80,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/argmax.h (4 changes: 2 additions & 2 deletions)

@@ -64,7 +64,7 @@ namespace detail {
__MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto operator()(Is... indices) const = delete;

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(std::tuple_size_v<remove_cvref_t<Out>> == 3, "Must use mtie with 2 outputs on argmax(). ie: (mtie(O, I) = argmax(A))");
argmax_impl(std::get<0>(out), std::get<1>(out), a_, ex);
}
@@ -75,7 +75,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
MATX_ASSERT_STR(false, matxNotSupported, "argmax() must only be called with a single assignment since it has multiple return types");
}
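
The two asserts pin down the contract: argmax() yields both the reduced values and their locations, so it can only be consumed as the right-hand side of a single mtie() assignment, and using it as a sub-expression trips the matxNotSupported assertion in PreRun. The usage shape, lifted from the static_assert message above (O, I, A and exec are assumed to be suitably shaped tensors and an executor created elsewhere; this is a fragment, not a complete program):

// Pattern from the assert text: O receives the max values, I their indices.
(mtie(O, I) = argmax(A)).run(exec);
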
include/matx/operators/argmin.h (4 changes: 2 additions & 2 deletions)

@@ -64,7 +64,7 @@ namespace detail {
__MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto operator()(Is... indices) const = delete;

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(std::tuple_size_v<remove_cvref_t<Out>> == 3, "Must use mtie with 2 outputs on argmin(). ie: (mtie(O, I) = argmin(A))");
argmin_impl(std::get<0>(out), std::get<1>(out), a_, ex);
}
@@ -75,7 +75,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
MATX_ASSERT_STR(false, matxNotSupported, "argmin() must only be called with a single assignment since it has multiple return types");
}
include/matx/operators/base_operator.h (4 changes: 2 additions & 2 deletions)

@@ -123,7 +123,7 @@ namespace matx
* @param ex Executor
*/
template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept
{
}

@@ -136,7 +136,7 @@ namespace matx
* @param ex Executor
*/
template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PostRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) noexcept
+ __MATX_INLINE__ void PostRun([[maybe_unused]] ShapeType &&shape, [[maybe_unused]] Executor &&ex) const noexcept
{
}

include/matx/operators/binary_operators.h (8 changes: 4 additions & 4 deletions)

@@ -94,8 +94,8 @@ namespace matx
class matxBinaryOp : public BaseOp<matxBinaryOp<I1,I2,Op>>
{
private:
- typename base_type<I1>::type in1_;
- typename base_type<I2>::type in2_;
+ mutable typename base_type<I1>::type in1_;
+ mutable typename base_type<I2>::type in2_;
typename base_type<Op>::type op_;

public:
@@ -147,7 +147,7 @@ namespace matx
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun(ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun(ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<I1>()) {
in1_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
Expand All @@ -159,7 +159,7 @@ namespace matx
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PostRun(ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PostRun(ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<I1>()) {
in1_.PostRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/cgsolve.h (7 changes: 4 additions & 3 deletions)

@@ -49,7 +49,7 @@ namespace matx
double tol_;
int max_iters_;
std::array<index_t, 2> out_dims_;
- matx::tensor_t<typename OpA::scalar_type, 2> tmp_out_;
+ mutable matx::tensor_t<typename OpA::scalar_type, 2> tmp_out_;

public:
using matxop = bool;
@@ -79,19 +79,20 @@
{
return remove_cvref_t<OpB>::Rank();
}

constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t Size(int dim) const
{
return out_dims_[dim];
}

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "cgsolve() only supports the CUDA executor currently");
cgsolve_impl(std::get<0>(out), a_, b_, tol_, max_iters_, ex.getStream());
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/channelize_poly.h (6 changes: 3 additions & 3 deletions)

@@ -49,7 +49,7 @@ namespace detail {
index_t num_channels_;
index_t decimation_factor_;
std::array<index_t, OpA::Rank() + 1> out_dims_;
- matx::tensor_t<out_t, OpA::Rank() + 1> tmp_out_;
+ mutable matx::tensor_t<out_t, OpA::Rank() + 1> tmp_out_;

public:
using matxop = bool;
@@ -77,7 +77,7 @@ namespace detail {
}

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "channelize_poly() only supports the CUDA executor currently");

channelize_poly_impl(std::get<0>(out), a_, f_, num_channels_, decimation_factor_, ex.getStream());
@@ -89,7 +89,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/chol.h (6 changes: 3 additions & 3 deletions)

@@ -45,7 +45,7 @@ namespace detail {
private:
OpA a_;
cublasFillMode_t uplo_;
- matx::tensor_t<typename OpA::scalar_type, OpA::Rank()> tmp_out_;
+ mutable matx::tensor_t<typename OpA::scalar_type, OpA::Rank()> tmp_out_;

public:
using matxop = bool;
@@ -61,7 +61,7 @@ namespace detail {
__MATX_INLINE__ __MATX_DEVICE__ __MATX_HOST__ auto operator()(Is... indices) const = delete;

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "chol() only supports the CUDA executor currently");

chol_impl(std::get<0>(out), a_, ex.getStream(), uplo_);
@@ -73,7 +73,7 @@ namespace detail {
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/comma.h (27 changes: 26 additions & 1 deletion)

@@ -74,7 +74,32 @@ namespace matx
constexpr __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ auto Size(int dim) const noexcept
{
return op2_.Size(dim);
- }
+ }
+
+ template <typename ShapeType, typename Executor>
+ __MATX_INLINE__ void PreRun(ShapeType &&shape, Executor &&ex) const noexcept
+ {
+ if constexpr (is_matx_op<Op1>()) {
+ op1_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
+ }
+
+ if constexpr (is_matx_op<Op2>()) {
+ op2_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
+ }
+ }
+
+ template <typename ShapeType, typename Executor>
+ __MATX_INLINE__ void PostRun(ShapeType &&shape, Executor &&ex) const noexcept
+ {
+ if constexpr (is_matx_op<Op1>()) {
+ op1_.PostRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
+ }
+
+ if constexpr (is_matx_op<Op2>()) {
+ op2_.PostRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
+ }
+ }
+
private:
typename base_type<Op1>::type op1_;
typename base_type<Op2>::type op2_;
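
comma.h gets more than a const qualifier: it previously fell back on the (presumably inherited) no-op hooks, so temporaries owned by either side of a comma expression were never set up or torn down through the comma node. The new overrides recurse into both operands. A standalone sketch of that forwarding shape (not MatX code; CommaNode and Leaf are made-up types):

#include <cstdio>

// Two-operand node that forwards the const lifecycle hooks to both children,
// mirroring the PreRun/PostRun added to comma.h (illustrative only).
template <typename Op1, typename Op2>
struct CommaNode {
  Op1 op1;
  Op2 op2;

  void PreRun() const  { op1.PreRun();  op2.PreRun();  }   // recurse into both sides
  void PostRun() const { op1.PostRun(); op2.PostRun(); }
};

struct Leaf {
  const char *name;
  void PreRun() const  { std::printf("PreRun  %s\n", name); }
  void PostRun() const { std::printf("PostRun %s\n", name); }
};

int main() {
  const CommaNode<Leaf, Leaf> node{{"lhs"}, {"rhs"}};
  node.PreRun();   // both children may allocate their temporaries here
  node.PostRun();  // ...and release them here
  return 0;
}
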
include/matx/operators/conv.h (6 changes: 3 additions & 3 deletions)

@@ -52,7 +52,7 @@ namespace matx
matxConvCorrMode_t mode_;
PermDims perm_;
std::array<index_t, max_rank> out_dims_;
- matx::tensor_t<out_t, max_rank> tmp_out_;
+ mutable matx::tensor_t<out_t, max_rank> tmp_out_;

public:
using matxop = bool;
@@ -132,7 +132,7 @@ namespace matx
}

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "conv1d() only supports the CUDA executor currently");
MATX_STATIC_ASSERT_STR((Rank() == std::tuple_element_t<0, remove_cvref_t<Out>>::Rank()),
matxInvalidParameter, "conv1d: inputs and outputs must have same rank to use conv1d with axis parameter");
@@ -145,7 +145,7 @@ namespace matx
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));
include/matx/operators/corr.h (6 changes: 3 additions & 3 deletions)

@@ -53,7 +53,7 @@ namespace matx
matxConvCorrMethod_t method_;
PermDims perm_;
std::array<index_t, max_rank> out_dims_;
- matx::tensor_t<out_t, max_rank> tmp_out_;
+ mutable matx::tensor_t<out_t, max_rank> tmp_out_;

public:
using matxop = bool;
@@ -133,7 +133,7 @@ namespace matx
}

template <typename Out, typename Executor>
- void Exec(Out &&out, Executor &&ex) {
+ void Exec(Out &&out, Executor &&ex) const {
static_assert(is_device_executor_v<Executor>, "corr() only supports the CUDA executor currently");
MATX_STATIC_ASSERT_STR((Rank() == std::tuple_element_t<0, remove_cvref_t<Out>>::Rank()),
matxInvalidParameter, "corr: inputs and outputs must have same rank to use corr with axis parameter");
@@ -146,7 +146,7 @@ namespace matx
}

template <typename ShapeType, typename Executor>
- __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) noexcept
+ __MATX_INLINE__ void PreRun([[maybe_unused]] ShapeType &&shape, Executor &&ex) const noexcept
{
if constexpr (is_matx_op<OpA>()) {
a_.PreRun(std::forward<ShapeType>(shape), std::forward<Executor>(ex));