Skip to content
119 changes: 0 additions & 119 deletions aten/src/ATen/LegacyTHFunctionsCPU.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -743,125 +743,6 @@ std::tuple<Tensor,Tensor> _th_mode(const Tensor & self, int64_t dim, bool keepdi
}
return std::tuple<Tensor, Tensor>(values, indices);
}
// Legacy TH out-variant of sort: sorts `self` along dimension `dim`
// (largest-first when `descending` is true) into the caller-provided
// `values` and `indices` tensors, dispatching on self's scalar type to
// the matching TH kernel. `values` must have self's scalar type and
// `indices` must be a Long tensor. Returns references to the tensors
// that were passed in.
std::tuple<Tensor &,Tensor &> _th_sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) {
// DeviceGuard omitted
// The TH kernel to call is selected purely from self's scalar type.
auto dispatch_scalar_type = infer_scalar_type(self);

switch (dispatch_scalar_type) {
case ScalarType::Byte: {
// Every case follows the same pattern: unwrap the three tensors to their
// raw TH representations (validating denseness, device, and scalar type),
// then invoke the type-specific TH sort. Note that `indices` is always
// checked against ScalarType::Long, never the dispatch type.
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THByteTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Char: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THCharTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Double: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THDoubleTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Float: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THFloatTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Int: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THIntTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Long: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THLongTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Short: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THShortTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Half: {
auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CPU, ScalarType::Long);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CPU, dispatch_scalar_type);
THHalfTensor_sort(values_, indices_, self_, dim, descending);
break;
}
default:
// Any scalar type not handled above is rejected.
AT_ERROR("_th_sort_out not supported on CPUType for ", dispatch_scalar_type);
}
return std::tuple<Tensor &, Tensor &>(values, indices);
}
// Legacy TH allocating variant of sort: allocates fresh `values` (self's
// scalar type) and `indices` (Long) output tensors, then sorts `self`
// along dimension `dim` into them via the type-dispatched TH kernel.
// Returns the new (values, indices) pair by value.
std::tuple<Tensor,Tensor> _th_sort(const Tensor & self, int64_t dim, bool descending) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
// Create empty output TensorImpls. The raw pointers obtained via
// release() are immediately reclaimed into owning Tensors below, so
// `values`/`indices` own them; the raw `values_`/`indices_` pointers are
// then used as non-owning handles for the TH calls. This ordering must
// not change, or ownership would be dropped on the error path.
auto values_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CPU, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto values = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(values_));
auto indices_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CPU, scalarTypeToTypeMeta(ScalarType::Long)).release();
auto indices = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(indices_));
switch (dispatch_scalar_type) {
case ScalarType::Byte: {
// Each case: validate/unwrap `self` for this scalar type, then let the
// TH kernel resize and fill the freshly created outputs.
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THByteTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Char: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THCharTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THDoubleTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THFloatTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Int: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THIntTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Long: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THLongTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Short: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THShortTensor_sort(values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CPU, dispatch_scalar_type);
THHalfTensor_sort(values_, indices_, self_, dim, descending);
break;
}
default:
// Any scalar type not handled above is rejected.
AT_ERROR("_th_sort not supported on CPUType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(values, indices);
}
Tensor _th_var(const Tensor & self, bool unbiased) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
Expand Down
2 changes: 0 additions & 2 deletions aten/src/ATen/LegacyTHFunctionsCPU.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,6 @@ Tensor & _th_put_(Tensor & self, const Tensor & index, const Tensor & source, bo
// Declarations for legacy TH-backed CPU kernels. Naming convention
// (presumably, per the signatures below — confirm against definitions):
// `_out` variants write into caller-provided output tensors and return
// them by reference; a trailing underscore (e.g. `_th_index_fill_`)
// indicates an in-place op on `self`; plain variants allocate and
// return fresh result tensors by value.
Tensor & _th_index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value);
std::tuple<Tensor &,Tensor &> _th_mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor,Tensor> _th_mode(const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor &,Tensor &> _th_sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending);
std::tuple<Tensor,Tensor> _th_sort(const Tensor & self, int64_t dim, bool descending);
Tensor _th_var(const Tensor & self, bool unbiased);
Tensor _th_std(const Tensor & self, bool unbiased);
Tensor & _th_renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Expand Down
34 changes: 34 additions & 0 deletions aten/src/ATen/native/CompositeRandomAccessor.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#pragma once

// Include what we use: this header names std::tuple/std::tie/std::get
// (<tuple>) and std::swap (<utility>) directly, so it must not rely on
// transitive includes from CompositeRandomAccessorCommon.h.
#include <tuple>
#include <utility>

#include <ATen/native/CompositeRandomAccessorCommon.h>

namespace at { namespace native {

// Tuple-type policy for the CPU composite accessor: tells
// CompositeRandomAccessor to use std::tuple/std::tie for packing a
// (key, value) reference pair.
struct TupleInfoCPU {
template <typename ...Types>
using tuple = std::tuple<Types...>;

template <typename ...Types>
static constexpr auto tie(Types&... args) noexcept {
return std::tie(args...);
}
};

template <typename KeyAccessor, typename ValueAccessor>
using CompositeRandomAccessorCPU =
CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;

// Swap through the proxy-reference holders (taken by value, as proxies
// are cheap handles into the underlying key/value storage).
template <typename Values, typename References>
void swap(
references_holder<Values, References> rh1,
references_holder<Values, References> rh2
) {
// Plain statement: returning a void expression from a void function is
// legal but obscures intent.
std::swap(rh1.data(), rh2.data());
}

// Tuple-protocol access to the N-th element of a proxy reference pair,
// preserving the reference category of std::get on the held tuple.
template <int N, typename Values, typename References>
auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.data())) {
return std::get<N>(rh.data());
}

}} // namespace at::native
Loading