Skip to content

Commit a291b25

Browse files
kshitij12345 authored and facebook-github-bot committed
Migrate masked_scatter_ CPU to ATen (#49732)
Summary: Fixes #49541 Reference: #24507 Pull Request resolved: #49732 Reviewed By: ejguan Differential Revision: D25991438 Pulled By: ngimel fbshipit-source-id: a43bd0bfe043d8e32a6cadbbf736a0eaa697e7ec
1 parent db079a9 commit a291b25

8 files changed

Lines changed: 80 additions & 228 deletions

File tree

aten/src/ATen/LegacyTHFunctionsCPU.cpp

Lines changed: 0 additions & 146 deletions
Original file line numberDiff line numberDiff line change
@@ -34,152 +34,6 @@ namespace {
3434
}
3535
}
3636

37-
// Legacy TH binding for in-place masked_scatter_ with a Byte (uint8) mask.
// Dispatches on self's scalar type and forwards to the matching
// TH<Type>Tensor_maskedCopy implementation. Removed by this commit in favor
// of the ATen TensorIterator-based kernel.
Tensor & _th_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) {
  // DeviceGuard omitted
  auto dispatch_scalar_type = infer_scalar_type(self);

  switch (dispatch_scalar_type) {
    case ScalarType::Bool: {
      // self and source must share self's dtype; mask is always checked as Byte here.
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THBoolTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Byte: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THByteTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Char: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THCharTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Double: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THDoubleTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Float: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THFloatTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Int: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THIntTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Long: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THLongTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::Short: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THShortTensor_maskedCopy(self_, mask_, source_);
      break;
    }
    case ScalarType::BFloat16: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CPU, ScalarType::Byte);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CPU, dispatch_scalar_type);
      THBFloat16Tensor_maskedCopy(self_, mask_, source_);
      break;
    }
    default:
      // Half and complex types have no TH maskedCopy implementation.
      AT_ERROR("_th_masked_scatter_ not supported on CPUType for ", dispatch_scalar_type);
  }
  return self;
}
110-
// Legacy TH binding for in-place masked_scatter_ with a Bool mask.
// Identical structure to _th_masked_scatter_, but the mask is checked as
// Bool and the *_maskedCopyBool TH entry points are called instead.
// Removed by this commit in favor of the ATen TensorIterator-based kernel.
Tensor & _th_masked_scatter_bool_(Tensor & self, const Tensor & mask, const Tensor & source) {
  // DeviceGuard omitted
  auto dispatch_scalar_type = infer_scalar_type(self);

  switch (dispatch_scalar_type) {
    case ScalarType::Bool: {
      // self and source must share self's dtype; mask is always checked as Bool here.
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THBoolTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Byte: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THByteTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Char: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THCharTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Double: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THDoubleTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Float: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THFloatTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Int: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THIntTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Long: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THLongTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::Short: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THShortTensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    case ScalarType::BFloat16: {
      auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CPU, ScalarType::Bool);
      auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CPU, dispatch_scalar_type);
      THBFloat16Tensor_maskedCopyBool(self_, mask_, source_);
      break;
    }
    default:
      // Half and complex types have no TH maskedCopyBool implementation.
      AT_ERROR("_th_masked_scatter_bool_ not supported on CPUType for ", dispatch_scalar_type);
  }
  return self;
}
18337
Tensor & _th_nonzero_out(Tensor & result, const Tensor & self) {
18438
// DeviceGuard omitted
18539
auto dispatch_scalar_type = infer_scalar_type(self);

aten/src/ATen/native/LegacyDefinitions.cpp

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -9,21 +9,6 @@ namespace at { namespace native {
99

1010
// Methods
1111

12-
// Legacy CPU masked_scatter_: broadcasts `mask` against `self`, then forwards
// to the TH implementation that matches the mask dtype. The uint8-mask path is
// deprecated and warns before delegating.
Tensor & masked_scatter__cpu(Tensor& self, const Tensor & mask, const Tensor & source) {
  at::assert_no_internal_overlap(self);
  Tensor b_mask;
  std::tie(b_mask) = expand_inplace(self, mask, "masked_scatter_");
  // As we dispatch on self and TH is type-checked, we need different definitions.
  // This can be fixed by moving to ATen.
  if (b_mask.dtype() != at::ScalarType::Byte) {
    // Bool mask: the non-deprecated path.
    return legacy::cpu::_th_masked_scatter_bool_(self, b_mask, source);
  }
  TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \
      "please use a mask with dtype torch.bool instead.");
  return legacy::cpu::_th_masked_scatter_(self, b_mask, source);
}
26-
2712
Tensor argsort(const Tensor & self, int64_t dim, bool descending) {
2813
return std::get<1>(at::sort(self, dim, descending));
2914
}

aten/src/ATen/native/TensorAdvancedIndexing.cpp

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@ DEFINE_DISPATCH(masked_fill_stub);
7474
REGISTER_NO_CPU_DISPATCH(index_put_accum_stub, index_put_accum_fn);
7575
DEFINE_DISPATCH(masked_select_serial_stub);
7676
DEFINE_DISPATCH(masked_select_stub);
77+
DEFINE_DISPATCH(masked_scatter_stub);
7778

7879
DEFINE_DISPATCH(gather_stub);
7980
DEFINE_DISPATCH(scatter_stub);
@@ -1122,4 +1123,39 @@ std::vector<Tensor> nonzero_numpy(const Tensor& self) {
11221123
return self.nonzero().unbind(1);
11231124
}
11241125

1126+
// In-place masked_scatter_ on CPU: copies consecutive elements of `source`
// into `self` at the positions where `mask` is true/nonzero. The mask is
// broadcast against self; source is read linearly in contiguous order.
// Returns `self`.
Tensor & masked_scatter__cpu(Tensor& self, const Tensor & mask, const Tensor & source) {
  at::assert_no_internal_overlap(self);
  TORCH_CHECK(
      self.scalar_type() == source.scalar_type(),
      // NOTE(fix): original message lacked a space after "got", rendering
      // e.g. "but gotFloat and Double".
      "masked_scatter: expected self and source to have same dtypes but got ",
      self.scalar_type(),
      " and ",
      source.scalar_type());

  TORCH_CHECK(self.device().type() == at::kCPU, "device type of self (", self.device().type(), ") is not CPU");
  TORCH_CHECK(mask.device().type() == at::kCPU, "device type of mask (", mask.device().type(), ") is not CPU");
  TORCH_CHECK(source.device().type() == at::kCPU, "device type of source (", source.device().type(), ") is not CPU");

  Tensor b_mask;
  std::tie(b_mask) = expand_inplace(self, mask, "masked_scatter_");

  if (b_mask.dtype() == ScalarType::Byte) {
    // NOTE(fix): original fused "deprecated," and "please" with no space.
    TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated, " \
        "please use a mask with dtype torch.bool instead.");
  }

  // The kernel consumes `source` through a raw linear pointer, so make it
  // contiguous once up front.
  auto src_cont = source.contiguous();

  auto iter = TensorIteratorConfig()
      // Overlap already validated via assert_no_internal_overlap above.
      .set_check_mem_overlap(false)
      // Output is self's dtype, input is a Bool/Byte mask.
      .check_all_same_dtype(false)
      .resize_outputs(false)
      .add_output(self)
      .add_input(b_mask)
      .build();

  masked_scatter_stub(iter.device_type(), iter, src_cont);
  return self;
}
1160+
11251161
}} // at::native

aten/src/ATen/native/TensorAdvancedIndexing.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ using index_put_fn = void(*)(TensorIterator &, IntArrayRef indexed_sizes, IntArr
1818
using index_put_accum_fn = void(*)(Tensor &, const c10::List<c10::optional<Tensor>> &, const Tensor &, bool unsafe);
1919
using masked_fill_fn = void(*)(TensorIterator &, Scalar scalar);
2020
using masked_select_fn = void(*)(TensorIterator &, int64_t orig_stride);
21+
using masked_scatter_fn = void(*)(TensorIterator &, const Tensor &);
2122

2223
using gather_fn = void (*)(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index);
2324
using scatter_fn = void(*)(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src);
@@ -34,6 +35,7 @@ DECLARE_DISPATCH(index_put_accum_fn, index_put_accum_stub);
3435
DECLARE_DISPATCH(masked_fill_fn, masked_fill_stub);
3536
DECLARE_DISPATCH(masked_select_fn, masked_select_serial_stub);
3637
DECLARE_DISPATCH(masked_select_fn, masked_select_stub);
38+
DECLARE_DISPATCH(masked_scatter_fn, masked_scatter_stub);
3739

3840
DECLARE_DISPATCH(gather_fn, gather_stub);
3941
DECLARE_DISPATCH(scatter_fn, scatter_stub);

aten/src/ATen/native/cpu/IndexKernel.cpp

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,46 @@ void masked_fill_kernel(TensorIterator& iter, Scalar value) {
163163
});
164164
}
165165

166+
template <typename scalar_t, typename mask_t>
167+
void cpu_masked_scatter_kernel(TensorIterator& iter, const Tensor& source) {
168+
auto is_mask_bool = std::is_same<mask_t, bool>::value;
169+
std::ptrdiff_t source_cntr = 0;
170+
scalar_t* source_ptr = source.data_ptr<scalar_t>();
171+
auto numel = source.numel();
172+
173+
auto loop = [&](char** data, const int64_t* strides, int64_t n) {
174+
char* dst = data[0];
175+
const int64_t dst_stride = strides[0];
176+
char* mask = data[1];
177+
const int64_t mask_stride = strides[1];
178+
for (int64_t i = 0; i < n; i++) {
179+
mask_t mask_value = *(mask_t*)(mask + mask_stride * i);
180+
if (!is_mask_bool) {
181+
TORCH_CHECK(mask_value <= static_cast<mask_t>(1), "Mask tensor can take 0 and 1 values only");
182+
}
183+
if (mask_value) {
184+
TORCH_CHECK(source_cntr < numel, "Number of elements of source < number of ones in mask");
185+
*(scalar_t*)(dst + dst_stride * i) = *(source_ptr);
186+
source_ptr++;
187+
source_cntr++;
188+
}
189+
}
190+
};
191+
iter.serial_for_each(loop, {0, iter.numel()});
192+
}
193+
194+
// Stub entry point: dispatches over every dtype (including Bool and BFloat16)
// and picks the mask element type from the iterator's first input dtype.
void masked_scatter_kernel(TensorIterator& iter, const Tensor& source) {
  AT_DISPATCH_ALL_TYPES_AND2(ScalarType::Bool, ScalarType::BFloat16,
      iter.dtype(), "masked_scatter", [&] {
        // Non-bool masks are the deprecated uint8 path.
        if (iter.input_dtype(0) != ScalarType::Bool) {
          cpu_masked_scatter_kernel<scalar_t, unsigned char>(iter, source);
        } else {
          cpu_masked_scatter_kernel<scalar_t, bool>(iter, source);
        }
      });
}
205+
166206
template <typename scalar_t, typename mask_t, typename func_t>
167207
void cpu_masked_select_serial_kernel(TensorIterator& iter, const func_t& f) {
168208
auto is_mask_bool = std::is_same<mask_t, bool>::value;
@@ -248,5 +288,6 @@ REGISTER_DISPATCH(index_put_stub, &index_put_kernel);
248288
REGISTER_DISPATCH(masked_fill_stub, &masked_fill_kernel);
249289
REGISTER_DISPATCH(masked_select_serial_stub, &masked_select_serial_kernel);
250290
REGISTER_DISPATCH(masked_select_stub, &masked_select_kernel);
291+
REGISTER_DISPATCH(masked_scatter_stub, &masked_scatter_kernel);
251292

252293
}} // namespace at::native

aten/src/TH/generic/THTensorEvenMoreMath.cpp

Lines changed: 0 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -76,69 +76,6 @@ void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor)
7676

7777
#if !defined(TH_REAL_IS_HALF) /* non half part */
7878

79-
// TH implementation of masked copy with a byte (uint8) mask: copies elements
// of `src` (read in contiguous order) into `tensor` wherever the mask is 1.
// Errors if mask and tensor element counts differ, if the mask holds values
// other than 0/1, or if src has fewer elements than the mask has ones.
// Removed by this commit in favor of the ATen kernel.
void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src )
{
  // Work on a contiguous copy/view of src so src_data can advance linearly.
  THTensor *srct = THTensor_(newContiguous)(src);
  scalar_t *src_data = srct->data<scalar_t>();
  ptrdiff_t cntr = 0;
  ptrdiff_t nelem = THTensor_(nElement)(srct);
  if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
  {
    // Release the contiguous ref before erroring out.
    c10::raw::intrusive_ptr::decref(srct);
    THError("Number of elements of destination tensor != Number of elements in mask");
  }
  TH_TENSOR_APPLY2(scalar_t, tensor, unsigned char, mask,
                   if (*mask_data > 1)
                   {
                     // Manual cleanup of the APPLY2 macro's counters is
                     // required before THError longjmps/throws out.
                     c10::raw::intrusive_ptr::decref(srct);
                     THFree(mask_counter);
                     THFree(tensor_counter);
                     THError("Mask tensor can take 0 and 1 values only");
                   }
                   else if (*mask_data == 1)
                   {
                     if (cntr == nelem)
                     {
                       c10::raw::intrusive_ptr::decref(srct);
                       THFree(mask_counter);
                       THFree(tensor_counter);
                       THError("Number of elements of src < number of ones in mask");
                     }
                     *tensor_data = *src_data;
                     src_data++;
                     cntr++;
                   });
  c10::raw::intrusive_ptr::decref(srct);
}
113-
114-
// TH implementation of masked copy with a bool mask. Identical to
// THTensor_(maskedCopy) except the mask is `bool`, so no 0/1 range check is
// needed. Removed by this commit in favor of the ATen kernel.
void THTensor_(maskedCopyBool)(THTensor *tensor, THBoolTensor *mask, THTensor* src )
{
  // Work on a contiguous copy/view of src so src_data can advance linearly.
  THTensor *srct = THTensor_(newContiguous)(src);
  scalar_t *src_data = srct->data<scalar_t>();
  ptrdiff_t cntr = 0;
  ptrdiff_t nelem = THTensor_(nElement)(srct);
  if (THTensor_(nElement)(tensor) != THBoolTensor_nElement(mask))
  {
    // Release the contiguous ref before erroring out.
    c10::raw::intrusive_ptr::decref(srct);
    THError("Number of elements of destination tensor != Number of elements in mask");
  }
  TH_TENSOR_APPLY2(scalar_t, tensor, bool, mask,
                   if (*mask_data)
                   {
                     if (cntr == nelem)
                     {
                       // Manual cleanup of the APPLY2 macro's counters is
                       // required before THError longjmps/throws out.
                       c10::raw::intrusive_ptr::decref(srct);
                       THFree(mask_counter);
                       THFree(tensor_counter);
                       THError("Number of elements of src < number of ones in mask");
                     }
                     *tensor_data = *src_data;
                     src_data++;
                     cntr++;
                   });
  c10::raw::intrusive_ptr::decref(srct);
}
141-
14279
#if !defined(TH_REAL_IS_BOOL)
14380
void THTensor_(mul)(THTensor *r_, THTensor *t, scalar_t value)
14481
{

aten/src/TH/generic/THTensorMath.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,6 @@ TH_API int THTensor_(equal)(THTensor *ta, THTensor *tb);
99

1010
#if !defined(TH_REAL_IS_HALF)
1111

12-
TH_API void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src);
13-
TH_API void THTensor_(maskedCopyBool)(THTensor *tensor, THBoolTensor *mask, THTensor* src);
14-
1512
TH_API ptrdiff_t THTensor_(numel)(THTensor *t);
1613

1714
#if !defined(TH_REAL_IS_BOOL)

0 commit comments

Comments
 (0)