Skip to content

Commit 92e03cd

Browse files
Revert "Add torch.empty_permuted (pytorch#95069)"
This reverts commit bedeb1f. Reverted pytorch#95069 on behalf of https://github.com/jeanschmidt because it was breaking internal builds. More details in https://fburl.com/phabricator/ztrxrroq
1 parent 079476c commit 92e03cd

12 files changed

Lines changed: 0 additions & 254 deletions

File tree

aten/src/ATen/native/TensorFactories.cpp

Lines changed: 0 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@
4646
#include <ATen/ops/empty_like.h>
4747
#include <ATen/ops/empty_like_native.h>
4848
#include <ATen/ops/empty_native.h>
49-
#include <ATen/ops/empty_permuted_native.h>
5049
#include <ATen/ops/empty_strided.h>
5150
#include <ATen/ops/empty_strided_native.h>
5251
#include <ATen/ops/eye.h>
@@ -279,45 +278,6 @@ Tensor empty_names(
279278
return result;
280279
}
281280

282-
Tensor empty_permuted_symint(SymIntArrayRef size, IntArrayRef physical_layout, c10::optional<ScalarType> dtype_opt,
283-
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt
284-
) {
285-
// size is logical; aka, the output size you'll get from the operation overall
286-
//
287-
// physical_layout follows NCHW/NHWC convention:
288-
// contiguous is [0,1,2,3], channels last is [0,2,3,1]
289-
//
290-
// this means if i is physical index, physical_layout[i] is logical index;
291-
// e.g., to find what is innermost physical dim (3), query NHWC[3] == 1
292-
// (aka it is channels)
293-
int64_t dim = static_cast<int64_t>(size.size());
294-
SymDimVector phys_size(dim);
295-
TORCH_CHECK(physical_layout.size() == dim,
296-
"Number of dimensions in size does not match the "
297-
"length of the physical_layout; i.e. len(size) = ", dim,
298-
" is not equal to len(physical_layout) = ", physical_layout.size());
299-
std::vector<bool> seen_dims(dim);
300-
for (const auto i : c10::irange(dim)) {
301-
TORCH_CHECK(physical_layout[i] >= 0 && physical_layout[i] < dim,
302-
"Dimension out of range (expected to be between 0 and ", dim - 1, ", but got ",
303-
physical_layout[i], " at index ", i, "). NB: negative dims "
304-
"not currently supported; file an issue if you want it.");
305-
TORCH_CHECK(!seen_dims[physical_layout[i]], "Duplicate dim not allowed");
306-
phys_size[i] = size[physical_layout[i]];
307-
seen_dims[physical_layout[i]] = true;
308-
}
309-
// do a contiguous allocation
310-
Tensor phys_tensor = at::empty_symint(phys_size, dtype_opt, layout_opt, device_opt, pin_memory_opt, c10::nullopt);
311-
SymIntArrayRef phys_strides = phys_tensor.sym_strides();
312-
// permute the strides (inverse permutation! This is why this is
313-
// empty_permute*d*, not empty_permute; it's not an empty + permute)
314-
SymDimVector strides(dim);
315-
for (const auto i : c10::irange(dim)) {
316-
strides[physical_layout[i]] = phys_strides[i];
317-
}
318-
return phys_tensor.as_strided_symint(size, strides);
319-
}
320-
321281
Tensor empty_strided_cpu(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt,
322282
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
323283
return at::detail::empty_strided_cpu(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);

aten/src/ATen/native/native_functions.yaml

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2241,11 +2241,6 @@
22412241
SparseCsrCPU, SparseCsrCUDA: empty_sparse_compressed
22422242
QuantizedCPU, QuantizedCUDA, QuantizedMeta: empty_unknown_quantized
22432243

2244-
- func: empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
2245-
dispatch:
2246-
CompositeExplicitAutograd: empty_permuted_symint
2247-
autogen: empty_permuted.out
2248-
22492244
# We do not make new_empty a composite that calls into new_empty_strided, as the strided version
22502245
# is significantly more difficult to implement by different backends
22512246
- func: new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

test/expect/HasDecompTest.test_has_decomposition.expect

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -719,8 +719,6 @@ aten::embedding_renorm_
719719
aten::empty.memory_format
720720
aten::empty.names
721721
aten::empty.names_out
722-
aten::empty_permuted
723-
aten::empty_permuted.out
724722
aten::empty_quantized
725723
aten::empty_quantized.out
726724
aten::equal

test/inductor/test_torchinductor_opinfo.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -429,7 +429,6 @@ def wrapper_set_seed(op, *args, **kwargs):
429429
inductor_override_kwargs = {
430430
# the return value of empty is undefined
431431
"empty": {"assert_equal": False},
432-
"empty_permuted": {"assert_equal": False},
433432
"empty_like": {"assert_equal": False},
434433
"new_empty": {"assert_equal": False},
435434
"new_empty_strided": {"assert_equal": False},

test/test_proxy_tensor.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1154,7 +1154,6 @@ def f(a, b, c, d, e):
11541154
skip('new_empty'),
11551155
skip('empty_like'),
11561156
skip('empty'),
1157-
skip('empty_permuted'),
11581157
# flaky
11591158
skip('linalg.lstsq', 'grad_oriented'),
11601159
skip('nn.functional.max_unpool1d', '', device_type='cpu'),

torch/_inductor/decomposition.py

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -61,18 +61,6 @@ def floordiv(a, b):
6161
return aten.div.Tensor_mode(a, b, rounding_mode="floor")
6262

6363

64-
# Not really sure how to put this into the main library. PrimTorch wants
65-
# empty_permuted to go to the prim, and typically users don't really want
66-
# to decompose to empty_strided (but inductor is OK with it, because we are
67-
# cool with strides and everything goes to empty_strided)
68-
@register_decomposition([aten.empty_permuted.default])
69-
def empty_permuted(size, physical_layout, **kwargs):
70-
perm = [0] * len(size)
71-
for p, l in enumerate(physical_layout):
72-
perm[l] = p
73-
return torch.empty([size[l] for l in physical_layout], **kwargs).permute(perm)
74-
75-
7664
def get_alignment_size(x):
7765
if x.dtype == torch.float16 or x.dtype == torch.half or x.dtype == torch.bfloat16:
7866
return 8

torch/_prims/__init__.py

Lines changed: 0 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,6 @@
193193
# Tensor Creation Prims
194194
#
195195
"empty_strided",
196-
"empty_permuted",
197196
"scalar_tensor",
198197
"iota",
199198
#
@@ -2467,61 +2466,6 @@ def _empty_strided_meta(
24672466
)
24682467

24692468

2470-
def _empty_permuted_meta(
2471-
shape: ShapeType,
2472-
physical_layout: DimsSequenceType,
2473-
*,
2474-
dtype: torch.dtype,
2475-
device: torch.device,
2476-
requires_grad: bool,
2477-
) -> TensorLikeType:
2478-
p_strides = utils.make_contiguous_strides_for([shape[l] for l in physical_layout])
2479-
dim = len(shape)
2480-
utils.check(
2481-
len(physical_layout) == dim,
2482-
lambda: (
2483-
"Number of dimensions in the tensor input does not match the "
2484-
f"length of the physical layout; i.e. len(size) = {dim} "
2485-
f"is not equal to len(physical_layout) = {len(physical_layout)}"
2486-
),
2487-
)
2488-
strides = [0] * len(shape)
2489-
seen_dims = set()
2490-
for p, l in enumerate(physical_layout):
2491-
utils.check(
2492-
0 <= l < dim,
2493-
lambda: (
2494-
f"Dimension out of range (expected to be between 0 and {dim - 1}, but got "
2495-
f"{l} at index {p}). NB: negative dims "
2496-
"not currently supported; file an issue if you want it."
2497-
),
2498-
)
2499-
utils.check(l not in seen_dims, lambda: "Duplicate dim not allowed")
2500-
strides[l] = p_strides[p]
2501-
seen_dims.add(l)
2502-
return TensorMeta(
2503-
shape=shape,
2504-
strides=strides,
2505-
dtype=dtype,
2506-
device=device,
2507-
)
2508-
2509-
2510-
_empty_permuted_doc = """
2511-
Creates a tensor with uninitialized values according to some physical layout,
2512-
that is guaranteed to be non-overlapping and dense.
2513-
"""
2514-
2515-
# TODO: add layout, pin_memory
2516-
empty_permuted = _make_prim(
2517-
schema="empty_permuted(SymInt[] shape, int[] physical_layout, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor", # noqa: B950
2518-
return_type=RETURN_TYPE.NEW,
2519-
meta=_empty_permuted_meta,
2520-
impl_aten=torch.empty_permuted,
2521-
doc=_empty_permuted_doc,
2522-
)
2523-
2524-
25252469
def _full_meta(
25262470
shape: ShapeType,
25272471
fill_value: NumberType,

torch/_refs/__init__.py

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -4042,27 +4042,6 @@ def empty(
40424042
)
40434043

40444044

4045-
@out_wrapper()
4046-
def empty_permuted(
4047-
shape,
4048-
physical_layout,
4049-
dtype: Optional[torch.dtype] = None,
4050-
layout: torch.layout = torch.strided,
4051-
device: Optional[torch.device] = None,
4052-
requires_grad: bool = False,
4053-
pin_memory: bool = False,
4054-
) -> TensorLikeType:
4055-
return prims.empty_permuted(
4056-
shape,
4057-
physical_layout,
4058-
dtype=dtype,
4059-
layout=layout,
4060-
device=device,
4061-
pin_memory=pin_memory,
4062-
requires_grad=requires_grad,
4063-
)
4064-
4065-
40664045
@register_decomposition(aten.new_empty)
40674046
def new_empty(
40684047
a: TensorLikeType,

torch/_torch_docs.py

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -12353,51 +12353,6 @@ def merge_dicts(*dicts):
1235312353
),
1235412354
)
1235512355

12356-
add_docstr(
12357-
torch.empty_permuted,
12358-
r"""
12359-
empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
12360-
12361-
Creates an uninitialized, non-overlapping and dense tensor with the
12362-
specified :attr:`size`, with :attr:`physical_layout` specifying how the
12363-
dimensions are physically laid out in memory (each logical dimension is listed
12364-
from outermost to innermost). :attr:`physical_layout` is a generalization
12365-
of NCHW/NHWC notation: if each dimension is assigned a number according to
12366-
what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
12367-
while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output
12368-
tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
12369-
(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).
12370-
12371-
Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
12372-
tensor with no overlaps. If possible, prefer using this function over
12373-
:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.
12374-
12375-
Args:
12376-
size (tuple of int): the shape of the output tensor
12377-
physical_layout (tuple of int): the ordering of dimensions physically in memory
12378-
12379-
Keyword args:
12380-
{dtype}
12381-
{layout}
12382-
{device}
12383-
{requires_grad}
12384-
{pin_memory}
12385-
12386-
Examples:
12387-
12388-
>>> torch.empty((2, 3, 5, 7)).stride()
12389-
(105, 35, 7, 1)
12390-
>>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
12391-
(105, 35, 7, 1)
12392-
>>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
12393-
(105, 1, 21, 3)
12394-
>>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
12395-
(105, 1, 21, 3)
12396-
""".format(
12397-
**factory_common_args
12398-
),
12399-
)
12400-
1240112356
add_docstr(
1240212357
torch.full,
1240312358
r"""

torch/overrides.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,6 @@ def get_ignored_functions() -> Set[Callable]:
144144
torch.cudnn_grid_sampler,
145145
torch.cudnn_is_acceptable,
146146
torch.empty,
147-
torch.empty_permuted,
148147
torch.empty_strided,
149148
torch.empty_quantized,
150149
torch.eye,

0 commit comments

Comments (0)