-
Notifications
You must be signed in to change notification settings - Fork 390
[Converter] Add support for torch.ops.aten.pixel_unshuffle.default in Torch-TensorRT #2694
Copy link
Copy link
Closed
Labels
component: converters — Issues re: Specific op converters; feature request — New feature or request
Description
-
Function Schema: https://github.com/pytorch/pytorch/blob/6f74b7607207054c1f5331183ab725640c99fe8d/aten/src/ATen/native/native_functions.yaml#L4483
-
Original PyTorch API: https://pytorch.org/docs/stable/generated/torch.nn.PixelUnshuffle.html
Additional context
import torch
import torch.nn as nn
import torch_tensorrt
class MyModule(nn.Module):
    """Minimal repro module: wraps ``nn.PixelUnshuffle`` with a fixed
    downscale factor of 3, so tracing emits a single
    ``torch.ops.aten.pixel_unshuffle.default`` node."""

    def __init__(self):
        super().__init__()
        # Factor 3 rearranges (N, C, H, W) -> (N, C*9, H/3, W/3).
        self.unshuffle = nn.PixelUnshuffle(3)

    def forward(self, x):
        # Pure delegation: the whole forward is the one op under test.
        return self.unshuffle(x)
# Repro: half-precision model on CUDA, eval mode (no autograd needed).
model = MyModule().eval().cuda().half()
# Input dtype/device must match the model; 12x12 is divisible by the
# unshuffle factor 3.
inputs = [
torch.randn((1, 1, 12, 12), dtype=torch.half, device="cuda"),
]
# Compile via the dynamo frontend. min_block_size=1 forces even a
# single-op subgraph to be considered for TRT conversion, which makes
# the missing pixel_unshuffle converter show up in the debug log below.
optimized_model = torch_tensorrt.compile(
model,
ir="dynamo",
inputs=inputs,
enabled_precisions={torch.half},
debug=True,
min_block_size=1,
)
DEBUG:torch_tensorrt.dynamo._compiler:Input graph: graph():
%arg0_1 : [num_users=1] = placeholder[target=arg0_1]
%pixel_unshuffle : [num_users=1] = call_function[target=torch.ops.aten.pixel_unshuffle.default](args = (%arg0_1, 3), kwargs = {})
return (pixel_unshuffle,)
DEBUG:torch_tensorrt.dynamo.lowering.passes.constant_folding:Graph after constant folding:
graph():
%arg0_1 : [num_users=1] = placeholder[target=arg0_1]
%pixel_unshuffle : [num_users=1] = call_function[target=torch.ops.aten.pixel_unshuffle.default](args = (%arg0_1, 3), kwargs = {})
return (pixel_unshuffle,)
DEBUG:torch_tensorrt.dynamo._compiler:Lowered Input graph: graph():
%arg0_1 : [num_users=1] = placeholder[target=arg0_1]
%pixel_unshuffle : [num_users=1] = call_function[target=torch.ops.aten.pixel_unshuffle.default](args = (%arg0_1, 3), kwargs = {})
return (pixel_unshuffle,)
INFO:torch_tensorrt.dynamo._compiler:Compilation Settings: CompilationSettings(precision=torch.float16, debug=True, workspace_size=0, min_block_size=1, torch_executed_ops=set(), pass_through_build_failures=False, max_aux_streams=None, version_compatible=False, optimization_level=None, use_python_runtime=False, truncate_long_and_double=False, use_fast_partitioner=True, enable_experimental_decompositions=False, device=Device(type=DeviceType.GPU, gpu_id=0), require_full_compilation=False, disable_tf32=False, sparse_weights=False, refit=False, engine_capability=<EngineCapability.DEFAULT: 0>, num_avg_timing_iters=1, dla_sram_size=1048576, dla_local_dram_size=1073741824, dla_global_dram_size=536870912, dryrun=False, hardware_compatible=False, output_format='exported_program')
DEBUG:torch_tensorrt.dynamo.partitioning._global_partitioner:
Supported Nodes:
DEBUG:torch_tensorrt.dynamo.partitioning._global_partitioner:
Unsupported or Excluded Nodes:
- torch.ops.aten.pixel_unshuffle.default + Operator Count: 1
WARNING:torch_tensorrt.dynamo._compiler:0 supported operations detected in subgraph containing 1 computational nodes. Skipping this subgraph, since min_block_size was detected to be 1
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
component: converters — Issues re: Specific op converters; feature request — New feature or request