@@ -3888,6 +3888,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
38883888 # "rsqrt_cpu" not implemented for 'BFloat16'
38893889 backward_dtypesIfCPU = all_types_and_complex_and (torch .bool ),
38903890 assert_autodiffed = True ,
3891+ supports_forward_ad = True ,
38913892 decorators = (precisionOverride ({torch .float16 : 1e-2 ,
38923893 torch .bfloat16 : 1e-1 ,
38933894 torch .complex64 : 1e-2 }),),
@@ -3916,6 +3917,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   safe_casts_outputs=True,
                   decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                   supports_inplace_autograd=False,
+                  supports_forward_ad=True,
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
@@ -3966,6 +3968,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           assert_autodiffed=True,
           supports_inplace_autograd=False,
+          supports_forward_ad=True,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           sample_inputs_func=sample_inputs_addmm),
    OpInfo('addmm',
@@ -3977,6 +3980,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           assert_autodiffed=True,
           supports_inplace_autograd=False,
+          supports_forward_ad=True,
           gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
           autodiff_nonfusible_nodes=['aten::add', 'aten::mm'],
           sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1)),
@@ -3987,6 +3991,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                                                       *[torch.bfloat16] if CUDA11OrLater else []),
           dtypesIfROCM=floating_types_and(torch.half),
           supports_inplace_autograd=False,
+          supports_forward_ad=True,
           skips=(
               # issue may fix: https://github.com/pytorch/pytorch/issues/55589
               # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
@@ -4000,6 +4005,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           dtypesIfCPU=all_types_and_complex_and(torch.float16, torch.bfloat16),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
           dtypesIfROCM=floating_types_and(torch.half),
+          supports_forward_ad=True,
           skips=(
               # addbmm does not correctly warn when resizing out= inputs
               SkipInfo('TestCommon', 'test_out'),
@@ -4065,6 +4071,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           backward_dtypesIfCUDA=all_types_and_complex_and(torch.bool),
           # Reference: https://github.com/pytorch/pytorch/issues/50747
           supports_inplace_autograd=False,
+          supports_forward_ad=True,
           skips=(
               # Reference: https://github.com/pytorch/pytorch/issues/50747
               SkipInfo('TestCommon', 'test_variant_consistency_eager',
@@ -4075,6 +4082,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           dtypes=all_types_and_complex(),
           dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16),
           assert_autodiffed=True,
+          supports_forward_ad=True,
           supports_inplace_autograd=False,
           skips=(
               # TODO: update sample inputs with for_inplace_variant kwarg to support this test
@@ -4084,6 +4092,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
           dtypes=floating_and_complex_types(),
           dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
           supports_inplace_autograd=False,
+          supports_forward_ad=True,
           skips=(
               # TODO: update sample inputs with for_inplace_variant kwarg to support this test
               SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
@@ -4107,6 +4116,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   ref=np.arcsin,
                   domain=(-1, 1),
                   supports_sparse=True,
+                  supports_forward_ad=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   safe_casts_outputs=True,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
@@ -4137,6 +4147,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   safe_casts_outputs=True,
                   decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
                   supports_inplace_autograd=False,
+                  supports_forward_ad=True,
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
@@ -4150,13 +4161,18 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
                                device_type='cuda', dtypes=[torch.cdouble],
                                active_if=IS_WINDOWS),
+                      # Complex gradcheck tests asinh at points 0 + ix for x > 1 which are points
+                      # where asinh is not differentiable
+                      SkipInfo('TestGradients', 'test_forward_mode_AD',
+                               dtypes=complex_types())
                   )),
    UnaryUfuncInfo('atan',
                   aliases=('arctan', ),
                   ref=np.arctan,
                   dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
                   dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
                   assert_autodiffed=True,
+                  supports_forward_ad=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   safe_casts_outputs=True,
                   skips=(
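Aside (not part of the diff): the new `test_forward_mode_AD` skip above follows from the branch-cut structure of asinh. Its derivative is 1/sqrt(1 + z^2), and at z = ix with x > 1 the argument 1 + z^2 is a negative real number, i.e. it lies on the branch cut of sqrt, so the derivative has no single well-defined value there. A quick NumPy check (illustrative only) shows the one-sided derivatives disagreeing across the cut:

```python
import numpy as np

# asinh'(z) = 1 / sqrt(1 + z**2). At z = i*x with x > 1, the quantity
# 1 + z**2 is a negative real, which sits on the branch cut of sqrt,
# so the derivative's one-sided limits across the imaginary axis differ.
z = 2j
for eps in (1e-8, -1e-8):
    d = 1.0 / np.sqrt(1.0 + (z + eps) ** 2)
    print(eps, d)
# Prints roughly -0.57735j and +0.57735j: a sign flip across the cut,
# so gradcheck cannot agree with numerical differences at these points.
```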
@@ -4191,6 +4207,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   safe_casts_outputs=True,
                   decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
                   supports_inplace_autograd=False,
+                  supports_forward_ad=True,
                   skips=(
                       SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
                                device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
@@ -4309,6 +4326,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   ref=np.conj,
                   dtypes=all_types_and_complex_and(torch.bool,
                                                    torch.bfloat16, torch.half),
+                  supports_forward_ad=True,
                   skips=(
                       # File "test_unary_ufuncs.py", line 289, in test_reference_numerics
                       #  if not torch.can_cast(numpy_to_torch_dtype_dict[expected.dtype.type], dtype):
@@ -5750,6 +5768,7 @@ def gradcheck_wrapper_triangular_input(op, input, *args, upper=False, **kwargs):
                   decorators=(precisionOverride({torch.float16: 1e-2,
                                                  torch.bfloat16: 1e-2}),),
                   safe_casts_outputs=True,
+                  supports_forward_ad=True,
                   supports_complex_to_float=True),
    OpInfo('linalg.solve',
           aten_name='linalg_solve',
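For context, also not part of the diff: `supports_forward_ad=True` opts each of these OpInfo entries into the forward-mode AD tests (the `TestGradients.test_forward_mode_AD` test referenced in the skips above). A minimal sketch of what forward mode computes, using the public dual-number API in `torch.autograd.forward_ad` and assuming a PyTorch build where `torch.atan` has a forward derivative, which is what this patch enables:

```python
import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(3, dtype=torch.double)
tangent = torch.randn(3, dtype=torch.double)  # direction for the JVP

with fwAD.dual_level():
    # Attach the tangent to the input and run the op once; forward mode
    # carries the directional derivative alongside the primal value,
    # with no backward graph or second pass needed.
    dual_input = fwAD.make_dual(primal, tangent)
    dual_output = torch.atan(dual_input)
    out_primal, out_tangent = fwAD.unpack_dual(dual_output)

# atan'(x) = 1 / (1 + x**2), so the JVP should be tangent * atan'(primal).
assert torch.allclose(out_tangent, tangent / (1 + primal ** 2))
```

The test does essentially this comparison via gradcheck for every sample input of every op with the flag set, which is why the complex asinh samples need an explicit skip.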