|
35 | 35 | IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck) |
36 | 36 | from torch.autograd import Variable, Function, detect_anomaly, kineto_available |
37 | 37 | from torch.autograd.function import InplaceFunction |
38 | | -import torch.autograd.forward_ad as fwAD |
39 | 38 | from torch.testing import randn_like |
40 | 39 | from torch.testing._internal.common_methods_invocations import (method_tests, |
41 | 40 | create_input, unpack_variables, |
@@ -5327,26 +5326,6 @@ def fn(a, dim0_size=5): |
5327 | 5326 |
|
5328 | 5327 | self.assertEqual(x.grad, y.grad) |
5329 | 5328 |
|
5330 | | - def test_view_with_multi_output(self): |
5331 | | - x = torch.randn(2, 2, 2, dtype=torch.double) |
5332 | | - |
5333 | | - x1 = torch.view_as_complex(x) |
5334 | | - # Taking an invalid view should always be allowed as long as it is not |
5335 | | - # modified inplace |
5336 | | - res = x1.unbind(0) |
5337 | | - |
5338 | | - with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"): |
5339 | | - res[0] += torch.rand(2, requires_grad=True) |
5340 | | - |
5341 | | - x.requires_grad_(True) |
5342 | | - x1 = torch.view_as_complex(x) |
5343 | | - # Taking an invalid view should always be allowed as long as it is not |
5344 | | - # modified inplace |
5345 | | - res = x1.unbind(0) |
5346 | | - |
5347 | | - with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"): |
5348 | | - res[0] += torch.rand(2, requires_grad=True) |
5349 | | - |
5350 | 5329 | def as_identity(self): |
5351 | 5330 | # view_as_real and view_as_complex behavior should be like an identity |
5352 | 5331 | def func(z): |
@@ -6345,66 +6324,6 @@ def foo(a): |
6345 | 6324 | self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1)) |
6346 | 6325 | self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0)) |
6347 | 6326 |
|
6348 | | -class TestAutogradForwardMode(TestCase): |
6349 | | - def test_forward_level_cleanup(self): |
6350 | | - import weakref |
6351 | | - |
6352 | | - def get_tensor_and_weak_ref(): |
6353 | | - # Helper function to get a Tensor and a weak ref that tells us |
6354 | | - # if the c++ version of this Tensor is still alive or not. |
6355 | | - # |
6356 | | - # Create the following reference chain to do so: |
6357 | | - # - python Tensor t |
6358 | | -            # - c++ Tensor corresponding to t
6359 | | - # - c++ Node corresponding to t.grad_fn |
6360 | | - # - python dict of metadata from this Node |
6361 | | - # - an object in this dict that we can take a weakref of |
6362 | | - |
6363 | | - |
6364 | | - # Create a new Tensor and Node |
6365 | | - t = torch.rand(2, requires_grad=True).clone() |
6366 | | - # Create the metadata dict |
6367 | | - meta_dict = t.grad_fn.metadata |
6368 | | - # Create the object in the dict |
6369 | | - |
6370 | | - class Foo(object): |
6371 | | - pass |
6372 | | - my_obj = Foo() |
6373 | | - meta_dict[0] = my_obj |
6374 | | - |
6375 | | -            # After exiting this function, the python Tensor t is the only
6376 | | -            # thing keeping my_obj (the weakref's referent) alive
6377 | | - ref = weakref.ref(my_obj) |
6378 | | - return t, ref |
6379 | | - |
6380 | | - # Sanity check that the helper function works as expected |
6381 | | - t, t_ref = get_tensor_and_weak_ref() |
6382 | | - self.assertIsNotNone(t_ref()) |
6383 | | - |
6384 | | - del t |
6385 | | - self.assertIsNone(t_ref()) |
6386 | | - |
6387 | | - # Main test code |
6388 | | - foo = torch.rand(2) |
6389 | | - |
6390 | | - with fwAD.dual_level(): |
6391 | | - tangent, tangent_ref = get_tensor_and_weak_ref() |
6392 | | - self.assertIsNotNone(tangent_ref()) |
6393 | | - |
6394 | | - dual = fwAD.make_dual(foo, tangent) |
6395 | | - self.assertIsNotNone(tangent_ref()) |
6396 | | - |
6397 | | - # Make sure that the tangent we provided has been re-used as is |
6398 | | - self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent) |
6399 | | - |
6400 | | - # Make sure that dual is keeping the tangent alive |
6401 | | - del tangent |
6402 | | - self.assertIsNotNone(tangent_ref()) |
6403 | | - |
6404 | | - # Make sure that the dual level does not keep the c++ |
6405 | | - # version of the tangent alive |
6406 | | - del dual |
6407 | | - self.assertIsNone(tangent_ref()) |
6408 | 6327 |
|
6409 | 6328 | # Generic device type autograd tests. |
6410 | 6329 | class TestAutogradDeviceType(TestCase): |
|
0 commit comments