Commit bfe3f7d
Update base for Update on "Add formulas and basic tests"
RFC: pytorch/rfcs#11
This PR adds:
- Codegen support to define forward grad formulas, along with a few manual formulas
- Codegen support to automatically generate formulas, as well as a few usages
- Codegen support to materialize undefined tangents when no value is provided (the `zeros_like` fallback in the generated code below)
- Tests for basic forward grad components
Examples of the generated code (note that some names here are not up to date with the latest version; in particular, the "legacy" names have since been removed).
For each of them, the only part that changes is the `if` statement just before the `return` that checks whether a forward grad is defined.
- For a manual entry:
```yaml
- name: max(Tensor self) -> Tensor
  self: evenly_distribute_backward(grad, self, result)
  result: max_forward(self_t, self_p, result)
```
```cpp
Tensor max(const Tensor & self) {
  auto& self_ = unpack(self, "self", 0);
  auto _any_requires_grad = compute_requires_grad( self );
  (void)_any_requires_grad;
  std::shared_ptr<MaxBackward1> grad_fn;
  if (_any_requires_grad) {
    grad_fn = std::shared_ptr<MaxBackward1>(new MaxBackward1(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
    grad_fn->self_ = SavedVariable(self, false);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
    self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::max(self_);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  throw_error_for_complex_autograd(result, "max");
  if (isFwGradDefined(self)) {
    auto self_t_raw = toLegacyFwGrad(self);
    auto self_t = self_t_raw.defined() ? self_t_raw : at::zeros_like(toLegacyTensor(self));
    auto self_p = toLegacyPrimal(self);
    auto result_new_fw_grad = max_forward(self_t, self_p, result);
    if (result_new_fw_grad.defined()) {
      result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
    }
  }
  if (grad_fn) {
    grad_fn->result_ = SavedVariable(result, true);
  }
  return result;
}
```
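
As a point of reference, here is a minimal sketch of what a manual forward formula like `max_forward` could look like. This is an illustration only, not the actual helper from the PR: the tangent of a full `max` reduction is the input tangent at the position(s) attaining the max, averaged over ties to stay consistent with `evenly_distribute_backward`.
```cpp
// Hypothetical sketch; the real max_forward is a manually written helper.
Tensor max_forward(const Tensor& self_t, const Tensor& self_p, const Tensor& result) {
  auto mask = self_p == result;               // positions attaining the max
  return (self_t * mask).sum() / mask.sum();  // average the tangent over ties
}
```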
- For an element-wise entry:
```yaml
- name: abs(Tensor self) -> Tensor
  self: grad * self.sgn()
  result: auto_element_wise
```
```cpp
Tensor abs(const Tensor & self) {
  auto& self_ = unpack(self, "self", 0);
  auto _any_requires_grad = compute_requires_grad( self );
  (void)_any_requires_grad;
  std::shared_ptr<AbsBackward> grad_fn;
  if (_any_requires_grad) {
    grad_fn = std::shared_ptr<AbsBackward>(new AbsBackward(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
    grad_fn->self_ = SavedVariable(self, false);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
    self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::abs(self_);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  throw_error_for_complex_autograd(result, "abs");
  if (isFwGradDefined(self)) {
    auto self_t_raw = toLegacyFwGrad(self);
    auto self_t = self_t_raw.defined() ? self_t_raw : at::zeros_like(toLegacyTensor(self));
    auto self_p = toLegacyPrimal(self);
    auto result_new_fw_grad = self_t * self_p.sgn();
    if (result_new_fw_grad.defined()) {
      result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
    }
  }
  return result;
}
```
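
The `auto_element_wise` mode reuses the backward formula: for an element-wise op, both the JVP and the VJP multiply by the same pointwise derivative, so the codegen derives the forward expression by substituting the input tangent for `grad` and the primal for `self` (compare `grad * self.sgn()` in the YAML with `self_t * self_p.sgn()` above). A quick numerical illustration of why this is valid, as a standalone libtorch snippet (hypothetical check, not part of the PR's tests):
```cpp
#include <torch/torch.h>
#include <iostream>

// For abs, the forward tangent self_t * self_p.sgn() should match a
// finite-difference Jacobian-vector product.
int main() {
  auto x = torch::randn({5});
  auto v = torch::randn({5});
  double h = 1e-4;
  auto fd  = ((x + h * v).abs() - x.abs()) / h;  // finite-difference JVP
  auto jvp = v * x.sgn();                        // formula from the entry above
  std::cout << (fd - jvp).abs().max().item<double>() << "\n";  // ~0
}
```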
- For a linear entry:
```yaml
- name: clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor
  self: grad
  result: auto_linear
```
```cpp
Tensor clone(const Tensor & self, c10::optional<MemoryFormat> memory_format) {
  auto& self_ = unpack(self, "self", 0);
  auto _any_requires_grad = compute_requires_grad( self );
  (void)_any_requires_grad;
  std::shared_ptr<CloneBackward> grad_fn;
  if (_any_requires_grad) {
    grad_fn = std::shared_ptr<CloneBackward>(new CloneBackward(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
    self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::clone(self_, memory_format);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  if (isFwGradDefined(self)) {
    auto self_t_raw = toLegacyFwGrad(self);
    auto self_t = self_t_raw.defined() ? self_t_raw : at::zeros_like(toLegacyTensor(self));
    auto result_new_fw_grad = at::clone(self_t, memory_format);
    if (result_new_fw_grad.defined()) {
      result.set_fw_grad(result_new_fw_grad, /* level */ 0, /* is_inplace_op */ false);
    }
  }
  return result;
}
```
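
`auto_linear` relies on the op being linear in its input: for a linear map f, the Jacobian-vector product is f itself applied to the tangent, so the generated code simply re-dispatches the op on `self_t` (here `at::clone(self_t, memory_format)`). A standalone libtorch snippet illustrating the identity (hypothetical, not part of the PR's tests):
```cpp
#include <torch/torch.h>
#include <iostream>

// For linear f, f(x + h*v) - f(x) == h * f(v) up to rounding, so the output
// tangent of clone is just clone of the input tangent.
int main() {
  auto x = torch::randn({5});
  auto v = torch::randn({5});
  double h = 0.5;
  auto lhs = (x + h * v).clone() - x.clone();
  auto rhs = h * v.clone();
  std::cout << (lhs - rhs).abs().max().item<double>() << "\n";  // ~0
}
```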
- For an entry with no forward formula:
```yaml
- name: angle(Tensor self) -> Tensor
  self: angle_backward(grad, self)
```
```cpp
Tensor angle(const Tensor & self) {
  auto& self_ = unpack(self, "self", 0);
  auto _any_requires_grad = compute_requires_grad( self );
  (void)_any_requires_grad;
  std::shared_ptr<AngleBackward> grad_fn;
  if (_any_requires_grad) {
    grad_fn = std::shared_ptr<AngleBackward>(new AngleBackward(), deleteNode);
    grad_fn->set_next_edges(collect_next_edges( self ));
    grad_fn->self_ = SavedVariable(self, false);
  }
  #ifndef NDEBUG
  c10::optional<Storage> self__storage_saved =
    self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt;
  c10::intrusive_ptr<TensorImpl> self__impl_saved;
  if (self_.defined()) self__impl_saved = self_.getIntrusivePtr();
  #endif
  auto tmp = ([&]() {
    at::AutoNonVariableTypeMode non_var_type_mode(true);
    return at::angle(self_);
  })();
  auto result = std::move(tmp);
  #ifndef NDEBUG
  if (self__storage_saved.has_value())
    AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage()));
  if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr());
  #endif
  if (grad_fn) {
    set_history(flatten_tensor_args( result ), grad_fn);
  }
  throw_error_for_complex_autograd(result, "angle");
  TORCH_CHECK(!(isFwGradDefined(self)), "Trying to use forward AD with angle that does not support it.");
  return result;
}
```
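
When an entry has no forward formula, the codegen emits the `TORCH_CHECK` above so that forward AD fails loudly instead of silently dropping the tangent. A hypothetical trigger, reusing the `set_fw_grad` call seen in the other generated functions:
```cpp
// Sketch only: feeding a dual tensor into angle hits the generated TORCH_CHECK.
auto x = torch::randn({3});
x.set_fw_grad(torch::ones({3}), /* level */ 0, /* is_inplace_op */ false);
at::angle(x);  // "Trying to use forward AD with angle that does not support it."
```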
Differential Revision: [D25607505](https://our.internmc.facebook.com/intern/diff/D25607505)
[ghstack-poisoned]