Remove specific processing of declarations of add, sub, mul, div, rsub #29238
Remove specific processing of declarations of add, sub, mul, div, rsub #29238 — xuhdev wants to merge 49 commits into gh/xuhdev/50/base from
Conversation
So that their Scalar overload can be in effect from Python code. [ghstack-poisoned]
…l, and div" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) ghstack-source-id: 03a2ea2 Pull Request resolved: #29238
ezyang
left a comment
There was a problem hiding this comment.
assuming this passes CI seems fine
ailzhang
left a comment
There was a problem hiding this comment.
pytorch/torch/csrc/utils/python_arg_parser.cpp
Lines 631 to 655 in cd9f8ab
This code block seems to be handling number->tensor conversion as well; maybe it is also worth removing?
|
@ailzhang I'm not really sure about it. The function is used in pytorch/torch/csrc/utils/python_arg_parser.h Line 224 in cd9f8ab x = torch.tensor(3, dtype=torch.int)). Maybe someone can shed some light on this?
|
|
@ailzhang any idea about the XLA failure? |
|
Can you post the differences in generated code? You can use tools/git_add_generated_dirs. |
|
@gchanan Here you go! diff --git a/torch/csrc/autograd/generated/python_torch_functions.cpp b/torch/csrc/autograd/generated/python_torch_functions.cpp
index ed68256cfaa0..178425466903 100644
--- a/torch/csrc/autograd/generated/python_torch_functions.cpp
+++ b/torch/csrc/autograd/generated/python_torch_functions.cpp
@@ -1999,6 +1999,7 @@ static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "add(Tensor input, Scalar other, Scalar alpha=1)",
"add(Tensor input, Scalar alpha, Tensor other, *, Tensor out=None)|deprecated",
"add(Tensor input, Tensor other, *, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
@@ -2007,12 +2008,14 @@ static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_add(r.tensor(0), r.scalar(1), r.scalar(2)));
+ } else if (r.idx == 1) {
if (r.isNone(3)) {
return wrap(dispatch_add(r.tensor(0), r.scalar(1), r.tensor(2)));
} else {
return wrap(dispatch_add(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3)));
}
- } else if (r.idx == 1) {
+ } else if (r.idx == 2) {
if (r.isNone(3)) {
return wrap(dispatch_add(r.tensor(0), r.tensor(1), r.scalar(2)));
} else {
@@ -3958,6 +3961,7 @@ static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "div(Tensor input, Scalar other)",
"div(Tensor input, Tensor other, *, Tensor out=None)",
}, /*traceable=*/true);
@@ -3965,6 +3969,8 @@ static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_div(r.tensor(0), r.scalar(1)));
+ } else if (r.idx == 1) {
if (r.isNone(2)) {
return wrap(dispatch_div(r.tensor(0), r.tensor(1)));
} else {
@@ -7191,6 +7197,7 @@ static PyObject * THPVariable_mul(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "mul(Tensor input, Scalar other)",
"mul(Tensor input, Tensor other, *, Tensor out=None)",
}, /*traceable=*/true);
@@ -7198,6 +7205,8 @@ static PyObject * THPVariable_mul(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_mul(r.tensor(0), r.scalar(1)));
+ } else if (r.idx == 1) {
if (r.isNone(2)) {
return wrap(dispatch_mul(r.tensor(0), r.tensor(1)));
} else {
@@ -9565,6 +9574,7 @@ static PyObject * THPVariable_sub(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "sub(Tensor input, Scalar other, Scalar alpha=1)",
"sub(Tensor input, Scalar alpha, Tensor other, *, Tensor out=None)|deprecated",
"sub(Tensor input, Tensor other, *, Scalar alpha=1, Tensor out=None)",
}, /*traceable=*/true);
@@ -9573,12 +9583,14 @@ static PyObject * THPVariable_sub(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_sub(r.tensor(0), r.scalar(1), r.scalar(2)));
+ } else if (r.idx == 1) {
if (r.isNone(3)) {
return wrap(dispatch_sub(r.tensor(0), r.scalar(1), r.tensor(2)));
} else {
return wrap(dispatch_sub(r.tensor(0), r.scalar(1), r.tensor(2), r.tensor(3)));
}
- } else if (r.idx == 1) {
+ } else if (r.idx == 2) {
if (r.isNone(3)) {
return wrap(dispatch_sub(r.tensor(0), r.tensor(1), r.scalar(2)));
} else {
diff --git a/torch/csrc/autograd/generated/python_torch_functions_dispatch.h b/torch/csrc/autograd/generated/python_torch_functions_dispatch.h
index 8dc3d5fb5e44..be7cb7c22d62 100644
--- a/torch/csrc/autograd/generated/python_torch_functions_dispatch.h
+++ b/torch/csrc/autograd/generated/python_torch_functions_dispatch.h
@@ -548,6 +548,11 @@ inline std::tuple<Tensor,Tensor> dispatch_adaptive_max_pool1d(const Tensor & sel
AutoNoGIL no_gil;
return at::adaptive_max_pool1d(self, output_size);
}
+inline Tensor dispatch_add(const Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.add(other, alpha);
+}
inline Tensor dispatch_add(const Tensor & self, Scalar alpha, const Tensor & other, Tensor out) {
AutoNoGIL no_gil;
@@ -1453,6 +1458,11 @@ inline Tensor dispatch_dist(const Tensor & self, const Tensor & other, Scalar p)
AutoNoGIL no_gil;
return self.dist(other, p);
}
+inline Tensor dispatch_div(const Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.div(other);
+}
inline Tensor dispatch_div(const Tensor & self, const Tensor & other, Tensor out) {
AutoNoGIL no_gil;
@@ -2733,6 +2743,11 @@ inline std::tuple<Tensor,Tensor> dispatch_mode(const Tensor & self, int64_t dim,
AutoNoGIL no_gil;
return self.mode(dim, keepdim);
}
+inline Tensor dispatch_mul(const Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.mul(other);
+}
inline Tensor dispatch_mul(const Tensor & self, const Tensor & other, Tensor out) {
AutoNoGIL no_gil;
@@ -3773,6 +3788,11 @@ inline Tensor dispatch_stft(const Tensor & self, int64_t n_fft, c10::optional<in
AutoNoGIL no_gil;
return self.stft(n_fft, hop_length, win_length, window, normalized, onesided);
}
+inline Tensor dispatch_sub(const Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.sub(other, alpha);
+}
inline Tensor dispatch_sub(const Tensor & self, Scalar alpha, const Tensor & other, Tensor out) {
AutoNoGIL no_gil;
diff --git a/torch/csrc/autograd/generated/python_variable_methods.cpp b/torch/csrc/autograd/generated/python_variable_methods.cpp
index de6c822db723..9a014fb36720 100644
--- a/torch/csrc/autograd/generated/python_variable_methods.cpp
+++ b/torch/csrc/autograd/generated/python_variable_methods.cpp
@@ -1088,6 +1088,7 @@ static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "add(Scalar other, Scalar alpha=1)",
"add(Scalar alpha, Tensor other)|deprecated",
"add(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
@@ -1096,8 +1097,10 @@ static PyObject * THPVariable_add(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
- return wrap(dispatch_add(self, r.scalar(0), r.tensor(1)));
+ return wrap(dispatch_add(self, r.scalar(0), r.scalar(1)));
} else if (r.idx == 1) {
+ return wrap(dispatch_add(self, r.scalar(0), r.tensor(1)));
+ } else if (r.idx == 2) {
return wrap(dispatch_add(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
@@ -1107,6 +1110,7 @@ static PyObject * THPVariable_add_(PyObject* self_, PyObject* args, PyObject* kw
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "add_(Scalar other, Scalar alpha=1)",
"add_(Scalar alpha, Tensor other)|deprecated",
"add_(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
@@ -1115,8 +1119,10 @@ static PyObject * THPVariable_add_(PyObject* self_, PyObject* args, PyObject* kw
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
- return wrap(dispatch_add_(self, r.scalar(0), r.tensor(1)));
+ return wrap(dispatch_add_(self, r.scalar(0), r.scalar(1)));
} else if (r.idx == 1) {
+ return wrap(dispatch_add_(self, r.scalar(0), r.tensor(1)));
+ } else if (r.idx == 2) {
return wrap(dispatch_add_(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
@@ -2221,6 +2227,7 @@ static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "div(Scalar other)",
"div(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
@@ -2228,6 +2235,8 @@ static PyObject * THPVariable_div(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_div(self, r.scalar(0)));
+ } else if (r.idx == 1) {
return wrap(dispatch_div(self, r.tensor(0)));
}
Py_RETURN_NONE;
@@ -2237,6 +2246,7 @@ static PyObject * THPVariable_div_(PyObject* self_, PyObject* args, PyObject* kw
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "div_(Scalar other)",
"div_(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
@@ -2244,6 +2254,8 @@ static PyObject * THPVariable_div_(PyObject* self_, PyObject* args, PyObject* kw
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_div_(self, r.scalar(0)));
+ } else if (r.idx == 1) {
return wrap(dispatch_div_(self, r.tensor(0)));
}
Py_RETURN_NONE;
@@ -3914,6 +3926,7 @@ static PyObject * THPVariable_mul(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "mul(Scalar other)",
"mul(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
@@ -3921,6 +3934,8 @@ static PyObject * THPVariable_mul(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_mul(self, r.scalar(0)));
+ } else if (r.idx == 1) {
return wrap(dispatch_mul(self, r.tensor(0)));
}
Py_RETURN_NONE;
@@ -3930,6 +3945,7 @@ static PyObject * THPVariable_mul_(PyObject* self_, PyObject* args, PyObject* kw
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "mul_(Scalar other)",
"mul_(Tensor other)",
}, /*traceable=*/true);
auto& self = reinterpret_cast<THPVariable*>(self_)->cdata;
@@ -3937,6 +3953,8 @@ static PyObject * THPVariable_mul_(PyObject* self_, PyObject* args, PyObject* kw
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
+ return wrap(dispatch_mul_(self, r.scalar(0)));
+ } else if (r.idx == 1) {
return wrap(dispatch_mul_(self, r.tensor(0)));
}
Py_RETURN_NONE;
@@ -5381,6 +5399,7 @@ static PyObject * THPVariable_sub(PyObject* self_, PyObject* args, PyObject* kwa
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "sub(Scalar other, Scalar alpha=1)",
"sub(Scalar alpha, Tensor other)|deprecated",
"sub(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
@@ -5389,8 +5408,10 @@ static PyObject * THPVariable_sub(PyObject* self_, PyObject* args, PyObject* kwa
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
- return wrap(dispatch_sub(self, r.scalar(0), r.tensor(1)));
+ return wrap(dispatch_sub(self, r.scalar(0), r.scalar(1)));
} else if (r.idx == 1) {
+ return wrap(dispatch_sub(self, r.scalar(0), r.tensor(1)));
+ } else if (r.idx == 2) {
return wrap(dispatch_sub(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
@@ -5400,6 +5421,7 @@ static PyObject * THPVariable_sub_(PyObject* self_, PyObject* args, PyObject* kw
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
+ "sub_(Scalar other, Scalar alpha=1)",
"sub_(Scalar alpha, Tensor other)|deprecated",
"sub_(Tensor other, *, Scalar alpha=1)",
}, /*traceable=*/true);
@@ -5408,8 +5430,10 @@ static PyObject * THPVariable_sub_(PyObject* self_, PyObject* args, PyObject* kw
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
- return wrap(dispatch_sub_(self, r.scalar(0), r.tensor(1)));
+ return wrap(dispatch_sub_(self, r.scalar(0), r.scalar(1)));
} else if (r.idx == 1) {
+ return wrap(dispatch_sub_(self, r.scalar(0), r.tensor(1)));
+ } else if (r.idx == 2) {
return wrap(dispatch_sub_(self, r.tensor(0), r.scalar(1)));
}
Py_RETURN_NONE;
diff --git a/torch/csrc/autograd/generated/python_variable_methods_dispatch.h b/torch/csrc/autograd/generated/python_variable_methods_dispatch.h
index fb05763dce4b..beb7a2445d5c 100644
--- a/torch/csrc/autograd/generated/python_variable_methods_dispatch.h
+++ b/torch/csrc/autograd/generated/python_variable_methods_dispatch.h
@@ -169,6 +169,11 @@ inline Tensor dispatch_acos_(Tensor & self) {
AutoNoGIL no_gil;
return self.acos_();
}
+inline Tensor dispatch_add(Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.add(other, alpha);
+}
inline Tensor dispatch_add(Tensor & self, Scalar alpha, const Tensor & other) {
AutoNoGIL no_gil;
@@ -179,6 +184,11 @@ inline Tensor dispatch_add(Tensor & self, const Tensor & other, Scalar alpha) {
AutoNoGIL no_gil;
return self.add(other, alpha);
}
+inline Tensor dispatch_add_(Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.add_(other, alpha);
+}
inline Tensor dispatch_add_(Tensor & self, Scalar alpha, const Tensor & other) {
AutoNoGIL no_gil;
@@ -724,11 +734,21 @@ inline Tensor dispatch_dist(Tensor & self, const Tensor & other, Scalar p) {
AutoNoGIL no_gil;
return self.dist(other, p);
}
+inline Tensor dispatch_div(Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.div(other);
+}
inline Tensor dispatch_div(Tensor & self, const Tensor & other) {
AutoNoGIL no_gil;
return self.div(other);
}
+inline Tensor dispatch_div_(Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.div_(other);
+}
inline Tensor dispatch_div_(Tensor & self, const Tensor & other) {
AutoNoGIL no_gil;
@@ -1479,11 +1499,21 @@ inline std::tuple<Tensor,Tensor> dispatch_mode(Tensor & self, int64_t dim, bool
AutoNoGIL no_gil;
return self.mode(dim, keepdim);
}
+inline Tensor dispatch_mul(Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.mul(other);
+}
inline Tensor dispatch_mul(Tensor & self, const Tensor & other) {
AutoNoGIL no_gil;
return self.mul(other);
}
+inline Tensor dispatch_mul_(Tensor & self, Scalar other) {
+
+ AutoNoGIL no_gil;
+ return self.mul_(other);
+}
inline Tensor dispatch_mul_(Tensor & self, const Tensor & other) {
AutoNoGIL no_gil;
@@ -2124,6 +2154,11 @@ inline Tensor dispatch_stft(Tensor & self, int64_t n_fft, c10::optional<int64_t>
AutoNoGIL no_gil;
return self.stft(n_fft, hop_length, win_length, window, normalized, onesided);
}
+inline Tensor dispatch_sub(Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.sub(other, alpha);
+}
inline Tensor dispatch_sub(Tensor & self, Scalar alpha, const Tensor & other) {
AutoNoGIL no_gil;
@@ -2134,6 +2169,11 @@ inline Tensor dispatch_sub(Tensor & self, const Tensor & other, Scalar alpha) {
AutoNoGIL no_gil;
return self.sub(other, alpha);
}
+inline Tensor dispatch_sub_(Tensor & self, Scalar other, Scalar alpha) {
+
+ AutoNoGIL no_gil;
+ return self.sub_(other, alpha);
+}
inline Tensor dispatch_sub_(Tensor & self, Scalar alpha, const Tensor & other) {
AutoNoGIL no_gil; |
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) ghstack-source-id: 7974752 Pull Request resolved: #29238
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
The current algorithm implements Kahn's algorithm, but it seems to produce the ordering completely reversed. For example, `python_torch_functions.py` currently generates
```python
static PythonArgParser parser({
"pow(Tensor input, Tensor exponent, *, Tensor out=None)",
"pow(Scalar self, Tensor exponent, *, Tensor out=None)",
"pow(Tensor input, Scalar exponent, *, Tensor out=None)",
}, /*traceable=*/true);
```
The order should be completely reversed so that the scalar version can be chosen first. This is also true when exposing the scalar version of add, sub, mul, div, rsub to Python in #29238. With the current ordering, the scalar version is never used.
[ghstack-poisoned]
So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) ghstack-source-id: 15c27c3 Pull Request resolved: #29238
So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) ghstack-source-id: 58b82ef Pull Request resolved: #29238
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
… first arg" Because #29238 removes special processing of `add/sub/rsub/mul/div`, calls like `torch.add(Scalar, Tensor)` will not be automatically translated to `torch.add(Tensor, Tensor)`. Therefore, we explicitly overload these ops with a scalar as their first arg. [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
… first arg" Because #29238 removes special processing of `add/sub/rsub/mul/div`, calls like `torch.add(Scalar, Tensor)` will not be automatically translated to `torch.add(Tensor, Tensor)`. Therefore, we explicitly overload these ops with a scalar as their first arg. [ghstack-poisoned]
So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) ghstack-source-id: 715ed31 Pull Request resolved: #29238
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
|
@gchanan Yes, I'm still actively fixing bugs on the stacked PR. Once the stacked PR is ready, this one should be ready to go |
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
|
I have a question related to the current CI failure. On the master branch, I have In [1]: import torch
In [2]: torch.add(1, 2)
Out[2]: tensor(3)
In [3]: torch.tan(1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-25eb9cba79db> in <module>
----> 1 torch.tan(1)
TypeError: tan(): argument 'input' (position 1) must be Tensor, not int. Is this inconsistency intended? Do we also want to disallow |
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
…l, div, rsub" So that their Scalar overload can be in effect from Python code. As discussed in #28534 (comment) [ghstack-poisoned]
|
Hi @xuhdev! Thank you for your pull request. We require contributors to sign our Contributor License Agreement, and yours needs attention. You currently have a record in our system, but we do not have a signature on file. In order for us to review and merge your code, please sign at https://code.facebook.com/cla. If you are contributing on behalf of someone else (eg your employer), the individual CLA may not be sufficient and your employer may need to sign the corporate CLA. If you have received this in error or have any questions, please contact us at cla@fb.com. Thanks! |
|
Thank you for signing our Contributor License Agreement. We can now accept your code for this (and any) Facebook open source project. Thanks! |
|
Looks like this PR hasn't been updated in a while, so we're going to go ahead and mark this as stale. |
|
Looks like this PR hasn't been updated in a while, so we're going to go ahead and mark this as stale. |
Stack from ghstack:
So that their Scalar overload can be in effect from Python code.
As discussed in #28534 (comment)