Delete a bunch of uses of getType in favor of TensorOptions. #11087
Delete a bunch of uses of getType in favor of TensorOptions. #11087 — ezyang wants to merge 8 commits into export-D9578734 from
Conversation
Differential Revision: D9581560 Differential Version: 56526196
Differential Revision: D9581560 Differential Version: 56532263
Differential Revision: D9581560 Differential Version: 56532377
Differential Revision: D9581560 Differential Version: 56535272
Differential Revision: D9581560 Differential Version: 56537640
Differential Revision: D9581560 Differential Version: 56538492
torch/csrc/cuda/Module.cpp
Outdated
| using namespace torch::autograd; | ||
| HANDLE_TH_ERRORS | ||
| Variable var = VariableType::getType(CPU(kByte))->tensor(); | ||
| Variable var = torch::empty({0}, at::device(at::kCPU).dtype(at::kByte)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| for (auto i = 0; i < numTensors_; i++) { | ||
| deviceGuard.set_index(i % numDevices_); | ||
| inputs_[i] = type.tensor({16, 16}); | ||
| inputs_[i] = at::empty({16, 16}, at::device({at::kCUDA, i % numDevices_}).dtype(at::kFloat)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| // Initialize tensor list | ||
| std::vector<at::Tensor> tensors = { | ||
| at::ones({16, 16}, at::TensorOptions(at::CPU(at::kFloat))), | ||
| at::ones({16, 16}, at::device(at::kCPU).dtype(at::kFloat)), |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| for (auto i = 0; i < size; i++) { | ||
| auto tensor = | ||
| at::ones({16, 16}, at::TensorOptions(at::getType(b, at::kFloat))) * i; | ||
| at::ones({16, 16}, at::TensorOptions(b).dtype(at::kFloat)) * i; |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| } | ||
| inputs[k][l] = | ||
| at::ones({16, 16}, at::TensorOptions(type)) * (k * stride + l); | ||
| at::ones({16, 16}, at::TensorOptions(b).dtype(at::kFloat)) * (k * stride + l); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| for (auto i = 0; i < numDevices_; ++i) { | ||
| deviceGuard.set_index(i); | ||
| inputs_[i] = type.tensor({3, 3}); | ||
| inputs_[i] = at::empty({3, 3}, at::device(at::kCUDA).dtype(at::kFloat)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| outputs_[i].resize(worldSize_ * numDevices_); | ||
| for (auto j = 0; j < worldSize_ * numDevices_; ++j) { | ||
| outputs_[i][j] = type.tensor({3, 3}); | ||
| outputs_[i][j] = at::empty({3, 3}, at::device(at::kCUDA).dtype(at::kFloat)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| HANDLE_TH_ERRORS | ||
| THGenerator *generator = THPGenerator_TH_CData(self); | ||
| Variable var = VariableType::getType(CPU(kByte))->tensor(); | ||
| Variable var = torch::empty({0}, at::device(at::kCPU).dtype(at::kByte)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
test/cpp/api/serialization.cpp
Outdated
|
|
||
| auto x = torch::ones( | ||
| {5, 5}, torch::getType(torch::Backend::CPU, static_cast<torch::Dtype>(i))); | ||
| {5, 5}, at::device(at::kCPU).dtype(static_cast<torch::Dtype>(i))); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
aten/src/ATen/test/scalar_test.cpp
Outdated
| if(at::hasCUDA()) { | ||
| auto & CUDAFloat = C.getType(Backend::CUDA,ScalarType::Float); | ||
| auto t2 = zeros({4,4}, CUDAFloat); | ||
| auto t2 = zeros({4,4}, at::device(at::kCUDA).dtype(at::kFloat)); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
Differential Revision: D9581560 Differential Version: 56544398
| if(at::hasCUDA()) { | ||
| auto & CUDAFloat = C.getType(Backend::CUDA,ScalarType::Float); | ||
| auto t2 = zeros({4,4}, CUDAFloat); | ||
| auto t2 = zeros({4,4}, at::kCUDA); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| at::DeviceGuard deviceGuard; | ||
| for (auto l = 0; l < stride; l++) { | ||
| if (type.is_cuda()) { | ||
| if (b == at::Backend::CUDA) { // NB:wouldn't work with sparse |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
Summary: Pull Request resolved: pytorch/pytorch#11087 Reviewed By: cpuhrsch Differential Revision: D9581560 fbshipit-source-id: ebe3c4c0956da8a7215ada287bf6526dbcb2b07d
Summary: Pull Request resolved: pytorch#11087 Reviewed By: cpuhrsch Differential Revision: D9581560 fbshipit-source-id: ebe3c4c0956da8a7215ada287bf6526dbcb2b07d
Delete a bunch of uses of getType in favor of TensorOptions.
Differential Revision: D9581560
Stacked on #11080