Enable Detectron model inference for CPU and MKL-DNN paths #10157
jgong5 wants to merge 10 commits into pytorch:master from …
Conversation
| auto& tensor_cpu = OperatorBase::Input<Tensor>(i, CPU); | ||
| CAFFE_ENFORCE(tensor_cpu.dims().size() == 0 || | ||
| tensor_cpu.size_from_dim(0) == 0, | ||
| "Expect zero dim tensor"); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| } else if ( | ||
| InputIsType<itensor>(i) && | ||
| Input(i).get_data_type() == itensor::data_type::s32) { | ||
| auto& input = Input(i); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| } else { | ||
| input.reorder_to(dtensor->template mutable_data<float>()); | ||
| if (input_share_[i]) { | ||
| local_input_blobs_[i]->Reset(); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| const_cast<void*>(src.raw_data())); | ||
| } else { | ||
| dtensor->set_data_handle(const_cast<void *>(src.raw_data())); | ||
| } |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| else if (meta == TypeMeta::Make<int>()) | ||
| return itensor::data_type::s32; | ||
| else if (meta == TypeMeta::Make<float16>()) | ||
| return itensor::data_type::s16; |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
…neither an IDEEP tensor nor a CPU tensor, e.g. AtomicIter
|
Could you check the one test failure? |
facebook-github-bot
left a comment
There was a problem hiding this comment.
yinghai has imported this pull request. If you are a Facebook employee, you can view this diff on Phabricator.
|
@yinghai Rebased and now the pre-ci test passed. Please check. Thanks. |
| if (OperatorBase::InputBlob(i).template IsType<itensor>()) { | ||
| inputs.emplace_back(Input(i)); | ||
| } else { | ||
| CAFFE_ENFORCE(OperatorBase::InputBlob(i).template IsType<TensorCPU>(), |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
|
@yinghai Update. Please check. |
facebook-github-bot
left a comment
There was a problem hiding this comment.
yinghai has imported this pull request. If you are a Facebook employee, you can view this diff on Phabricator.
|
@BIT-silence Could you take a look at the detectron related ops? |
|
@BIT-silence Any comments? OK to merge? |
| // No CPU implementation for now | ||
| CAFFE_NOT_IMPLEMENTED; | ||
| auto translate_idx = [](int ii, int d1, int d2, int d3, int scale_factor) { | ||
| int x, y, z, w; |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| auto& X = Input(0); | ||
| auto* Y = Output(0); | ||
|
|
||
| vector<TIndex> out_shape; |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| d3 = Y->dim32(3); | ||
| } | ||
|
|
||
| const float *input_data = X.template data<T>(); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| } | ||
|
|
||
| const float *input_data = X.template data<T>(); | ||
| float *output_data = Y->template mutable_data<T>(); |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
caffe2/python/pybind_state_ideep.cc
Outdated
| if (ndim == 0) { | ||
| return true; | ||
| } | ||
| for (int i = 0; i < ndim; i++) { |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
caffe2/python/pybind_state_ideep.cc
Outdated
| auto g = MakeGuard([&]() { Py_XDECREF(array); }); | ||
| const auto npy_type = PyArray_TYPE(array); | ||
| const TypeMeta &meta = NumpyTypeToCaffe(npy_type); | ||
| CAFFE_ENFORCE( |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
caffe2/python/pybind_state_ideep.cc
Outdated
| } | ||
|
|
||
| switch (npy_type) { | ||
| case NPY_OBJECT: |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
|
|
||
| #include "upsample_nearest_op.h" | ||
| #ifdef CAFFE2_USE_IDEEP | ||
| #include <caffe2/ideep/operators/operator_fallback_ideep.h> |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
| #include "upsample_nearest_op.h" | ||
| #ifdef CAFFE2_USE_IDEEP | ||
| #include <caffe2/ideep/operators/operator_fallback_ideep.h> | ||
| #include <caffe2/ideep/utils/ideep_operator.h> |
This comment was marked as off-topic.
This comment was marked as off-topic.
Sorry, something went wrong.
|
@BIT-silence Thanks much for the review comments. Please check the updated patch. |
Conflicts: caffe2/ideep/operators/operator_fallback_ideep.cc
|
@BIT-silence OK to merge? Thanks. |
facebook-github-bot
left a comment
There was a problem hiding this comment.
yinghai has imported this pull request. If you are a Facebook employee, you can view this diff on Phabricator.
|
@yinghai OK to merge? |
facebook-github-bot
left a comment
There was a problem hiding this comment.
yinghai has imported this pull request. If you are a Facebook employee, you can view this diff on Phabricator.
…0157) Summary: 1. Support the ops needed for inference of the Faster-RCNN/Mask-RCNN models in Detectron, mostly as direct fallbacks. 2. Use the CPU device to hold 0-dim tensors and integer tensors in both the fallback op and the blob feeder, as required by Detectron models. 3. Ignore 0-dim tensors in the MKL-DNN concat operator. 4. Generate a dynamic library of the Detectron module for the CPU device. This PR obsoletes pytorch#9164. Pull Request resolved: pytorch#10157 Differential Revision: D9276837 Pulled By: yinghai fbshipit-source-id: dc364932ae4a2e7fcefdee70b5fce3c0cee91b6f
This PR obsoletes #9164.