-
Notifications
You must be signed in to change notification settings - Fork 241
Expand file tree
/
Copy path
capi.patch
More file actions
179 lines (174 loc) · 7.1 KB
/
capi.patch
File metadata and controls
179 lines (174 loc) · 7.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index e49922c355..74857fa8f4 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -244,7 +244,6 @@ if(APPLE)
set(COMMON_FLAGS
-Wno-deprecated-register
-Werror=format
- -Werror=inconsistent-missing-override
-Werror=braced-scalar-init
-Werror=uninitialized
-Werror=tautological-constant-out-of-range-compare
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 51ecf263ca..456f90c202 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -464,7 +464,9 @@ bool AnalysisPredictor::Init(
paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
std::string model_path = config_.prog_file();
- if (!model_path.empty()) {
+ if (config_.model_from_memory()) {
+ load_pir_model_ = config_.new_executor_enabled();
+ } else if (!model_path.empty()) {
load_pir_model_ =
model_path.substr(model_path.find_last_of(".") + 1) == "json";
} else if (!config_.model_dir().empty()) {
@@ -540,7 +542,7 @@ bool AnalysisPredictor::Init(
if (!PrepareExecutor()) {
return true;
}
-
+
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// TODO(inference): Now only gpu with external stream support private
// device_context.
@@ -1376,7 +1378,7 @@ bool AnalysisPredictor::SaveOrLoadPirParameters(bool for_save) {
}
} else {
- if (std::filesystem::exists(config_.params_file())) {
+    if (config_.model_from_memory() || std::filesystem::exists(config_.params_file())) {
pir::LoadCombineFunction(config_.params_file(),
filter_param_names,
&tensor_out,
diff --git a/paddle/fluid/inference/capi_exp/pd_config.cc b/paddle/fluid/inference/capi_exp/pd_config.cc
index 9238599111..8e38198227 100644
--- a/paddle/fluid/inference/capi_exp/pd_config.cc
+++ b/paddle/fluid/inference/capi_exp/pd_config.cc
@@ -508,4 +508,32 @@ __pd_give PD_Cstr* PD_ConfigSummary(__pd_keep PD_Config* pd_config) {
   return paddle_infer::CvtStrToCstr(sum_str);
 }
 
+void PD_ConfigEnableNewExecutor(__pd_keep PD_Config* pd_config,
+                                PD_Bool x) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  config->EnableNewExecutor(x);
+}
+
+PD_Bool PD_ConfigNewExecutorEnabled(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->new_executor_enabled();  // NOLINT
+}
+
+void PD_ConfigEnableNewIR(__pd_keep PD_Config* pd_config,
+                          PD_Bool x) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  config->EnableNewIR(x);
+}
+
+PD_Bool PD_ConfigNewIREnabled(__pd_keep PD_Config* pd_config) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  return config->new_ir_enabled();  // NOLINT
+}
+
+void PD_ConfigUseOptimizedModel(__pd_keep PD_Config* pd_config,
+                                PD_Bool x) {
+  CHECK_AND_CONVERT_PD_CONFIG;
+  config->UseOptimizedModel(x);
+}
+
 }  // extern "C"
diff --git a/paddle/fluid/inference/capi_exp/pd_config.h b/paddle/fluid/inference/capi_exp/pd_config.h
index b611328030..3dfe02b258 100644
--- a/paddle/fluid/inference/capi_exp/pd_config.h
+++ b/paddle/fluid/inference/capi_exp/pd_config.h
@@ -743,6 +743,39 @@ PADDLE_CAPI_EXPORT extern __pd_give PD_OneDimArrayCstr* PD_ConfigAllPasses(
 PADDLE_CAPI_EXPORT extern __pd_give PD_Cstr* PD_ConfigSummary(
     __pd_keep PD_Config* pd_config);
 
+/// \brief Control whether to use the new executor.
+/// \param[in] pd_config config
+/// \param[in] x enable new executor or not
+PADDLE_CAPI_EXPORT extern void PD_ConfigEnableNewExecutor(
+    __pd_keep PD_Config* pd_config,
+    PD_Bool x);
+
+/// \brief A boolean state telling whether the new executor is enabled.
+/// \param[in] pd_config config
+/// \return Whether the new executor is enabled
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigNewExecutorEnabled(
+    __pd_keep PD_Config* pd_config);
+
+/// \brief Control whether to use new IR.
+/// \param[in] pd_config config
+/// \param[in] x enable new IR or not
+PADDLE_CAPI_EXPORT extern void PD_ConfigEnableNewIR(
+    __pd_keep PD_Config* pd_config,
+    PD_Bool x);
+
+/// \brief A boolean state telling whether the new IR is enabled.
+/// \param[in] pd_config config
+/// \return Whether the new IR is enabled
+PADDLE_CAPI_EXPORT extern PD_Bool PD_ConfigNewIREnabled(
+    __pd_keep PD_Config* pd_config);
+
+/// \brief Control whether to use optimized model to inference.
+/// \param[in] pd_config config
+/// \param[in] x whether to use optimized model
+PADDLE_CAPI_EXPORT extern void PD_ConfigUseOptimizedModel(
+    __pd_keep PD_Config* pd_config,
+    PD_Bool x);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/paddle/fluid/pir/serialize_deserialize/src/interface.cc b/paddle/fluid/pir/serialize_deserialize/src/interface.cc
index bc597db6d1..6c45afcd46 100644
--- a/paddle/fluid/pir/serialize_deserialize/src/interface.cc
+++ b/paddle/fluid/pir/serialize_deserialize/src/interface.cc
@@ -74,8 +74,17 @@ void WriteModule(const pir::Program& program,
 bool ReadModule(const std::string& file_path,
                 pir::Program* program,
                 int64_t pir_version) {
-  std::ifstream f(file_path);
-  Json data = Json::parse(f);
+  Json data;
+  // NOTE: when the model comes from memory, `file_path` holds the serialized
+  // JSON itself rather than a filesystem path; a length >= 1000 is used as a
+  // heuristic to tell the two apart.
+  if (file_path.size() < 1000) {
+    std::ifstream f(file_path);
+    data = Json::parse(f);
+  } else {
+    data = Json::parse(file_path);
+  }
+
   if (pir_version < 0) {
     pir_version = DEVELOP_VERSION;
     VLOG(6) << "pir_version is null, get pir_version: " << pir_version;
diff --git a/paddle/fluid/pir/serialize_deserialize/src/save_load_parameters.cc b/paddle/fluid/pir/serialize_deserialize/src/save_load_parameters.cc
index 1d563c326a..9c3318099d 100644
--- a/paddle/fluid/pir/serialize_deserialize/src/save_load_parameters.cc
+++ b/paddle/fluid/pir/serialize_deserialize/src/save_load_parameters.cc
@@ -188,13 +188,22 @@ void LoadCombineFunction(const std::string& file_path,
                          std::vector<phi::DenseTensor*>* out,
                          bool load_as_fp16,
                          phi::Place place) {
-  std::ifstream fin(file_path, std::ios::binary);
-  PADDLE_ENFORCE_EQ(static_cast<bool>(fin),
-                    true,
-                    common::errors::Unavailable(
-                        "Load operator fail to open file %s, please check "
-                        "whether the model file is complete or damaged.",
-                        file_path));
+  // NOTE: when the model comes from memory, `file_path` holds the raw
+  // parameter bytes rather than a path; length >= 1000 is the heuristic.
+  std::unique_ptr<std::istream> fin_ptr;
+  if (file_path.size() < 1000) {
+    fin_ptr = std::make_unique<std::ifstream>(file_path, std::ios::binary);
+    PADDLE_ENFORCE_EQ(static_cast<bool>(*fin_ptr),
+                      true,
+                      common::errors::Unavailable(
+                          "Load operator fail to open file %s, please check "
+                          "whether the model file is complete or damaged.",
+                          file_path));
+  } else {
+    fin_ptr = std::make_unique<std::istringstream>(
+        file_path, std::ios::in | std::ios::binary);
+  }
+  std::istream& fin = *fin_ptr;
 
   PADDLE_ENFORCE_GT(out->size(),
                     0UL,