System information (version)
- OpenCV => 4.2.0
- Tensorflow => 1.14.0
- Operating System / Platform => Ubuntu 18.04
- Compiler => 7.4.0
Detailed description
Using a frozen graph containing a tensorflow resize operation tf.image.resize for OpenCV inference works with a fixed size resize but not for arbitrary (None) size. See code to reproduce the error.
Basically I would like the resize to work on any input image size. In this simplified example, I have frozen a model with an input placeholder of size None. In the model, we downscale the input by a factor of two and then upscale it back by two; if I feed an image of any size (h, w), e.g. (180, 320), I would expect the model to downscale to (h/2, w/2) --> (90, 160) and then upscale back. But it throws the following error:
Traceback (most recent call last):
File "replicate_error.py", line 78, in <module>
cv_net = cv2.dnn.readNetFromTensorflow('./frozen-opt.pb')
cv2.error: OpenCV(4.2.0) /io/opencv/modules/dnn/src/tensorflow/tf_importer.cpp:582: error: (-2:Unspecified error) Input [g_net/resize_down/size] for node [g_net/resize_down/ResizeBilinear] not found in function 'getConstBlob'
@dkurt any insight on this issue? Thank you in advance!
Steps to reproduce
import tensorflow as tf
print(tf.__version__)
from tensorflow.python.framework import graph_util
from tensorflow.python.tools import optimize_for_inference_lib
import tensorflow.tools.graph_transforms as graph_transforms
import numpy as np
import cv2
def model(inputs, scope='g_net'):
    """Downscale the input by a factor of 0.5 with bilinear resize, then
    upscale back to the original spatial size.

    Args:
        inputs: 4-D image tensor, assumed [batch, height, width, channels]
            (height/width are read from tf.shape indices 1 and 2).
        scope: variable-scope name wrapping all ops (default 'g_net').

    Returns:
        The re-upscaled tensor, same spatial size as `inputs`.
    """
    with tf.compat.v1.variable_scope(scope, reuse=False):
        # Use the *dynamic* shape so the graph also works when the
        # placeholder height/width are None.
        shape_input = tf.shape(inputs)
        h = shape_input[1]
        w = shape_input[2]
        scale = 0.5
        # Round(h * 0.5) / Round(w * 0.5), cast back to int32 for resize.
        hi = tf.cast(tf.math.round(tf.cast(h, tf.float32) * scale), tf.int32)
        wi = tf.cast(tf.math.round(tf.cast(w, tf.float32) * scale), tf.int32)
        # method=0 is bilinear interpolation.
        input_down = tf.image.resize(inputs, [hi, wi], method=0,
                                     name='resize_down')
        input_up = tf.image.resize(input_down, [h, w], method=0,
                                   name='resize_up')
        # BUG FIX: the original function returned None, so the caller's
        # `infer_op = model(img_in)` received nothing. Return the output op.
        return input_up
def run_model(width, height, input_nodes, output_nodes):
    """Build the model under a [1, height, width, 3] placeholder, freeze and
    optimize the graph, and write it to ./frozen-opt.pb.

    Args:
        width: placeholder width (may be None for an arbitrary-size input).
        height: placeholder height (may be None).
        input_nodes: list with the input placeholder name (first entry used).
        output_nodes: list of output node names to keep when freezing.
    """
    tf.compat.v1.reset_default_graph()
    # Create a placeholder for the inference input image
    img_in = tf.compat.v1.placeholder(
        dtype="float32",
        shape=[1, height, width, 3],
        name=input_nodes[0])
    # Build model
    infer_op = model(img_in)
    with tf.compat.v1.Session() as sess:
        # Initialise all the global variables in current session
        init = tf.compat.v1.global_variables_initializer()
        sess.run(init)
        # Freeze model and optimize for inference
        frozen_graph = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess=sess,  # Active TensorFlow session containing the variables
            input_graph_def=sess.graph.as_graph_def(),  # GraphDef object holding the network
            output_node_names=output_nodes)
        # Strip training-only nodes from the frozen graph.
        opt_frozen_graph = optimize_for_inference_lib.optimize_for_inference(
            frozen_graph,
            input_nodes,
            output_nodes,
            tf.float32.as_datatype_enum)
        # Fold constants and drop unreachable nodes before export.
        export_graph_def = graph_transforms.TransformGraph(
            opt_frozen_graph,
            input_nodes,
            output_nodes,
            ['fold_constants', 'strip_unused_nodes'])
        tf.io.write_graph(export_graph_def, "./", "frozen-opt.pb", as_text=False)
        # Read the exported graph back in (sanity-check that it parses).
        with tf.io.gfile.GFile("frozen-opt.pb", 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
input_nodes = ["input_placeholder"]
output_nodes = ["g_net/resize_up/ResizeBilinear"]


def _freeze_and_infer(width, height):
    # Re-export the frozen graph at the given placeholder size, then load
    # it with OpenCV's DNN module and forward a zero image (NCHW 1x3x180x320).
    run_model(width, height, input_nodes, output_nodes)
    # Read model
    cv_net = cv2.dnn.readNetFromTensorflow('./frozen-opt.pb')
    img = np.zeros((1, 3, 180, 320), dtype='float32')
    print(img.shape)
    cv_net.setInput(img)
    predicted = cv_net.forward()
    print(predicted.shape)


# WITH FIXED SIZE --> WORKS
_freeze_and_infer(320, 180)

# WITH ARBITRARY SIZE --> ERROR
# readNetFromTensorflow raises cv2.error in getConstBlob because the
# resize target size is a computed tensor, not a constant.
_freeze_and_infer(None, None)
System information (version)
Detailed description
Using a frozen graph containing a tensorflow resize operation
tf.image.resize for OpenCV inference works with a fixed size resize but not for arbitrary (None) size. See code to reproduce the error. Basically I would like the resize to work on any input image size. In this simplified example, I have frozen a model with an input placeholder of size None. In the model, we downscale the input by a factor of two and then upscale it back by two; if I feed an image of any size (h, w), e.g. (180, 320), I would expect the model to downscale to (h/2, w/2) --> (90, 160) and then upscale back. But it throws the following error:
@dkurt any insight on this issue? Thank you in advance!
Steps to reproduce