Skip to content

Commit 819afd0

Browse files
KsenijaSebarsoum
authored and committed
Version Conversion from opset 8 to 9 (#2007)
* add tests * adapter for batchnorm opset 8 to 9 * adapter for upsample opset 8 to 9 * adapter for scan opset 8 to 9 * update convert.h file * fix syntax error * Add type annotations * Check node type for Cast operator * Empty Commit to rerun checks
1 parent 6c91366 commit 819afd0

5 files changed

Lines changed: 458 additions & 0 deletions

File tree

onnx/test/version_converter_test.py

Lines changed: 308 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -437,6 +437,38 @@ def test_maxpool_down(self): # type: () -> None
437437
assert converted_model.graph.node[0].op_type == "MaxPool"
438438
assert converted_model.opset_import[0].version == 1
439439

440+
# Test BatchNormalization Adapter: 8 -> 9
def test_batch_normalization_8_9(self):  # type: () -> None
    """Converting a BatchNormalization node from opset 8 to 9 keeps the op."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    channels = 2
    shape = (1, channels, 1, 3)

    bn_node = helper.make_node(
        'BatchNormalization',
        inputs=["x", "s", "bias", "mean", "var"],
        outputs=["y"],
    )

    # The data tensor is 4-D; every per-channel parameter (scale, bias,
    # mean, variance) is a 1-D tensor of length `channels`.
    graph_inputs = [helper.make_tensor_value_info("x", data_type, shape)]
    for param_name in ("s", "bias", "mean", "var"):
        graph_inputs.append(
            helper.make_tensor_value_info(param_name, data_type, [channels]))
    graph_outputs = [helper.make_tensor_value_info("y", data_type, shape)]

    graph = helper.make_graph(
        [bn_node],
        "test_batchnormalization_8_9",
        graph_inputs,
        graph_outputs,
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "BatchNormalization"
    assert converted_model.opset_import[0].version == to_opset
440472
# Test BatchNormalization Adapter: 9 -> 8
441473
def test_batchnormalization_9_8(self): # type: () -> None
442474
from_opset = 9
@@ -466,6 +498,33 @@ def test_batchnormalization_9_8(self): # type: () -> None
466498
assert converted_model.graph.node[0].op_type == "BatchNormalization"
467499
assert converted_model.opset_import[0].version == to_opset
468500

501+
# Test Constant Adapter: 8 -> 9
def test_constant_8_9(self):  # type: () -> None
    """Converting a Constant node from opset 8 to 9 keeps op and dtype."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    shape = [2, 3, 4]
    values = np.arange(24)  # exactly 2 * 3 * 4 elements

    const_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["Y"],
        value=helper.make_tensor("", data_type, shape, values),
    )

    graph = helper.make_graph(
        [const_node],
        "test_constant",
        [],
        [onnx.helper.make_tensor_value_info("Y", data_type, shape)],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Constant"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == to_opset
527+
469528
# Test Constant Adapter: 9 -> 8
470529
def test_constant_9_8(self): # type: () -> None
471530
from_opset = 9
@@ -493,6 +552,31 @@ def test_constant_9_8(self): # type: () -> None
493552
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
494553
assert converted_model.opset_import[0].version == to_opset
495554

555+
# Test Flatten Adapter: 8 -> 9
def test_flatten_8_9(self):  # type: () -> None
    """Converting a Flatten node from opset 8 to 9 keeps op and dtype."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    flatten_node = onnx.helper.make_node(
        "Flatten", inputs=["X"], outputs=["Y"], axis=1)

    # Flattening a (2, 3, 4) tensor at axis 1 yields shape (2, 12).
    graph = helper.make_graph(
        [flatten_node],
        "test_flatten",
        [onnx.helper.make_tensor_value_info("X", data_type, [2, 3, 4])],
        [onnx.helper.make_tensor_value_info("Y", data_type, [2, 12])],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Flatten"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == to_opset
579+
496580
# Test Flatten Adapter: 9 -> 8
497581
def test_flatten_9_8(self): # type: () -> None
498582
from_opset = 9
@@ -518,6 +602,32 @@ def test_flatten_9_8(self): # type: () -> None
518602
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
519603
assert converted_model.opset_import[0].version == to_opset
520604

605+
# Test PRelu Adapter: 8 -> 9
def test_prelu_8_9(self):  # type: () -> None
    """Converting a PRelu node from opset 8 to 9 keeps op and dtype."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT
    shape = [2, 3, 4]

    prelu_node = onnx.helper.make_node(
        "PRelu", inputs=["X", "Slope"], outputs=["Y"])

    # X, Slope and Y all share the same shape in this test.
    x_info, slope_info, y_info = (
        onnx.helper.make_tensor_value_info(name, data_type, shape)
        for name in ("X", "Slope", "Y"))
    graph = helper.make_graph(
        [prelu_node], "test_prelu", [x_info, slope_info], [y_info])

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "PRelu"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == to_opset
630+
521631
# Test PRelu Adapter: 9 -> 8
522632
def test_prelu_9_8(self): # type: () -> None
523633
from_opset = 9
@@ -544,6 +654,32 @@ def test_prelu_9_8(self): # type: () -> None
544654
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
545655
assert converted_model.opset_import[0].version == to_opset
546656

657+
# Test Greater Adapter: 8 -> 9
def test_greater_8_9(self):  # type: () -> None
    """Converting a Greater node from opset 8 to 9 keeps the BOOL output."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT
    shape = [2, 3, 4]

    greater_node = onnx.helper.make_node(
        "Greater", inputs=["X1", "X2"], outputs=["Y"])

    # The comparison produces a BOOL tensor regardless of the input dtype.
    graph = helper.make_graph(
        [greater_node],
        "test_greater",
        [onnx.helper.make_tensor_value_info("X1", data_type, shape),
         onnx.helper.make_tensor_value_info("X2", data_type, shape)],
        [onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, shape)],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Greater"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
    assert converted_model.opset_import[0].version == to_opset
682+
547683
# Test Greater Adapter: 9 -> 8
548684
def test_greater_9_8(self): # type: () -> None
549685
from_opset = 9
@@ -570,6 +706,32 @@ def test_greater_9_8(self): # type: () -> None
570706
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
571707
assert converted_model.opset_import[0].version == to_opset
572708

709+
# Test Less Adapter: 8 -> 9
def test_less_8_9(self):  # type: () -> None
    """Converting a Less node from opset 8 to 9 keeps the BOOL output."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT
    shape = [2, 3, 4]

    less_node = onnx.helper.make_node(
        "Less", inputs=["X1", "X2"], outputs=["Y"])

    # The comparison produces a BOOL tensor regardless of the input dtype.
    graph = helper.make_graph(
        [less_node],
        "test_less",
        [onnx.helper.make_tensor_value_info("X1", data_type, shape),
         onnx.helper.make_tensor_value_info("X2", data_type, shape)],
        [onnx.helper.make_tensor_value_info("Y", TensorProto.BOOL, shape)],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Less"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
    assert converted_model.opset_import[0].version == to_opset
734+
573735
# Test Less Adapter: 9 -> 8
574736
def test_less_9_8(self): # type: () -> None
575737
from_opset = 9
@@ -596,6 +758,31 @@ def test_less_9_8(self): # type: () -> None
596758
assert converted_model.graph.output[0].type.tensor_type.elem_type == TensorProto.BOOL
597759
assert converted_model.opset_import[0].version == to_opset
598760

761+
# Test MatMul Adapter: 8 -> 9
def test_matmul_8_9(self):  # type: () -> None
    """Converting a MatMul node from opset 8 to 9 keeps op and dtype."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    matmul_node = onnx.helper.make_node(
        "MatMul", inputs=["X1", "X2"], outputs=["Y"])

    # (3, 4) x (4, 3) -> (3, 3)
    graph = helper.make_graph(
        [matmul_node],
        "test_matmul",
        [onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
         onnx.helper.make_tensor_value_info("X2", data_type, [4, 3])],
        [onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "MatMul"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == to_opset
785+
599786
# Test MatMul Adapter: 9 -> 8
600787
def test_matmul_9_8(self): # type: () -> None
601788
from_opset = 9
@@ -621,6 +808,32 @@ def test_matmul_9_8(self): # type: () -> None
621808
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
622809
assert converted_model.opset_import[0].version == to_opset
623810

811+
# Test Gemm Adapter: 8 -> 9
def test_gemm_8_9(self):  # type: () -> None
    """Converting a Gemm node from opset 8 to 9 keeps op and dtype."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    gemm_node = onnx.helper.make_node(
        "Gemm", inputs=["X1", "X2", "X3"], outputs=["Y"])

    # Y = X1 @ X2 + X3, so (3, 4) x (4, 3) plus a (3, 3) bias -> (3, 3).
    graph = helper.make_graph(
        [gemm_node],
        "test_gemm",
        [onnx.helper.make_tensor_value_info("X1", data_type, [3, 4]),
         onnx.helper.make_tensor_value_info("X2", data_type, [4, 3]),
         onnx.helper.make_tensor_value_info("X3", data_type, [3, 3])],
        [onnx.helper.make_tensor_value_info("Y", data_type, [3, 3])],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Gemm"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
    assert converted_model.opset_import[0].version == to_opset
836+
624837
# Test Gemm Adapter: 9 -> 8
625838
def test_gemm_9_8(self): # type: () -> None
626839
from_opset = 9
@@ -647,6 +860,35 @@ def test_gemm_9_8(self): # type: () -> None
647860
assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type
648861
assert converted_model.opset_import[0].version == to_opset
649862

863+
# Test Upsample Adapter: 8 -> 9
def test_upsample_8_9(self):  # type: () -> None
    """Converting an Upsample node from opset 8 to 9 drops the 'scales' attribute."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    upsample_node = onnx.helper.make_node(
        "Upsample",
        inputs=["X"],
        outputs=["Y"],
        mode="nearest",
        scales=[1.0, 1.0, 2.0, 3.0],
    )

    # A (1, 1, 2, 2) input scaled by (1, 1, 2, 3) yields (1, 1, 4, 6).
    graph = helper.make_graph(
        [upsample_node],
        "test_upsample_8_9",
        [onnx.helper.make_tensor_value_info("X", data_type, [1, 1, 2, 2])],
        [onnx.helper.make_tensor_value_info("Y", data_type, [1, 1, 4, 6])],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    # After conversion only the 'mode' attribute should remain on the node
    # ('scales' is no longer an attribute in opset 9).
    assert len(converted_model.graph.node) == 1
    assert converted_model.graph.node[0].op_type == "Upsample"
    assert len(converted_model.graph.node[0].attribute) == 1
    assert converted_model.graph.node[0].attribute[0].name == "mode"
    assert converted_model.opset_import[0].version == to_opset
891+
650892
# Test Helper for Upsample Adapter: 9 -> 8
651893
def helper_upsample_with_initializer(self, raw_scale=False): # type: (bool) -> None
652894
from_opset = 9
@@ -730,6 +972,72 @@ def test_upsample_with_raw_initializer_9_8(self): # type: () -> None
730972
def test_upsample_with_raw_constant_node_9_8(self): # type: () -> None
731973
self.helper_upsample_with_constant(raw_scale=True)
732974

975+
# Test Scan Adapter: 8 -> 9
def test_scan_8_9(self):  # type: () -> None
    """Converting a Scan node (with subgraph body) from opset 8 to 9 keeps the op."""
    from_opset = 8
    to_opset = 9
    data_type = TensorProto.FLOAT

    # Body graph: accumulate a running sum over the scan input and echo
    # each partial sum as the scan output.
    add_node = onnx.helper.make_node(
        "Add", inputs=["sum_in", "next"], outputs=["sum_out"])
    identity_node = onnx.helper.make_node(
        "Identity", inputs=["sum_out"], outputs=["scan_out"])
    body_graph = onnx.helper.make_graph(
        [add_node, identity_node],
        "scan_body",
        [onnx.helper.make_tensor_value_info("sum_in", data_type, [2]),
         onnx.helper.make_tensor_value_info("next", data_type, [2])],
        [onnx.helper.make_tensor_value_info("sum_out", data_type, [2]),
         onnx.helper.make_tensor_value_info("scan_out", data_type, [2])],
    )

    # Opset-8 Scan takes an optional sequence_lens as its first input;
    # it is left empty ("") here.
    scan_node = onnx.helper.make_node(
        "Scan",
        inputs=["", "initial", "x"],
        outputs=["y", "z"],
        body=body_graph,
        num_scan_inputs=1,
    )

    seq_lens_info = onnx.helper.make_empty_tensor_value_info(" ")
    initial_info = onnx.helper.make_tensor_value_info("initial", data_type, [1, 2])
    x_info = onnx.helper.make_tensor_value_info("x", data_type, [1, 3, 2])
    y_info = onnx.helper.make_tensor_value_info("y", data_type, [1, 2])
    z_info = onnx.helper.make_tensor_value_info("z", data_type, [1, 3, 2])

    graph = onnx.helper.make_graph(
        [scan_node], "test_scan_8_9",
        [seq_lens_info, initial_info, x_info], [y_info, z_info],
    )

    converted_model = self._converted(
        graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Scan"
    assert converted_model.opset_import[0].version == to_opset
1014+
1015+
# Test Cast Adapter: 8 -> 9
def test_cast_8_9(self):  # type: () -> None
    """Converting a Cast node from opset 8 to 9 keeps op and target dtype."""
    from_opset = 8
    to_opset = 9
    data_type_from = TensorProto.FLOAT
    data_type_to = TensorProto.UINT32

    nodes = [onnx.helper.make_node(
        "Cast",
        inputs=["X"],
        outputs=["Y"],
        # Reuse data_type_to (instead of repeating TensorProto.UINT32)
        # so the 'to' attribute and the output value info cannot drift.
        to=data_type_to
    )]

    graph = helper.make_graph(
        nodes,
        "test_cast",
        [onnx.helper.make_tensor_value_info("X", data_type_from, [2, 3])],
        [onnx.helper.make_tensor_value_info("Y", data_type_to, [2, 3])])

    converted_model = self._converted(graph, helper.make_operatorsetid("", from_opset), to_opset)

    assert converted_model.graph.node[0].op_type == "Cast"
    assert converted_model.graph.output[0].type.tensor_type.elem_type == data_type_to
    assert converted_model.opset_import[0].version == to_opset
1040+
7331041

7341042
if __name__ == '__main__':
7351043
unittest.main()

0 commit comments

Comments
 (0)