<?xml version="1.0"?>
<net name="Model9" version="11">
	<layers>
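		<!-- Graph summary (taken from the layer definitions below): image_features [?,?,1024] f32
		     -> MatMul(linear_1.weight [4096,1024], transpose_b) -> Add(bias [1,1,4096])
		     -> Gelu(ERF) -> MatMul(linear_2.weight [4096,4096], transpose_b)
		     -> Add(bias [1,1,4096]) -> hidden_states [?,?,4096].
		     The tensor names suggest a vision-feature projection MLP; that reading is
		     inferred from the names, not stated anywhere in the file. -->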
<layer id="0" name="image_features" type="Parameter" version="opset1"> |
|
<data shape="?,?,1024" element_type="f32" /> |
|
<output> |
|
<port id="0" precision="FP32" names="image_features"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>1024</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
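		<!-- linear_1: weight constant (f32, 4096x1024, 16777216 bytes at offset 0 in the
		     weights .bin), MatMul with transpose_b="true", then a bias Add with a
		     [1,1,4096] constant stored at offset 16777216. -->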
<layer id="1" name="self.linear_1.weight" type="Const" version="opset1"> |
|
<data element_type="f32" shape="4096, 1024" offset="0" size="16777216" /> |
|
<output> |
|
<port id="0" precision="FP32" names="self.linear_1.weight"> |
|
<dim>4096</dim> |
|
<dim>1024</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="2" name="__module.linear_1/ov_ext::linear/MatMul" type="MatMul" version="opset1"> |
|
<data transpose_a="false" transpose_b="true" /> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>1024</dim> |
|
</port> |
|
<port id="1" precision="FP32"> |
|
<dim>4096</dim> |
|
<dim>1024</dim> |
|
</port> |
|
</input> |
|
<output> |
|
<port id="2" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="3" name="Constant_81245" type="Const" version="opset1"> |
|
<data element_type="f32" shape="1, 1, 4096" offset="16777216" size="16384" /> |
|
<output> |
|
<port id="0" precision="FP32"> |
|
<dim>1</dim> |
|
<dim>1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="4" name="__module.linear_1/ov_ext::linear/Add" type="Add" version="opset1"> |
|
<data auto_broadcast="numpy" /> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
<port id="1" precision="FP32"> |
|
<dim>1</dim> |
|
<dim>1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</input> |
|
<output> |
|
<port id="2" precision="FP32" names="11,input"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
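		<!-- Elementwise GELU (ERF approximation mode) applied to the [?,?,4096] activations. -->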
<layer id="5" name="__module.act/aten::gelu/Gelu" type="Gelu" version="opset7"> |
|
<data approximation_mode="ERF" /> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</input> |
|
<output> |
|
<port id="1" precision="FP32" names="13"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
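		<!-- linear_2: weight constant (f32, 4096x4096, 67108864 bytes at offset 16793600),
		     MatMul with transpose_b="true", then a bias Add with a [1,1,4096] constant
		     stored at offset 83902464. -->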
<layer id="6" name="self.linear_2.weight" type="Const" version="opset1"> |
|
<data element_type="f32" shape="4096, 4096" offset="16793600" size="67108864" /> |
|
<output> |
|
<port id="0" precision="FP32" names="self.linear_2.weight"> |
|
<dim>4096</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="7" name="__module.linear_2/ov_ext::linear/MatMul" type="MatMul" version="opset1"> |
|
<data transpose_a="false" transpose_b="true" /> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
<port id="1" precision="FP32"> |
|
<dim>4096</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</input> |
|
<output> |
|
<port id="2" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="8" name="Constant_81246" type="Const" version="opset1"> |
|
<data element_type="f32" shape="1, 1, 4096" offset="83902464" size="16384" /> |
|
<output> |
|
<port id="0" precision="FP32"> |
|
<dim>1</dim> |
|
<dim>1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
<layer id="9" name="__module.linear_2/ov_ext::linear/Add" type="Add" version="opset1"> |
|
<data auto_broadcast="numpy" /> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
<port id="1" precision="FP32"> |
|
<dim>1</dim> |
|
<dim>1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</input> |
|
<output> |
|
<port id="2" precision="FP32" names="hidden_states"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</output> |
|
</layer> |
|
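		<!-- Result node: exposes the final [?,?,4096] tensor as the model output "hidden_states". -->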
<layer id="10" name="Result_79362" type="Result" version="opset1" output_names="hidden_states"> |
|
<input> |
|
<port id="0" precision="FP32"> |
|
<dim>-1</dim> |
|
<dim>-1</dim> |
|
<dim>4096</dim> |
|
</port> |
|
</input> |
|
</layer> |
|
</layers> |
|
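	<!-- Edges wire producer ports to consumer ports: from-layer/from-port -> to-layer/to-port. -->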
	<edges>
		<edge from-layer="0" from-port="0" to-layer="2" to-port="0" />
		<edge from-layer="1" from-port="0" to-layer="2" to-port="1" />
		<edge from-layer="2" from-port="2" to-layer="4" to-port="0" />
		<edge from-layer="3" from-port="0" to-layer="4" to-port="1" />
		<edge from-layer="4" from-port="2" to-layer="5" to-port="0" />
		<edge from-layer="5" from-port="1" to-layer="7" to-port="0" />
		<edge from-layer="6" from-port="0" to-layer="7" to-port="1" />
		<edge from-layer="7" from-port="2" to-layer="9" to-port="0" />
		<edge from-layer="8" from-port="0" to-layer="9" to-port="1" />
		<edge from-layer="9" from-port="2" to-layer="10" to-port="0" />
	</edges>
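	<!-- Conversion metadata: exported from PyTorch via Optimum Intel for OpenVINO runtime 2025.2.0. -->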
	<rt_info>
		<Runtime_version value="2025.2.0-19140-c01cd93e24d-releases/2025/2" />
		<conversion_parameters>
			<framework value="pytorch" />
			<is_python_object value="True" />
		</conversion_parameters>
		<optimum>
			<optimum_intel_version value="1.24.0" />
			<optimum_version value="1.26.1" />
			<pytorch_version value="2.7.1" />
			<transformers_version value="4.52.4" />
		</optimum>
	</rt_info>
</net>